allocgate: change resize to return optional instead of error

commit 066eaa5e9c (parent f68cda738a)
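The heart of the change: `Allocator.VTable.resize` (and every implementation of it) now returns `?usize` instead of `Allocator.Error!usize`, so callers switch from `try`/`catch` to `orelse`. A minimal caller-side sketch against the std API as of this commit; `growOrRealloc` is a hypothetical helper, not part of the commit:

```zig
const std = @import("std");

// Hypothetical helper: try an in-place resize first, fall back to
// alloc+copy+free when resize reports failure by returning null.
fn growOrRealloc(allocator: std.mem.Allocator, buf: []u8, new_len: usize) ![]u8 {
    // Before this commit: `try allocator.resize(buf, new_len)` with a `catch` for error.OutOfMemory.
    if (allocator.resize(buf, new_len)) |resized| {
        return resized; // resized in place
    }
    // null means the resize failed and `buf` is unmodified.
    const new_buf = try allocator.alloc(u8, new_len);
    std.mem.copy(u8, new_buf, buf[0..std.math.min(buf.len, new_len)]);
    allocator.free(buf);
    return new_buf;
}
```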
@@ -350,5 +350,5 @@ test "OptionsStep" {
         \\
     , options.contents.items);
 
-    _ = try std.zig.parse(&arena.allocator, try options.contents.toOwnedSliceSentinel(0));
+    _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0));
 }
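This hunk shows the companion allocgate interface change that recurs throughout the commit: allocators no longer expose an embedded `allocator: Allocator` field, so `&x.allocator` becomes the method call `x.allocator()`, which builds the fat-pointer interface on demand. A sketch of the new access pattern:

```zig
const std = @import("std");

test "post-allocgate allocator access (sketch)" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();

    // Previously: `const a = &arena.allocator;` (address of an embedded field).
    // Now the interface value is constructed by a method call:
    const a = arena.allocator();
    _ = try a.alloc(u8, 16); // freed wholesale by arena.deinit()
}
```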
@@ -129,7 +129,7 @@ const CAllocator = struct {
         new_len: usize,
         len_align: u29,
         return_address: usize,
-    ) Allocator.Error!usize {
+    ) ?usize {
         _ = buf_align;
         _ = return_address;
         if (new_len <= buf.len) {
@@ -141,7 +141,7 @@ const CAllocator = struct {
                 return mem.alignAllocLen(full_len, new_len, len_align);
             }
         }
-        return error.OutOfMemory;
+        return null;
     }
 
     fn free(
@@ -205,13 +205,13 @@ fn rawCResize(
     new_len: usize,
     len_align: u29,
     ret_addr: usize,
-) Allocator.Error!usize {
+) ?usize {
     _ = old_align;
     _ = ret_addr;
     if (new_len <= buf.len) {
         return mem.alignAllocLen(buf.len, new_len, len_align);
     }
-    return error.OutOfMemory;
+    return null;
 }
 
 fn rawCFree(
@@ -361,7 +361,7 @@ const PageAllocator = struct {
         new_size: usize,
         len_align: u29,
         return_address: usize,
-    ) Allocator.Error!usize {
+    ) ?usize {
         _ = buf_align;
         _ = return_address;
         const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@@ -387,7 +387,7 @@ const PageAllocator = struct {
             if (new_size_aligned <= old_size_aligned) {
                 return alignPageAllocLen(new_size_aligned, new_size, len_align);
             }
-            return error.OutOfMemory;
+            return null;
         }
 
         const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
@@ -403,7 +403,7 @@ const PageAllocator = struct {
 
         // TODO: call mremap
         // TODO: if the next_mmap_addr_hint is within the remapped range, update it
-        return error.OutOfMemory;
+        return null;
     }
 
     fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
@@ -579,11 +579,11 @@ const WasmPageAllocator = struct {
         new_len: usize,
         len_align: u29,
         return_address: usize,
-    ) error{OutOfMemory}!usize {
+    ) ?usize {
         _ = buf_align;
         _ = return_address;
         const aligned_len = mem.alignForward(buf.len, mem.page_size);
-        if (new_len > aligned_len) return error.OutOfMemory;
+        if (new_len > aligned_len) return null;
         const current_n = nPages(aligned_len);
         const new_n = nPages(new_len);
         if (new_n != current_n) {
@@ -674,7 +674,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         new_size: usize,
         len_align: u29,
         return_address: usize,
-    ) error{OutOfMemory}!usize {
+    ) ?usize {
         _ = buf_align;
         _ = return_address;
 
@@ -686,7 +686,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
             @intToPtr(*c_void, root_addr),
             amt,
-        ) orelse return error.OutOfMemory;
+        ) orelse return null;
         assert(new_ptr == @intToPtr(*c_void, root_addr));
         const return_len = init: {
             if (len_align == 0) break :init new_size;
@@ -788,14 +788,13 @@ pub const FixedBufferAllocator = struct {
         new_size: usize,
         len_align: u29,
         return_address: usize,
-    ) Allocator.Error!usize {
+    ) ?usize {
         _ = buf_align;
         _ = return_address;
         assert(self.ownsSlice(buf)); // sanity check
 
         if (!self.isLastAllocation(buf)) {
-            if (new_size > buf.len)
-                return error.OutOfMemory;
+            if (new_size > buf.len) return null;
             return mem.alignAllocLen(buf.len, new_size, len_align);
         }
 
@@ -806,9 +805,8 @@ pub const FixedBufferAllocator = struct {
         }
 
         const add = new_size - buf.len;
-        if (add + self.end_index > self.buffer.len) {
-            return error.OutOfMemory;
-        }
+        if (add + self.end_index > self.buffer.len) return null;
+
         self.end_index += add;
         return new_size;
     }
@@ -891,7 +889,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         new_len: usize,
         len_align: u29,
         return_address: usize,
-    ) error{OutOfMemory}!usize {
+    ) ?usize {
         if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
             return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
         } else {
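On the implementation side, each allocator rewrites `return error.OutOfMemory;` to `return null;` while the success paths stay untouched. A stand-alone sketch of the shape (not std code; `capacity` stands in for whatever spare room a given allocator tracks):

```zig
// Shrinks must succeed; growth past the available room reports failure by
// returning null and leaving the allocation unmodified.
fn resizeSketch(buf: []u8, new_len: usize, capacity: usize) ?usize {
    if (new_len <= buf.len) return new_len; // shrink in place
    if (new_len <= capacity) return new_len; // grow into spare room
    return null; // was: return error.OutOfMemory;
}
```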
@@ -78,26 +78,23 @@ pub const ArenaAllocator = struct {
 
             const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
             // Try to grow the buffer in-place
-            cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) catch |err| switch (err) {
-                error.OutOfMemory => {
-                    // Allocate a new node if that's not possible
-                    cur_node = try self.createNode(cur_buf.len, n + ptr_align);
-                    continue;
-                },
-            };
+            cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse {
+                // Allocate a new node if that's not possible
+                cur_node = try self.createNode(cur_buf.len, n + ptr_align);
+                continue;
+            };
         }
     }
 
-    fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+    fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
         _ = buf_align;
         _ = len_align;
         _ = ret_addr;
 
-        const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
+        const cur_node = self.state.buffer_list.first orelse return null;
         const cur_buf = cur_node.data[@sizeOf(BufNode)..];
         if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) {
-            if (new_len > buf.len)
-                return error.OutOfMemory;
+            if (new_len > buf.len) return null;
             return new_len;
         }
 
@@ -108,7 +105,7 @@ pub const ArenaAllocator = struct {
             self.state.end_index += new_len - buf.len;
             return new_len;
         } else {
-            return error.OutOfMemory;
+            return null;
         }
     }
 
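Because `error.OutOfMemory` was the only member of the error set, the `catch |err| switch (err)` dance above collapses into a single `orelse` block. A runnable toy showing the same control-flow change:

```zig
const std = @import("std");

// Stand-in for a fallible resize that now yields an optional.
fn tryGrow(ok: bool, len: usize) ?usize {
    return if (ok) len else null;
}

test "catch-switch collapses to orelse" {
    // before: tryGrow(...) catch |err| switch (err) { error.OutOfMemory => fallback }
    // after:
    const n = tryGrow(false, 64) orelse 16; // fall back on failure
    try std.testing.expectEqual(@as(usize, 16), n);
}
```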
@@ -517,7 +517,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         new_size: usize,
         len_align: u29,
         ret_addr: usize,
-    ) Error!usize {
+    ) ?usize {
         const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
             if (config.safety) {
                 @panic("Invalid free");
@@ -557,7 +557,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         if (config.enable_memory_limit) {
             const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
             if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
-                return error.OutOfMemory;
+                return null;
             }
             self.total_requested_bytes = new_req_bytes;
         }
@@ -565,7 +565,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             self.total_requested_bytes = prev_req_bytes;
         };
 
-        const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr);
+        const result_len = self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr) orelse return null;
 
         if (config.enable_memory_limit) {
             entry.value_ptr.requested_size = new_size;
@@ -650,7 +650,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         new_size: usize,
         len_align: u29,
         ret_addr: usize,
-    ) Error!usize {
+    ) ?usize {
         self.mutex.lock();
         defer self.mutex.unlock();
 
@@ -705,7 +705,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         if (config.enable_memory_limit) {
             const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
             if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
-                return error.OutOfMemory;
+                return null;
             }
             self.total_requested_bytes = new_req_bytes;
         }
@@ -726,7 +726,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             }
             return new_size;
         }
-        return error.OutOfMemory;
+        return null;
     }
 
     fn free(
@@ -735,8 +735,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         old_align: u29,
         ret_addr: usize,
     ) void {
-        const held = self.mutex.acquire();
-        defer held.release();
+        self.mutex.lock();
+        defer self.mutex.unlock();
 
         assert(old_mem.len != 0);
 
@@ -850,7 +850,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             return true;
         }
 
-        fn alloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+        fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
             self.mutex.lock();
             defer self.mutex.unlock();
 
@@ -1065,7 +1065,7 @@ test "shrink large object to large object" {
     slice[0] = 0x12;
     slice[60] = 0x34;
 
-    slice = try allocator.resize(slice, page_size * 2 + 1);
+    slice = allocator.resize(slice, page_size * 2 + 1) orelse return;
     try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);
 
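Alongside the resize change, `GeneralPurposeAllocator.free` moves from the older `acquire()`/`Held.release()` mutex API to `lock()`/`unlock()`, matching the hunk above. A sketch of the new idiom:

```zig
const std = @import("std");

// The lock()/unlock() idiom now used in free().
fn criticalSection(mutex: *std.Thread.Mutex) void {
    mutex.lock();
    defer mutex.unlock();
    // ... mutate allocator state ...
}
```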
@@ -45,22 +45,23 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
         new_len: usize,
         len_align: u29,
         ra: usize,
-    ) error{OutOfMemory}!usize {
+    ) ?usize {
         if (new_len <= buf.len) {
             self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
         } else {
             self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
         }
 
         if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
             if (new_len > buf.len) {
                 self.writer.print(" success!\n", .{}) catch {};
             }
             return resized_len;
-        } else |e| {
-            std.debug.assert(new_len > buf.len);
-            self.writer.print(" failure!\n", .{}) catch {};
-            return e;
         }
+
+        std.debug.assert(new_len > buf.len);
+        self.writer.print(" failure!\n", .{}) catch {};
+        return null;
     }
 
     fn free(
@@ -95,7 +96,7 @@ test "LogToWriterAllocator" {
     var a = try allocator.alloc(u8, 10);
     a = allocator.shrink(a, 5);
     try std.testing.expect(a.len == 5);
-    try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
+    try std.testing.expect(allocator.resize(a, 20) == null);
     allocator.free(a);
 
     try std.testing.expectEqualSlices(u8,
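Error unions support `if (x) |v| { ... } else |err| { ... }`, but optionals have no capturing `else`, so the failure branch moves below the `if` — which is exactly the restructuring in the two logging allocators. A minimal stand-alone illustration:

```zig
// Success path keeps its payload capture; the failure path follows the if.
fn reportLen(maybe_len: ?usize) usize {
    if (maybe_len) |len| {
        return len; // resize succeeded
    }
    // formerly: `else |err| { ...; return err; }`
    return 0;
}
```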
@@ -77,7 +77,7 @@ pub fn ScopedLoggingAllocator(
         new_len: usize,
         len_align: u29,
         ra: usize,
-    ) error{OutOfMemory}!usize {
+    ) ?usize {
         if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
             if (new_len <= buf.len) {
                 logHelper(
@@ -94,15 +94,15 @@ pub fn ScopedLoggingAllocator(
             }
 
             return resized_len;
-        } else |err| {
-            std.debug.assert(new_len > buf.len);
-            logHelper(
-                failure_log_level,
-                "expand - failure: {s} - {} to {}, len_align: {}, buf_align: {}",
-                .{ @errorName(err), buf.len, new_len, len_align, buf_align },
-            );
-            return err;
         }
+
+        std.debug.assert(new_len > buf.len);
+        logHelper(
+            failure_log_level,
+            "expand - failure - {} to {}, len_align: {}, buf_align: {}",
+            .{ buf.len, new_len, len_align, buf_align },
+        );
+        return null;
     }
 
     fn free(
@@ -88,14 +88,14 @@ pub fn ValidationAllocator(comptime T: type) type {
         new_len: usize,
         len_align: u29,
         ret_addr: usize,
-    ) Allocator.Error!usize {
+    ) ?usize {
         assert(buf.len > 0);
         if (len_align != 0) {
             assert(mem.isAlignedAnyAlign(new_len, len_align));
             assert(new_len >= len_align);
         }
         const underlying = self.getUnderlyingAllocatorPtr();
-        const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr);
+        const result = underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse return null;
         if (len_align == 0) {
             assert(result == new_len);
         } else {
@@ -188,7 +188,7 @@ test "Allocator.resize" {
         defer testing.allocator.free(values);
 
         for (values) |*v, i| v.* = @intCast(T, i);
-        values = try testing.allocator.resize(values, values.len + 10);
+        values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
         try testing.expect(values.len == 110);
     }
 
@@ -203,7 +203,7 @@ test "Allocator.resize" {
         defer testing.allocator.free(values);
 
         for (values) |*v, i| v.* = @intToFloat(T, i);
-        values = try testing.allocator.resize(values, values.len + 10);
+        values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
         try testing.expect(values.len == 110);
     }
 }
@@ -29,9 +29,9 @@ pub const VTable = struct {
     /// length returned by `alloc` or `resize`. `buf_align` must equal the same value
     /// that was passed as the `ptr_align` parameter to the original `alloc` call.
     ///
-    /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+    /// `null` can only be returned if `new_len` is greater than `buf.len`.
     /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be
-    /// unmodified and error.OutOfMemory MUST be returned.
+    /// unmodified and `null` MUST be returned.
     ///
     /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
     /// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
@@ -42,7 +42,7 @@ pub const VTable = struct {
     ///
     /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
     /// If the value is `0` it means no return address has been provided.
-    resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+    resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize,
 
     /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`.
     /// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call.
@@ -55,7 +55,7 @@ pub const VTable = struct {
 pub fn init(
     pointer: anytype,
     comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
-    comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+    comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize,
     comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void,
 ) Allocator {
     const Ptr = @TypeOf(pointer);
@@ -71,7 +71,7 @@ pub fn init(
             const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
             return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr });
         }
-        fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+        fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
             assert(new_len != 0);
             const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
             return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr });
@@ -104,14 +104,12 @@ pub fn NoResize(comptime AllocatorType: type) type {
             new_len: usize,
             len_align: u29,
             ret_addr: usize,
-        ) Error!usize {
+        ) ?usize {
             _ = self;
             _ = buf_align;
             _ = len_align;
             _ = ret_addr;
-            if (new_len > buf.len)
-                return error.OutOfMemory;
-            return new_len;
+            return if (new_len > buf.len) null else new_len;
         }
     };
 }
@@ -157,7 +155,7 @@ pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u
 }
 
 /// This function is not intended to be called except from within the implementation of an Allocator
-pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
     return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr);
 }
 
@@ -166,99 +164,6 @@ pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usiz
     return self.vtable.free(self.ptr, buf, buf_align, ret_addr);
 }
 
-/// Realloc is used to modify the size or alignment of an existing allocation,
-/// as well as to provide the allocator with an opportunity to move an allocation
-/// to a better location.
-/// When the size/alignment is greater than the previous allocation, this function
-/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
-/// When the size/alignment is less than or equal to the previous allocation,
-/// this function returns `error.OutOfMemory` when the allocator decides the client
-/// would be better off keeping the extra alignment/size. Clients will call
-/// `vtable.resize` when they require the allocator to track a new alignment/size,
-/// and so this function should only return success when the allocator considers
-/// the reallocation desirable from the allocator's perspective.
-/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
-/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
-/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
-/// is less than or equal to the old allocation, because it cannot reclaim the memory,
-/// and thus the `std.ArrayList` would be better off retaining its capacity.
-/// When `reallocFn` returns,
-/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
-/// as `old_mem` was when `reallocFn` is called. The bytes of
-/// `return_value[old_mem.len..]` have undefined values.
-/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
-fn reallocBytes(
-    self: Allocator,
-    /// Guaranteed to be the same as what was returned from most recent call to
-    /// `vtable.alloc` or `vtable.resize`.
-    /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
-    /// is guaranteed to be >= 1.
-    old_mem: []u8,
-    /// If `old_mem.len == 0` then this is `undefined`, otherwise:
-    /// Guaranteed to be the same as what was passed to `allocFn`.
-    /// Guaranteed to be >= 1.
-    /// Guaranteed to be a power of 2.
-    old_alignment: u29,
-    /// `new_byte_count` must be greater than zero
-    new_byte_count: usize,
-    /// Guaranteed to be >= 1.
-    /// Guaranteed to be a power of 2.
-    /// Returned slice's pointer must have this alignment.
-    new_alignment: u29,
-    /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
-    /// non-zero means the length of the returned slice must be aligned by `len_align`
-    /// `new_len` must be aligned by `len_align`
-    len_align: u29,
-    return_address: usize,
-) Error![]u8 {
-    if (old_mem.len == 0) {
-        const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address);
-        // TODO: https://github.com/ziglang/zig/issues/4298
-        @memset(new_mem.ptr, undefined, new_byte_count);
-        return new_mem;
-    }
-
-    assert(new_byte_count > 0); // `new_byte_count` must greater than zero, this is a resize not a free
-
-    if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
-        if (new_byte_count <= old_mem.len) {
-            const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
-            return old_mem.ptr[0..shrunk_len];
-        }
-        if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
-            assert(resized_len >= new_byte_count);
-            // TODO: https://github.com/ziglang/zig/issues/4298
-            @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
-            return old_mem.ptr[0..resized_len];
-        } else |_| {}
-    }
-    if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
-        return error.OutOfMemory;
-    }
-    return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address);
-}
-
-/// Move the given memory to a new location in the given allocator to accomodate a new
-/// size and alignment.
-fn moveBytes(
-    self: Allocator,
-    old_mem: []u8,
-    old_align: u29,
-    new_len: usize,
-    new_alignment: u29,
-    len_align: u29,
-    return_address: usize,
-) Error![]u8 {
-    assert(old_mem.len > 0);
-    assert(new_len > 0);
-    const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address);
-    @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
-    // TODO https://github.com/ziglang/zig/issues/4298
-    @memset(old_mem.ptr, undefined, old_mem.len);
-    self.rawFree(old_mem, old_align, return_address);
-    return new_mem;
-}
-
 /// Returns a pointer to undefined memory.
 /// Call `destroy` with the result to free the memory.
 pub fn create(self: Allocator, comptime T: type) Error!*T {
@@ -409,7 +314,7 @@ pub fn allocAdvancedWithRetAddr(
 }
 
 /// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
-pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) ?@TypeOf(old_mem) {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     const T = Slice.child;
     if (new_n == 0) {
@@ -417,8 +322,8 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old
         return &[0]T{};
     }
     const old_byte_slice = mem.sliceAsBytes(old_mem);
-    const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
-    const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+    const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return null;
+    const rc = self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()) orelse return null;
     assert(rc == new_byte_count);
     const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
     return mem.bytesAsSlice(T, new_byte_slice);
@@ -488,8 +393,31 @@ pub fn reallocAdvancedWithRetAddr(
         .exact => 0,
         .at_least => @sizeOf(T),
     };
-    const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address);
-    return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
+
+    if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), new_alignment)) {
+        if (byte_count <= old_byte_slice.len) {
+            const shrunk_len = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, len_align, return_address);
+            return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..shrunk_len]));
+        }
+
+        if (self.rawResize(old_byte_slice, Slice.alignment, byte_count, len_align, return_address)) |resized_len| {
+            // TODO: https://github.com/ziglang/zig/issues/4298
+            @memset(old_byte_slice.ptr + byte_count, undefined, resized_len - byte_count);
+            return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..resized_len]));
+        }
+    }
+
+    if (byte_count <= old_byte_slice.len and new_alignment <= Slice.alignment) {
+        return error.OutOfMemory;
+    }
+
+    const new_mem = try self.rawAlloc(byte_count, new_alignment, len_align, return_address);
+    @memcpy(new_mem.ptr, old_byte_slice.ptr, math.min(byte_count, old_byte_slice.len));
+    // TODO https://github.com/ziglang/zig/issues/4298
+    @memset(old_byte_slice.ptr, undefined, old_byte_slice.len);
+    self.rawFree(old_byte_slice, Slice.alignment, return_address);
+
+    return mem.bytesAsSlice(T, @alignCast(new_alignment, new_mem));
 }
 
 /// Prefer calling realloc to shrink if you can tolerate failure, such as
@@ -580,7 +508,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
 }
 
 /// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning
-/// error.OutOfMemory should be impossible.
+/// that a `null` return value should be impossible.
 /// This function allows a runtime `buf_align` value. Callers should generally prefer
 /// to call `shrink` directly.
 pub fn shrinkBytes(
@@ -592,5 +520,5 @@ pub fn shrinkBytes(
     return_address: usize,
 ) usize {
     assert(new_len <= buf.len);
-    return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable;
+    return self.rawResize(buf, buf_align, new_len, len_align, return_address) orelse unreachable;
 }
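The documented contract — `null` only when `new_len > buf.len` — is what lets `shrinkBytes` unwrap with `orelse unreachable`. A caller-side sketch of the same reasoning, using this commit's slice-level `resize` API (`shrinkTo` is a hypothetical helper):

```zig
const std = @import("std");

// Shrinks cannot fail per the VTable contract, so unwrapping is sound.
fn shrinkTo(a: std.mem.Allocator, buf: []u8, new_len: usize) []u8 {
    std.debug.assert(new_len <= buf.len);
    return a.resize(buf, new_len) orelse unreachable;
}
```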
@@ -68,11 +68,8 @@ pub const FailingAllocator = struct {
         new_len: usize,
         len_align: u29,
         ra: usize,
-    ) error{OutOfMemory}!usize {
-        const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| {
-            std.debug.assert(new_len > old_mem.len);
-            return e;
-        };
+    ) ?usize {
+        const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) orelse return null;
         if (r < old_mem.len) {
             self.freed_bytes += old_mem.len - r;
         } else {
@@ -1288,7 +1288,7 @@ fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs: any
     // TODO this should not be performed if the user specifies `-flat_namespace` flag.
     // See ld64 manpages.
     var arena_alloc = std.heap.ArenaAllocator.init(self.base.allocator);
-    const arena = &arena_alloc.allocator;
+    const arena = arena_alloc.allocator();
     defer arena_alloc.deinit();
 
     while (dependent_libs.readItem()) |*id| {
@@ -138,7 +138,7 @@ pub const LibStub = struct {
         err: {
             log.debug("trying to parse as []TbdV3", .{});
             const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
-            var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
+            var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
             for (inner) |doc, i| {
                 out[i] = .{ .v3 = doc };
             }
@@ -159,7 +159,7 @@ pub fn main() anyerror!void {
 
     if (tracy.enable_allocation) {
         var gpa_tracy = tracy.tracyAllocator(gpa);
-        return mainArgs(&gpa_tracy.allocator, arena, args);
+        return mainArgs(gpa_tracy.allocator(), arena, args);
     }
 
     return mainArgs(gpa, arena, args);
@@ -113,18 +113,14 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
 
         const Self = @This();
 
-        pub fn allocator(self: *Self) std.mem.Allocator {
-            return std.mem.Allocator.init(self, allocFn, resizeFn);
-        }
-
-        pub fn init(allocator: std.mem.Allocator) Self {
-            return .{
-                .parent_allocator = allocator,
-                .allocator = .{
-                    .allocFn = allocFn,
-                    .resizeFn = resizeFn,
-                },
-            };
+        pub fn init(parent_allocator: std.mem.Allocator) Self {
+            return .{
+                .parent_allocator = parent_allocator,
+            };
+        }
+
+        pub fn allocator(self: *Self) std.mem.Allocator {
+            return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
         }
 
         fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
@@ -162,12 +158,11 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
             }
 
             return resized_len;
-        } else |err| {
-            // this is not really an error condition, during normal operation the compiler hits this case thousands of times
-            // due to this emitting messages for it is both slow and causes clutter
-            // messageColor("allocation resize failed", 0xFF0000);
-            return err;
-        }
+        }
+
+        // during normal operation the compiler hits this case thousands of times due to this
+        // emitting messages for it is both slow and causes clutter
+        return null;
     }
 
     fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
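TracyAllocator now follows the standard post-allocgate wrapper shape: the struct holds only its parent allocator, and `allocator()` assembles the interface from three callbacks whose resize returns `?usize`. A hypothetical wrapper in the same shape (`CountingAllocator` is illustrative, not from the commit):

```zig
const std = @import("std");

const CountingAllocator = struct {
    parent: std.mem.Allocator,
    resize_failures: usize = 0,

    pub fn init(parent: std.mem.Allocator) CountingAllocator {
        return .{ .parent = parent };
    }

    pub fn allocator(self: *CountingAllocator) std.mem.Allocator {
        return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
    }

    fn allocFn(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ra: usize) std.mem.Allocator.Error![]u8 {
        return self.parent.rawAlloc(len, ptr_align, len_align, ra);
    }

    fn resizeFn(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ra: usize) ?usize {
        return self.parent.rawResize(buf, buf_align, new_len, len_align, ra) orelse {
            self.resize_failures += 1; // count failed in-place resizes
            return null;
        };
    }

    fn freeFn(self: *CountingAllocator, buf: []u8, buf_align: u29, ra: usize) void {
        self.parent.rawFree(buf, buf_align, ra);
    }
};
```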
@@ -496,7 +496,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
         \\    var a = try allocator.alloc(u8, 10);
         \\    a = allocator.shrink(a, 5);
         \\    try std.testing.expect(a.len == 5);
-        \\    try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
+        \\    try std.testing.expect(allocator.resize(a, 20) == null);
         \\    allocator.free(a);
         \\}
         \\
@@ -514,8 +514,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
     ,
         \\debug: alloc - success - len: 10, ptr_align: 1, len_align: 0
         \\debug: shrink - success - 10 to 5, len_align: 0, buf_align: 1
-        \\error: expand - failure: OutOfMemory - 5 to 20, len_align: 0, buf_align: 1
-        \\debug: free - success - len: 5
+        \\error: expand - failure - 5 to 20, len_align: 0, buf_align: 1
+        \\debug: free - len: 5
         \\
     );
 }
@@ -131,7 +131,7 @@ const PathTable = std.StringHashMap(*TargetToHash);
 
 pub fn main() !void {
     var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
-    const arena = &arena_state.allocator;
+    const arena = arena_state.allocator();
     const args = try std.process.argsAlloc(arena);
     var search_paths = std.ArrayList([]const u8).init(arena);
     var opt_out_dir: ?[]const u8 = null;