Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Replace mem.page_size with mem.min_page_size and mem.max_page_size #3815

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions lib/std/c.zig
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
const builtin = @import("builtin");
const std = @import("std");
const page_size = std.mem.page_size;
const min_page_size = std.mem.min_page_size;

pub usingnamespace @import("os/bits.zig");

Expand Down Expand Up @@ -83,9 +83,9 @@ pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint,
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: u64) isize;
pub extern "c" fn mmap(addr: ?*align(page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void;
pub extern "c" fn munmap(addr: *align(page_size) c_void, len: usize) c_int;
pub extern "c" fn mprotect(addr: *align(page_size) c_void, len: usize, prot: c_uint) c_int;
pub extern "c" fn mmap(addr: ?*align(min_page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void;
pub extern "c" fn munmap(addr: *align(min_page_size) c_void, len: usize) c_int;
pub extern "c" fn mprotect(addr: *align(min_page_size) c_void, len: usize, prot: c_uint) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int;
pub extern "c" fn unlinkat(dirfd: fd_t, path: [*]const u8, flags: c_uint) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
Expand Down
4 changes: 2 additions & 2 deletions lib/std/debug.zig
Original file line number Diff line number Diff line change
Expand Up @@ -1031,7 +1031,7 @@ fn openSelfDebugInfoPosix(allocator: *mem.Allocator) !DwarfInfo {
errdefer S.self_exe_file.close();

const self_exe_len = math.cast(usize, try S.self_exe_file.getEndPos()) catch return error.DebugInfoTooLarge;
const self_exe_mmap_len = mem.alignForward(self_exe_len, mem.page_size);
const self_exe_mmap_len = mem.alignForward(self_exe_len, mem.min_page_size);
const self_exe_mmap = try os.mmap(
null,
self_exe_mmap_len,
Expand Down Expand Up @@ -1135,7 +1135,7 @@ fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void {
defer f.close();
// TODO fstat and make sure that the file has the correct size

var buf: [mem.page_size]u8 = undefined;
var buf: [mem.bufsiz]u8 = undefined;
var line: usize = 1;
var column: usize = 1;
var abs_index: usize = 0;
Expand Down
4 changes: 2 additions & 2 deletions lib/std/dynamic_library.zig
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
pub const LinuxDynLib = struct {
elf_lib: ElfLib,
fd: i32,
memory: []align(mem.page_size) u8,
memory: []align(mem.min_page_size) u8,

/// Trusts the file
pub fn open(path: []const u8) !DynLib {
Expand All @@ -113,7 +113,7 @@ pub const LinuxDynLib = struct {

const bytes = try os.mmap(
null,
mem.alignForward(size, mem.page_size),
mem.alignForward(size, mem.min_page_size),
os.PROT_READ | os.PROT_EXEC,
os.MAP_PRIVATE,
fd,
Expand Down
2 changes: 1 addition & 1 deletion lib/std/event/fs.zig
Original file line number Diff line number Diff line change
Expand Up @@ -692,7 +692,7 @@ pub fn readFile(allocator: *Allocator, file_path: []const u8, max_size: usize) !
defer list.deinit();

while (true) {
try list.ensureCapacity(list.len + mem.page_size);
try list.ensureCapacity(list.len + mem.bufsiz);
const buf = list.items[list.len..];
const buf_array = [_][]u8{buf};
const amt = try preadv(allocator, fd, buf_array, list.len);
Expand Down
2 changes: 1 addition & 1 deletion lib/std/fifo.zig
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ pub fn LinearFifo(
mem.copy(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
self.head = 0;
} else {
var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
var tmp: [mem.bufsiz / @sizeOf(T)]T = undefined;

while (self.head != 0) {
const n = math.min(self.head, tmp.len);
Expand Down
6 changes: 3 additions & 3 deletions lib/std/fs.zig
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ pub fn updateFileMode(source_path: []const u8, dest_path: []const u8, mode: ?Fil

const in_stream = &src_file.inStream().stream;

var buf: [mem.page_size * 6]u8 = undefined;
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I removed this * 6... it seemed oddly arbitrary.

var buf: [mem.bufsiz]u8 = undefined;
while (true) {
const amt = try in_stream.readFull(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
Expand All @@ -166,7 +166,7 @@ pub fn copyFile(source_path: []const u8, dest_path: []const u8) !void {
var atomic_file = try AtomicFile.init(dest_path, mode);
defer atomic_file.deinit();

var buf: [mem.page_size]u8 = undefined;
var buf: [mem.bufsiz]u8 = undefined;
while (true) {
const amt = try in_stream.readFull(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
Expand All @@ -186,7 +186,7 @@ pub fn copyFileMode(source_path: []const u8, dest_path: []const u8, mode: File.M
var atomic_file = try AtomicFile.init(dest_path, mode);
defer atomic_file.deinit();

var buf: [mem.page_size * 6]u8 = undefined;
var buf: [mem.bufsiz]u8 = undefined;
while (true) {
const amt = try in_file.read(buf[0..]);
try atomic_file.file.write(buf[0..amt]);
Expand Down
52 changes: 26 additions & 26 deletions lib/std/heap.zig
Original file line number Diff line number Diff line change
Expand Up @@ -109,10 +109,10 @@ const PageAllocator = struct {
return @ptrCast([*]u8, final_addr)[0..n];
}

const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
const alloc_size = if (alignment <= mem.min_page_size) n else n + alignment;
const slice = os.mmap(
null,
mem.alignForward(alloc_size, mem.page_size),
mem.alignForward(alloc_size, mem.min_page_size),
os.PROT_READ | os.PROT_WRITE,
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
Expand All @@ -129,17 +129,17 @@ const PageAllocator = struct {
if (unused_start_len != 0) {
os.munmap(slice[0..unused_start_len]);
}
const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.min_page_size);
const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
if (unused_end_len != 0) {
os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
os.munmap(@intToPtr([*]align(mem.min_page_size) u8, aligned_end_addr)[0..unused_end_len]);
}

return @intToPtr([*]u8, aligned_addr)[0..n];
}

fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
const old_mem = @alignCast(mem.min_page_size, old_mem_unaligned);
if (builtin.os == .windows) {
const w = os.windows;
if (new_size == 0) {
Expand All @@ -154,7 +154,7 @@ const PageAllocator = struct {
const base_addr = @ptrToInt(old_mem.ptr);
const old_addr_end = base_addr + old_mem.len;
const new_addr_end = base_addr + new_size;
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
if (old_addr_end > new_addr_end_rounded) {
// For shrinking that is not releasing, we will only
// decommit the pages not needed anymore.
Expand All @@ -170,16 +170,16 @@ const PageAllocator = struct {
const base_addr = @ptrToInt(old_mem.ptr);
const old_addr_end = base_addr + old_mem.len;
const new_addr_end = base_addr + new_size;
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
if (old_addr_end > new_addr_end_rounded) {
const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
const ptr = @intToPtr([*]align(mem.min_page_size) u8, new_addr_end_rounded);
os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
}
return old_mem[0..new_size];
}

fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
const old_mem = @alignCast(mem.min_page_size, old_mem_unaligned);
if (builtin.os == .windows) {
if (old_mem.len == 0) {
return alloc(allocator, new_size, new_align);
Expand All @@ -205,9 +205,9 @@ const PageAllocator = struct {
}

const old_addr_end = base_addr + old_mem.len;
const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.min_page_size);
const new_addr_end = base_addr + new_size;
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
if (new_addr_end_rounded == old_addr_end_rounded) {
// The reallocation fits in the already allocated pages.
return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
Expand Down Expand Up @@ -270,11 +270,11 @@ const WasmPageAllocator = struct {
const adjusted_index = end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + size;

if (new_end_index > num_pages * mem.page_size) {
const required_memory = new_end_index - (num_pages * mem.page_size);
if (new_end_index > num_pages * mem.min_page_size) {
const required_memory = new_end_index - (num_pages * mem.min_page_size);

var inner_num_pages: usize = required_memory / mem.page_size;
if (required_memory % mem.page_size != 0) {
var inner_num_pages: usize = required_memory / mem.min_page_size;
if (required_memory % mem.min_page_size != 0) {
inner_num_pages += 1;
}

Expand All @@ -300,14 +300,14 @@ const WasmPageAllocator = struct {
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
// Initialize start_ptr at the first realloc
if (num_pages == 0) {
start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.min_page_size);
}

if (is_last_item(old_mem, new_align)) {
const start_index = end_index - old_mem.len;
const new_end_index = start_index + new_size;

if (new_end_index > num_pages * mem.page_size) {
if (new_end_index > num_pages * mem.min_page_size) {
_ = try alloc(allocator, new_end_index - end_index, new_align);
}
const result = start_ptr[start_index..new_end_index];
Expand Down Expand Up @@ -464,7 +464,7 @@ pub const ArenaAllocator = struct {
var len = prev_len;
while (true) {
len += len / 2;
len += mem.page_size - @rem(len, mem.page_size);
len += mem.min_page_size - @rem(len, mem.min_page_size);
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
Expand Down Expand Up @@ -886,10 +886,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
if (mem.max_page_size << 2 > maxInt(usize)) return;

const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
const large_align = @as(u29, mem.page_size << 2);
const large_align = @as(u29, mem.min_page_size << 2);

var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
Expand All @@ -916,16 +916,16 @@ fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!voi
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;

const alloc_size = mem.page_size * 2 + 50;
const alloc_size = mem.min_page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);

var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
// On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
// which is 16 pages, hence the 32. This test may require to increase
// the size of the allocations feeding the `allocator` parameter if they
// fail, because of this high over-alignment we want to have.
while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
// which is 16 pages. This test may require to increase the size of the
// allocations feeding the `allocator` parameter if they fail, because
// of this high over-alignment we want to have.
while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), 64 * 1024)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
Expand All @@ -936,7 +936,7 @@ fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!voi
slice[60] = 0x34;

// realloc to a smaller size but with a larger alignment
slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
slice = try allocator.alignedRealloc(slice, mem.min_page_size * 32, alloc_size / 2);
testing.expect(slice[0] == 0x12);
testing.expect(slice[60] == 0x34);
}
4 changes: 2 additions & 2 deletions lib/std/io.zig
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
}

pub fn BufferedInStream(comptime Error: type) type {
return BufferedInStreamCustom(mem.page_size, Error);
return BufferedInStreamCustom(mem.bufsiz, Error);
}

pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type) type {
Expand Down Expand Up @@ -545,7 +545,7 @@ test "io.CountingOutStream" {
}

pub fn BufferedOutStream(comptime Error: type) type {
return BufferedOutStreamCustom(mem.page_size, Error);
return BufferedOutStreamCustom(mem.bufsiz, Error);
}

pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamError: type) type {
Expand Down
2 changes: 1 addition & 1 deletion lib/std/io/in_stream.zig
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ pub fn InStream(comptime ReadError: type) type {
return;
}

const new_buf_size = math.min(max_size, actual_buf_len + mem.page_size);
const new_buf_size = math.min(max_size, actual_buf_len + mem.bufsiz);
if (new_buf_size == actual_buf_len) return error.StreamTooLong;
try buffer.resize(new_buf_size);
}
Expand Down
22 changes: 21 additions & 1 deletion lib/std/mem.zig
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,31 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;

/// The smallest page size any supported target can run with.
/// Page-aligned pointers elsewhere in std (mmap, munmap, PageAllocator)
/// are expressed in terms of this lower bound so the same alignment is
/// valid regardless of the page size the target actually configures.
pub const min_page_size = switch (builtin.arch) {
    .mips, .mipsel, .mips64, .mips64el => switch (builtin.os) {
        // Linux doesn't support pages smaller than 4K.
        .linux => 4 * 1024,
        // NEC VR41xx processors support page sizes as small as 1K.
        else => 1024,
    },
    .sparcv9 => 8 * 1024,
    .wasm32, .wasm64 => 64 * 1024,
    else => 4 * 1024,
};

/// The largest page size any supported target may use; an upper bound for
/// page-granular size calculations.
pub const max_page_size = switch (builtin.arch) {
    // At least ARMv7 has optional support for 16M (supersection) pages.
    .arm, .armeb => 16 * 1024 * 1024,
    // ARM64 supports 4K, 16K, 64K, 2M, 32M, 512M and 1G pages.
    .aarch64, .aarch64_be => 1 * 1024 * 1024 * 1024,
    // Every MIPS III, MIPS IV, MIPS32 and MIPS64 processor supports
    // 4K, 16K and 64K page sizes.
    .mips, .mipsel, .mips64, .mips64el => 64 * 1024,
    .powerpc64, .powerpc64le => 64 * 1024,
    // 4K, 1M and 2G pages. NOTE(review): 2G does not fit in a u29, the
    // type used for allocator alignments — confirm downstream uses.
    .s390x => 2 * 1024 * 1024 * 1024,
    // 8K, 64K, 512K, 4M, 32M, 256M, 2G and 16G pages.
    .sparcv9 => 16 * 1024 * 1024 * 1024,
    .wasm32, .wasm64 => 64 * 1024,
    // x86_64 supports 1G huge pages.
    .x86_64 => 1 * 1024 * 1024 * 1024,
    // For architectures without documented large-page support, the
    // minimum page size is the only size known to be available.
    else => min_page_size,
};

/// A good default size for fixed I/O buffers: equal to `min_page_size`,
/// so stack buffers sized with it stay modest on every supported target.
pub const bufsiz = min_page_size;

pub const Allocator = struct {
pub const Error = error{OutOfMemory};

Expand Down
14 changes: 7 additions & 7 deletions lib/std/os.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2197,8 +2197,8 @@ pub const MProtectError = error{
} || UnexpectedError;

/// `memory.len` must be page-aligned.
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
assert(mem.isAligned(memory.len, mem.page_size));
pub fn mprotect(memory: []align(mem.min_page_size) u8, protection: u32) MProtectError!void {
assert(mem.isAligned(memory.len, mem.min_page_size));
switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
0 => return,
EINVAL => unreachable,
Expand Down Expand Up @@ -2242,21 +2242,21 @@ pub const MMapError = error{
/// * SIGSEGV - Attempted write into a region mapped as read-only.
/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
pub fn mmap(
ptr: ?[*]align(mem.page_size) u8,
ptr: ?[*]align(mem.min_page_size) u8,
length: usize,
prot: u32,
flags: u32,
fd: fd_t,
offset: u64,
) MMapError![]align(mem.page_size) u8 {
) MMapError![]align(mem.min_page_size) u8 {
const err = if (builtin.link_libc) blk: {
const rc = std.c.mmap(ptr, length, prot, flags, fd, offset);
if (rc != std.c.MAP_FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length];
if (rc != std.c.MAP_FAILED) return @ptrCast([*]align(mem.min_page_size) u8, @alignCast(mem.min_page_size, rc))[0..length];
break :blk @intCast(usize, system._errno().*);
} else blk: {
const rc = system.mmap(ptr, length, prot, flags, fd, offset);
const err = errno(rc);
if (err == 0) return @intToPtr([*]align(mem.page_size) u8, rc)[0..length];
if (err == 0) return @intToPtr([*]align(mem.min_page_size) u8, rc)[0..length];
break :blk err;
};
switch (err) {
Expand All @@ -2279,7 +2279,7 @@ pub fn mmap(
/// Zig's munmap function does not, for two reasons:
/// * It violates the Zig principle that resource deallocation must succeed.
/// * The Windows function, VirtualFree, has this restriction.
pub fn munmap(memory: []align(mem.page_size) u8) void {
pub fn munmap(memory: []align(mem.min_page_size) u8) void {
switch (errno(system.munmap(memory.ptr, memory.len))) {
0 => return,
EINVAL => unreachable, // Invalid parameters.
Expand Down
Loading