diff --git a/lib/std/c.zig b/lib/std/c.zig
index 9e70ff988d7a..0b89d78aa689 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -1,6 +1,6 @@
 const builtin = @import("builtin");
 const std = @import("std");
-const page_size = std.mem.page_size;
+const min_page_size = std.mem.min_page_size;
 
 pub usingnamespace @import("os/bits.zig");
 
@@ -83,9 +83,9 @@ pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint,
 pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
 pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
 pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: u64) isize;
-pub extern "c" fn mmap(addr: ?*align(page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void;
-pub extern "c" fn munmap(addr: *align(page_size) c_void, len: usize) c_int;
-pub extern "c" fn mprotect(addr: *align(page_size) c_void, len: usize, prot: c_uint) c_int;
+pub extern "c" fn mmap(addr: ?*align(min_page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void;
+pub extern "c" fn munmap(addr: *align(min_page_size) c_void, len: usize) c_int;
+pub extern "c" fn mprotect(addr: *align(min_page_size) c_void, len: usize, prot: c_uint) c_int;
 pub extern "c" fn unlink(path: [*]const u8) c_int;
 pub extern "c" fn unlinkat(dirfd: fd_t, path: [*]const u8, flags: c_uint) c_int;
 pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 3c842f3ac153..13982be0ff42 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1031,7 +1031,7 @@ fn openSelfDebugInfoPosix(allocator: *mem.Allocator) !DwarfInfo {
     errdefer S.self_exe_file.close();
 
     const self_exe_len = math.cast(usize, try S.self_exe_file.getEndPos()) catch return error.DebugInfoTooLarge;
-    const self_exe_mmap_len = mem.alignForward(self_exe_len, mem.page_size);
+    const self_exe_mmap_len = mem.alignForward(self_exe_len, mem.min_page_size);
     const self_exe_mmap = try os.mmap(
         null,
         self_exe_mmap_len,
@@ -1135,7 +1135,7 @@ fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void {
     defer f.close();
     // TODO fstat and make sure that the file has the correct size
 
-    var buf: [mem.page_size]u8 = undefined;
+    var buf: [mem.bufsiz]u8 = undefined;
     var line: usize = 1;
     var column: usize = 1;
     var abs_index: usize = 0;
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index dbebb20b2708..8aab9b234117 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -101,7 +101,7 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
 pub const LinuxDynLib = struct {
     elf_lib: ElfLib,
     fd: i32,
-    memory: []align(mem.page_size) u8,
+    memory: []align(mem.min_page_size) u8,
 
     /// Trusts the file
     pub fn open(path: []const u8) !DynLib {
@@ -113,7 +113,7 @@ pub const LinuxDynLib = struct {
 
         const bytes = try os.mmap(
             null,
-            mem.alignForward(size, mem.page_size),
+            mem.alignForward(size, mem.min_page_size),
             os.PROT_READ | os.PROT_EXEC,
             os.MAP_PRIVATE,
             fd,
diff --git a/lib/std/event/fs.zig b/lib/std/event/fs.zig
index 346d0f294a46..44ce24c492fb 100644
--- a/lib/std/event/fs.zig
+++ b/lib/std/event/fs.zig
@@ -692,7 +692,7 @@ pub fn readFile(allocator: *Allocator, file_path: []const u8, max_size: usize) ![]u8 {
     defer list.deinit();
     while (true) {
-        try list.ensureCapacity(list.len + mem.page_size);
+        try list.ensureCapacity(list.len + mem.bufsiz);
         const buf = list.items[list.len..];
         const buf_array = [_][]u8{buf};
         const amt = try preadv(allocator, fd, buf_array, list.len);
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index e078abcb2b86..04bb965f0540 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -88,7 +88,7 @@ pub fn LinearFifo(
                 mem.copy(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
                 self.head = 0;
             } else {
-                var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
+                var tmp: [mem.bufsiz / @sizeOf(T)]T = undefined;
 
                 while (self.head != 0) {
                     const n = math.min(self.head, tmp.len);
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 95060f566f05..d6ad29f450d8 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -139,7 +139,7 @@ pub fn updateFileMode(source_path: []const u8, dest_path: []const u8, mode: ?Fil
 
     const in_stream = &src_file.inStream().stream;
 
-    var buf: [mem.page_size * 6]u8 = undefined;
+    var buf: [mem.bufsiz]u8 = undefined;
     while (true) {
         const amt = try in_stream.readFull(buf[0..]);
         try atomic_file.file.write(buf[0..amt]);
@@ -166,7 +166,7 @@ pub fn copyFile(source_path: []const u8, dest_path: []const u8) !void {
     var atomic_file = try AtomicFile.init(dest_path, mode);
     defer atomic_file.deinit();
 
-    var buf: [mem.page_size]u8 = undefined;
+    var buf: [mem.bufsiz]u8 = undefined;
     while (true) {
         const amt = try in_stream.readFull(buf[0..]);
         try atomic_file.file.write(buf[0..amt]);
@@ -186,7 +186,7 @@ pub fn copyFileMode(source_path: []const u8, dest_path: []const u8, mode: File.M
     var atomic_file = try AtomicFile.init(dest_path, mode);
     defer atomic_file.deinit();
 
-    var buf: [mem.page_size * 6]u8 = undefined;
+    var buf: [mem.bufsiz]u8 = undefined;
     while (true) {
         const amt = try in_file.read(buf[0..]);
         try atomic_file.file.write(buf[0..amt]);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 997f1fa06f50..9e87ce2c14e1 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -109,10 +109,10 @@ const PageAllocator = struct {
             return @ptrCast([*]u8, final_addr)[0..n];
         }
 
-        const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
+        const alloc_size = if (alignment <= mem.min_page_size) n else n + alignment;
         const slice = os.mmap(
             null,
-            mem.alignForward(alloc_size, mem.page_size),
+            mem.alignForward(alloc_size, mem.min_page_size),
             os.PROT_READ | os.PROT_WRITE,
             os.MAP_PRIVATE | os.MAP_ANONYMOUS,
             -1,
@@ -129,17 +129,17 @@
         if (unused_start_len != 0) {
             os.munmap(slice[0..unused_start_len]);
         }
-        const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
+        const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.min_page_size);
         const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
         if (unused_end_len != 0) {
-            os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
+            os.munmap(@intToPtr([*]align(mem.min_page_size) u8, aligned_end_addr)[0..unused_end_len]);
         }
 
         return @intToPtr([*]u8, aligned_addr)[0..n];
     }
 
     fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
+        const old_mem = @alignCast(mem.min_page_size, old_mem_unaligned);
         if (builtin.os == .windows) {
             const w = os.windows;
             if (new_size == 0) {
@@ -154,7 +154,7 @@
                 const base_addr = @ptrToInt(old_mem.ptr);
                 const old_addr_end = base_addr + old_mem.len;
                 const new_addr_end = base_addr + new_size;
-                const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
+                const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
                 if (old_addr_end > new_addr_end_rounded) {
                     // For shrinking that is not releasing, we will only
                     // decommit the pages not needed anymore.
@@ -170,16 +170,16 @@
         const base_addr = @ptrToInt(old_mem.ptr);
         const old_addr_end = base_addr + old_mem.len;
         const new_addr_end = base_addr + new_size;
-        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
+        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
         if (old_addr_end > new_addr_end_rounded) {
-            const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
+            const ptr = @intToPtr([*]align(mem.min_page_size) u8, new_addr_end_rounded);
             os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
         }
         return old_mem[0..new_size];
     }
 
     fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
-        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
+        const old_mem = @alignCast(mem.min_page_size, old_mem_unaligned);
         if (builtin.os == .windows) {
             if (old_mem.len == 0) {
                 return alloc(allocator, new_size, new_align);
@@ -205,9 +205,9 @@
         }
 
         const old_addr_end = base_addr + old_mem.len;
-        const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
+        const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.min_page_size);
         const new_addr_end = base_addr + new_size;
-        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
+        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.min_page_size);
         if (new_addr_end_rounded == old_addr_end_rounded) {
             // The reallocation fits in the already allocated pages.
             return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
@@ -270,11 +270,11 @@ const WasmPageAllocator = struct {
         const adjusted_index = end_index + (adjusted_addr - addr);
         const new_end_index = adjusted_index + size;
 
-        if (new_end_index > num_pages * mem.page_size) {
-            const required_memory = new_end_index - (num_pages * mem.page_size);
+        if (new_end_index > num_pages * mem.min_page_size) {
+            const required_memory = new_end_index - (num_pages * mem.min_page_size);
 
-            var inner_num_pages: usize = required_memory / mem.page_size;
-            if (required_memory % mem.page_size != 0) {
+            var inner_num_pages: usize = required_memory / mem.min_page_size;
+            if (required_memory % mem.min_page_size != 0) {
                 inner_num_pages += 1;
             }
 
@@ -300,14 +300,14 @@
     fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
         // Initialize start_ptr at the first realloc
         if (num_pages == 0) {
-            start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
+            start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.min_page_size);
         }
 
         if (is_last_item(old_mem, new_align)) {
             const start_index = end_index - old_mem.len;
             const new_end_index = start_index + new_size;
 
-            if (new_end_index > num_pages * mem.page_size) {
+            if (new_end_index > num_pages * mem.min_page_size) {
                 _ = try alloc(allocator, new_end_index - end_index, new_align);
             }
             const result = start_ptr[start_index..new_end_index];
@@ -464,7 +464,7 @@ pub const ArenaAllocator = struct {
         var len = prev_len;
         while (true) {
             len += len / 2;
-            len += mem.page_size - @rem(len, mem.page_size);
+            len += mem.min_page_size - @rem(len, mem.min_page_size);
             if (len >= actual_min_size) break;
         }
         const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
@@ -886,10 +886,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
 fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
     //Maybe a platform's page_size is actually the same as or
     // very near usize?
-    if (mem.page_size << 2 > maxInt(usize)) return;
+    if (mem.max_page_size << 2 > maxInt(usize)) return;
 
     const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
-    const large_align = @as(u29, mem.page_size << 2);
+    const large_align = @as(u29, mem.min_page_size << 2);
 
     var align_mask: usize = undefined;
     _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
@@ -916,16 +916,16 @@ fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!voi
     var debug_buffer: [1000]u8 = undefined;
     const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
 
-    const alloc_size = mem.page_size * 2 + 50;
+    const alloc_size = mem.min_page_size * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     defer allocator.free(slice);
 
     var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
     // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
-    // which is 16 pages, hence the 32. This test may require to increase
-    // the size of the allocations feeding the `allocator` parameter if they
-    // fail, because of this high over-alignment we want to have.
-    while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
+    // which is 16 pages. This test may require increasing the size of the
+    // allocations feeding the `allocator` parameter if they fail, because
+    // of this high over-alignment we want to have.
+    while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), 64 * 1024)) {
         try stuff_to_free.append(slice);
         slice = try allocator.alignedAlloc(u8, 16, alloc_size);
     }
@@ -936,7 +936,7 @@ fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!voi
     slice[60] = 0x34;
 
     // realloc to a smaller size but with a larger alignment
-    slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
+    slice = try allocator.alignedRealloc(slice, mem.min_page_size * 32, alloc_size / 2);
     testing.expect(slice[0] == 0x12);
     testing.expect(slice[60] == 0x34);
 }
diff --git a/lib/std/io.zig b/lib/std/io.zig
index c36fd195ba75..8b4c6cb22efc 100644
--- a/lib/std/io.zig
+++ b/lib/std/io.zig
@@ -72,7 +72,7 @@ pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
 }
 
 pub fn BufferedInStream(comptime Error: type) type {
-    return BufferedInStreamCustom(mem.page_size, Error);
+    return BufferedInStreamCustom(mem.bufsiz, Error);
 }
 
 pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type) type {
@@ -545,7 +545,7 @@ test "io.CountingOutStream" {
 }
 
 pub fn BufferedOutStream(comptime Error: type) type {
-    return BufferedOutStreamCustom(mem.page_size, Error);
+    return BufferedOutStreamCustom(mem.bufsiz, Error);
 }
 
 pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamError: type) type {
diff --git a/lib/std/io/in_stream.zig b/lib/std/io/in_stream.zig
index c51d9eb18782..a4fc4f997cff 100644
--- a/lib/std/io/in_stream.zig
+++ b/lib/std/io/in_stream.zig
@@ -78,7 +78,7 @@ pub fn InStream(comptime ReadError: type) type {
                 return;
             }
 
-            const new_buf_size = math.min(max_size, actual_buf_len + mem.page_size);
+            const new_buf_size = math.min(max_size, actual_buf_len + mem.bufsiz);
             if (new_buf_size == actual_buf_len) return error.StreamTooLong;
             try buffer.resize(new_buf_size);
         }
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 412bf9b649f4..db8abb08e98a 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -8,11 +8,31 @@ const meta = std.meta;
 const trait = meta.trait;
 const testing = std.testing;
 
-pub const page_size = switch (builtin.arch) {
+pub const min_page_size = switch (builtin.arch) {
+    .mips, .mipsel, .mips64, .mips64el => switch (builtin.os) {
+        else => 1024, // NEC VR41xx processors support page sizes as low as 1K
+        .linux => 4 * 1024, // Linux doesn't support < 4K pages
+    },
+    .sparcv9 => 8 * 1024,
     .wasm32, .wasm64 => 64 * 1024,
     else => 4 * 1024,
 };
 
+pub const max_page_size = switch (builtin.arch) {
+    .arm, .armeb => 16 * 1024 * 1024, // At least ARMv7 has optional support for 16M pages
+    .aarch64, .aarch64_be => 1 * 1024 * 1024 * 1024, // ARM64 supports 4K, 16K, 64K, 2M, 32M, 512M, 1G pages
+    .mips, .mipsel, .mips64, .mips64el => 64 * 1024, // Every MIPS III, MIPS IV, MIPS32 and MIPS64 processor supports 4K, 16K and 64K page sizes.
+    .powerpc64, .powerpc64le => 64 * 1024,
+    .s390x => 2 * 1024 * 1024 * 1024, // 4K, 1M, 2G pages
+    .sparcv9 => 16 * 1024 * 1024 * 1024, // 8K, 64K, 512K, 4M, 32M, 256M, 2G, 16G
+    .wasm32, .wasm64 => 64 * 1024,
+    .x86_64 => 1 * 1024 * 1024 * 1024, // 1G huge pages.
+    else => min_page_size,
+};
+
+/// A good default size for buffers
+pub const bufsiz = min_page_size;
+
 pub const Allocator = struct {
     pub const Error = error{OutOfMemory};
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 622aaaf3bd35..c75d498dd741 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -2197,8 +2197,8 @@ pub const MProtectError = error{
 } || UnexpectedError;
 
 /// `memory.len` must be page-aligned.
-pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
-    assert(mem.isAligned(memory.len, mem.page_size));
+pub fn mprotect(memory: []align(mem.min_page_size) u8, protection: u32) MProtectError!void {
+    assert(mem.isAligned(memory.len, mem.min_page_size));
     switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
         0 => return,
         EINVAL => unreachable,
@@ -2242,21 +2242,21 @@ pub const MMapError = error{
 /// * SIGSEGV - Attempted write into a region mapped as read-only.
 /// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
 pub fn mmap(
-    ptr: ?[*]align(mem.page_size) u8,
+    ptr: ?[*]align(mem.min_page_size) u8,
     length: usize,
     prot: u32,
     flags: u32,
     fd: fd_t,
     offset: u64,
-) MMapError![]align(mem.page_size) u8 {
+) MMapError![]align(mem.min_page_size) u8 {
     const err = if (builtin.link_libc) blk: {
         const rc = std.c.mmap(ptr, length, prot, flags, fd, offset);
-        if (rc != std.c.MAP_FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length];
+        if (rc != std.c.MAP_FAILED) return @ptrCast([*]align(mem.min_page_size) u8, @alignCast(mem.min_page_size, rc))[0..length];
         break :blk @intCast(usize, system._errno().*);
     } else blk: {
         const rc = system.mmap(ptr, length, prot, flags, fd, offset);
         const err = errno(rc);
-        if (err == 0) return @intToPtr([*]align(mem.page_size) u8, rc)[0..length];
+        if (err == 0) return @intToPtr([*]align(mem.min_page_size) u8, rc)[0..length];
         break :blk err;
     };
     switch (err) {
@@ -2279,7 +2279,7 @@ pub fn mmap(
 /// Zig's munmap function does not, for two reasons:
 /// * It violates the Zig principle that resource deallocation must succeed.
 /// * The Windows function, VirtualFree, has this restriction.
-pub fn munmap(memory: []align(mem.page_size) u8) void {
+pub fn munmap(memory: []align(mem.min_page_size) u8) void {
     switch (errno(system.munmap(memory.ptr, memory.len))) {
         0 => return,
         EINVAL => unreachable, // Invalid parameters.
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index 57660f23d929..bfa27b947bfd 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -607,9 +607,8 @@ test "PackedInt(Array/Slice)Endian" {
 
 //These tests prove we aren't accidentally accessing memory past
 // the end of the array/slice by placing it at the end of a page
-// and reading the last element. The assumption is that the page
-// after this one is not mapped and will cause a segfault if we
-// don't account for the bounds.
+// and reading the last element. The page after is unmapped, so
+// it will cause a segfault if bounds are not respected.
 test "PackedIntArray at end of available memory" {
     switch (builtin.os) {
         .linux, .macosx, .ios, .freebsd, .netbsd, .windows => {},
@@ -617,16 +616,15 @@
     }
     const PackedArray = PackedIntArray(u3, 8);
 
-    const Padded = struct {
-        _: [std.mem.page_size - @sizeOf(PackedArray)]u8,
-        p: PackedArray,
-    };
-
     const allocator = std.heap.page_allocator;
 
-    var pad = try allocator.create(Padded);
-    defer allocator.destroy(pad);
-    pad.p.set(7, std.math.maxInt(u3));
+    const pages = try allocator.alignedAlloc(u8, std.mem.min_page_size, 2 * std.mem.min_page_size);
+    defer allocator.free(pages);
+
+    try std.os.mprotect(pages[std.mem.min_page_size..], std.os.PROT_NONE);
+
+    const pad = @ptrCast(*PackedArray, &pages[std.mem.min_page_size - @sizeOf(PackedArray)]);
+    pad.set(7, std.math.maxInt(u3));
 }
 
 test "PackedIntSlice at end of available memory" {
@@ -638,9 +636,9 @@
 
     const allocator = std.heap.page_allocator;
 
-    var page = try allocator.alloc(u8, std.mem.page_size);
+    var page = try allocator.alloc(u8, std.mem.min_page_size);
     defer allocator.free(page);
 
-    var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1);
+    var p = PackedSlice.init(page[std.mem.min_page_size - 2 ..], 1);
     p.set(0, std.math.maxInt(u11));
 }
diff --git a/lib/std/process.zig b/lib/std/process.zig
index e432be213f19..93708abb7dcb 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -525,7 +525,7 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
         ReadGroupId,
     };
 
-    var buf: [std.mem.page_size]u8 = undefined;
+    var buf: [std.mem.bufsiz]u8 = undefined;
     var name_index: usize = 0;
     var state = State.Start;
     var uid: u32 = 0;
diff --git a/lib/std/special/start.zig b/lib/std/special/start.zig
index 6d99618d2e0a..afa6b14181cb 100644
--- a/lib/std/special/start.zig
+++ b/lib/std/special/start.zig
@@ -143,6 +143,17 @@ fn posixCallMainAndExit() noreturn {
         std.os.linux.tls.setThreadPointer(tp);
     }
 
+    var page_size: ?usize = null;
+    var i: usize = 0;
+    while (auxv[i].a_type != std.elf.AT_NULL) : (i += 1) {
+        if (auxv[i].a_type == std.elf.AT_PAGESZ) {
+            page_size = auxv[i].a_un.a_val;
+            assert(page_size.? >= std.mem.min_page_size);
+            assert(page_size.? <= std.mem.max_page_size);
+            break;
+        }
+    }
+
     // TODO This is disabled because what should we do when linking libc and this code
     // does not execute? And also it's causing a test failure in stack traces in release modes.
 
@@ -150,9 +161,9 @@
     //// problem is that it uses PROT_GROWSDOWN which prevents stores to addresses too far down
    //// the stack and requires "probing". So here we allocate our own stack.
     //const wanted_stack_size = gnu_stack_phdr.p_memsz;
-    //assert(wanted_stack_size % std.mem.page_size == 0);
+    //assert(wanted_stack_size % page_size == 0);
     //// Allocate an extra page as the guard page.
-    //const total_size = wanted_stack_size + std.mem.page_size;
+    //const total_size = wanted_stack_size + page_size;
     //const new_stack = std.os.mmap(
     //    null,
     //    total_size,
@@ -161,7 +172,7 @@
     //    -1,
     //    0,
     //) catch @panic("out of memory");
-    //std.os.mprotect(new_stack[0..std.mem.page_size], std.os.PROT_NONE) catch {};
+    //std.os.mprotect(new_stack[0..page_size], std.os.PROT_NONE) catch {};
     //std.os.exit(@newStackCall(new_stack, callMainWithArgs, argc, argv, envp));
 }
 
diff --git a/lib/std/thread.zig b/lib/std/thread.zig
index fe976a6839d0..bd84bf4b9afb 100644
--- a/lib/std/thread.zig
+++ b/lib/std/thread.zig
@@ -33,12 +33,12 @@ pub const Thread = struct {
     pub const Data = if (use_pthreads)
         struct {
             handle: Thread.Handle,
-            memory: []align(mem.page_size) u8,
+            memory: []align(mem.min_page_size) u8,
         }
     else switch (builtin.os) {
         .linux => struct {
             handle: Thread.Handle,
-            memory: []align(mem.page_size) u8,
+            memory: []align(mem.min_page_size) u8,
         },
         .windows => struct {
             handle: Thread.Handle,
@@ -229,11 +229,11 @@ pub const Thread = struct {
        var context_start_offset: usize = undefined;
         var tls_start_offset: usize = undefined;
         const mmap_len = blk: {
-            var l: usize = mem.page_size;
+            var l: usize = mem.min_page_size;
             // Allocate a guard page right after the end of the stack region
             guard_end_offset = l;
             // The stack itself, which grows downwards.
-            l = mem.alignForward(l + default_stack_size, mem.page_size);
+            l = mem.alignForward(l + default_stack_size, mem.min_page_size);
             stack_end_offset = l;
             // Above the stack, so that it can be in the same mmap call, put the Thread object.
             l = mem.alignForward(l, @alignOf(Thread));
@@ -259,7 +259,7 @@ pub const Thread = struct {
         // whole region right away
         const mmap_slice = os.mmap(
             null,
-            mem.alignForward(mmap_len, mem.page_size),
+            mem.alignForward(mmap_len, mem.min_page_size),
             os.PROT_NONE,
             os.MAP_PRIVATE | os.MAP_ANONYMOUS,
             -1,