Merge branch 'LemonBoy-guard-pages-in-threads'

commit 568dc56232
Andrew Kelley, 2019-05-28 12:47:10 -04:00
2 changed files with 34 additions and 11 deletions


@@ -1883,20 +1883,29 @@ pub fn inotify_rm_watch(inotify_fd: i32, wd: i32) void {
 }
 
 pub const MProtectError = error{
+    /// The memory cannot be given the specified access. This can happen, for example, if you
+    /// mmap(2) a file to which you have read-only access, then ask mprotect() to mark it
+    /// PROT_WRITE.
     AccessDenied,
+
+    /// Changing the protection of a memory region would result in the total number of
+    /// mappings with distinct attributes (e.g., read versus read/write protection) exceeding
+    /// the allowed maximum. (For example, making the protection of a range PROT_READ in the
+    /// middle of a region currently protected as PROT_READ|PROT_WRITE would result in three
+    /// mappings: two read/write mappings at each end and a read-only mapping in the middle.)
     OutOfMemory,
     Unexpected,
 };
 
 /// `memory.len` must be page-aligned.
-pub fn mprotect(memory: [*]align(mem.page_size) u8, protection: u32) MProtectError!void {
+pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
     assert(mem.isAligned(memory.len, mem.page_size));
     switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
         0 => return,
         EINVAL => unreachable,
         EACCES => return error.AccessDenied,
         ENOMEM => return error.OutOfMemory,
-        else => return unexpectedErrno(err),
+        else => |err| return unexpectedErrno(err),
     }
 }
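
For reference, a minimal sketch of how a caller might use the new slice-based mprotect signature; the makeReadOnly helper and region parameter are illustrative only and not part of this change:

const std = @import("std");
const os = std.os;
const mem = std.mem;

// Hypothetical caller: revoke write access on an already-mapped, page-aligned region.
// region.len must stay page-aligned, matching the assert inside os.mprotect.
fn makeReadOnly(region: []align(mem.page_size) u8) os.MProtectError!void {
    return os.mprotect(region, os.PROT_READ);
}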


@@ -223,15 +223,17 @@ pub const Thread = struct {
             }
         };
 
-        const MAP_GROWSDOWN = if (os.linux.is_the_target) os.linux.MAP_GROWSDOWN else 0;
+        var guard_end_offset: usize = undefined;
         var stack_end_offset: usize = undefined;
         var thread_start_offset: usize = undefined;
         var context_start_offset: usize = undefined;
         var tls_start_offset: usize = undefined;
         const mmap_len = blk: {
-            // First in memory will be the stack, which grows downwards.
-            var l: usize = mem.alignForward(default_stack_size, mem.page_size);
+            var l: usize = mem.page_size;
+            // Allocate a guard page right after the end of the stack region
+            guard_end_offset = l;
+            // The stack itself, which grows downwards.
+            l = mem.alignForward(l + default_stack_size, mem.page_size);
             stack_end_offset = l;
             // Above the stack, so that it can be in the same mmap call, put the Thread object.
             l = mem.alignForward(l, @alignOf(Thread));
@@ -253,20 +255,32 @@ pub const Thread = struct {
             }
             break :blk l;
         };
+        // Map the whole stack with no rw permissions to avoid committing the
+        // whole region right away
         const mmap_slice = os.mmap(
             null,
             mem.alignForward(mmap_len, mem.page_size),
-            os.PROT_READ | os.PROT_WRITE,
-            os.MAP_PRIVATE | os.MAP_ANONYMOUS | MAP_GROWSDOWN,
+            os.PROT_NONE,
+            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
             -1,
             0,
         ) catch |err| switch (err) {
-            error.MemoryMappingNotSupported => unreachable, // no file descriptor
-            error.AccessDenied => unreachable, // no file descriptor
-            error.PermissionDenied => unreachable, // no file descriptor
+            error.MemoryMappingNotSupported => unreachable,
+            error.AccessDenied => unreachable,
+            error.PermissionDenied => unreachable,
             else => |e| return e,
         };
         errdefer os.munmap(mmap_slice);
+
+        // Map everything but the guard page as rw
+        os.mprotect(
+            @alignCast(mem.page_size, mmap_slice[guard_end_offset..]),
+            os.PROT_READ | os.PROT_WRITE,
+        ) catch |err| switch (err) {
+            error.AccessDenied => unreachable,
+            else => |e| return e,
+        };
+
         const mmap_addr = @ptrToInt(mmap_slice.ptr);
         const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, mmap_addr + thread_start_offset));
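
Taken together, the new code reserves the whole region with PROT_NONE and then re-enables access above the first page, which stays inaccessible so a stack overflow faults instead of silently corrupting adjacent memory. Below is a standalone sketch of that pattern, assuming the std.os.mmap/mprotect/munmap wrappers behave as in this commit; the mapStackWithGuard name and stack_len parameter are illustrative only:

const std = @import("std");
const os = std.os;
const mem = std.mem;

// Illustrative sketch: reserve stack_len bytes of stack plus one guard page at the
// low end of the mapping, which a downward-growing stack would overflow into first.
fn mapStackWithGuard(stack_len: usize) ![]align(mem.page_size) u8 {
    const guard_len = mem.page_size;
    const total_len = guard_len + mem.alignForward(stack_len, mem.page_size);
    // Reserve address space with no permissions so nothing is committed up front.
    const region = try os.mmap(
        null,
        total_len,
        os.PROT_NONE,
        os.MAP_PRIVATE | os.MAP_ANONYMOUS,
        -1,
        0,
    );
    errdefer os.munmap(region);
    // Re-enable read/write everywhere except the guard page at the bottom.
    try os.mprotect(
        @alignCast(mem.page_size, region[guard_len..]),
        os.PROT_READ | os.PROT_WRITE,
    );
    return region;
}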