From 321feca5fd46f02283929c924e3dd99dc79b42ed Mon Sep 17 00:00:00 2001 From: sewer56 Date: Mon, 11 Sep 2023 20:03:43 +0100 Subject: [PATCH] Added: Remaining W^X fixes for M1 macOS --- src-rust/src/internal/buffer_allocator_osx.rs | 14 +++++++++++++- src-rust/src/structs/internal/locator_item.rs | 4 ++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src-rust/src/internal/buffer_allocator_osx.rs b/src-rust/src/internal/buffer_allocator_osx.rs index 4357809..43a1962 100644 --- a/src-rust/src/internal/buffer_allocator_osx.rs +++ b/src-rust/src/internal/buffer_allocator_osx.rs @@ -132,13 +132,25 @@ fn try_allocate_buffer( continue; } + // TODO: M1 W^X + // M1 macOS has strict W^X enforcement where pages are not allowed to be writeable + // and executable at the same time. Therefore, we have to work around this by allocating as RW + // and temporarily changing it on every write. + + // This is not safe, but later we'll get a better workaround going. + #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] + const PROT: vm_prot_t = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; + + #[cfg(all(target_os = "macos", target_arch = "aarch64"))] + const PROT: vm_prot_t = VM_PROT_READ | VM_PROT_WRITE; + kr = unsafe { mach_vm_protect( self_task, allocated, settings.size as mach_vm_size_t, 0, - VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, + PROT, ) }; diff --git a/src-rust/src/structs/internal/locator_item.rs b/src-rust/src/structs/internal/locator_item.rs index ccd6d5a..09490d1 100644 --- a/src-rust/src/structs/internal/locator_item.rs +++ b/src-rust/src/structs/internal/locator_item.rs @@ -165,12 +165,14 @@ impl LocatorItem { /// This function is safe provided that the caller ensures that the buffer is large enough to hold the data. /// There is no error thrown if size is insufficient. 
pub unsafe fn append_bytes(&mut self, data: &[u8]) -> usize { + disable_write_xor_execute(self.base_address.value as *const u8, data.len()); let address = self.base_address.value + self.position as usize; let data_len = data.len(); std::ptr::copy_nonoverlapping(data.as_ptr(), address as *mut u8, data_len); self.position += data_len as u32; + restore_write_xor_execute(self.base_address.value as *const u8, data.len()); address } @@ -197,9 +199,11 @@ impl LocatorItem { where T: Copy, { + disable_write_xor_execute(self.base_address.value as *const u8, std::mem::size_of::<T>()); let address = (self.base_address.value + self.position as usize) as *mut T; *address = data; self.position += std::mem::size_of::<T>() as u32; + restore_write_xor_execute(self.base_address.value as *const u8, std::mem::size_of::<T>()); address as usize } }