Common: Use Mach VM routines for memory mapping #11308

Merged
merged 1 commit into from
May 30, 2024
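This change moves the macOS memory-mapping code out of common/Linux/LnxHostSys.cpp into common/Darwin/DarwinMisc.cpp and reimplements it on top of the Mach VM calls (mach_vm_allocate(), mach_vm_protect(), mach_vm_map(), mach_vm_deallocate()). macOS has no MAP_FIXED_NOREPLACE, so the old mmap() path fell back to plain MAP_FIXED for fixed mappings and could race with other threads; the Mach routines support fixed and overwrite mappings directly. Plain mmap() with MAP_JIT is kept only for executable memory on Apple Silicon, where mach_vm_allocate() cannot produce it.

For orientation, here is a minimal standalone sketch of the allocate/protect/deallocate lifecycle the new HostSys::Mmap()/MemProtect()/Munmap() implementations are built on. It is not part of the diff, assumes a 64-bit macOS target, and trims error handling to early returns.

```cpp
#include <cstdio>
#include <cstring>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>

int main()
{
	const mach_vm_size_t size = 0x4000; // one 16 KiB page on Apple Silicon, four 4 KiB pages on x86
	mach_vm_address_t addr = 0;

	// Reserve anonymous memory anywhere in the address space. Passing a non-zero
	// addr together with VM_FLAGS_FIXED would request a fixed mapping instead,
	// which is what HostSys::Mmap() does when a base address is supplied.
	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return 1;

	// Set the protection explicitly (fresh allocations default to read/write);
	// this is the Mach equivalent of mprotect().
	kr = mach_vm_protect(mach_task_self(), addr, size, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		return 1;

	std::memset(reinterpret_cast<void*>(addr), 0xAA, size);
	std::printf("mapped %llu bytes at %p\n", static_cast<unsigned long long>(size),
		reinterpret_cast<void*>(addr));

	// Release the mapping; the Mach equivalent of munmap().
	mach_vm_deallocate(mach_task_self(), addr, size);
	return 0;
}
```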
230 changes: 225 additions & 5 deletions common/Darwin/DarwinMisc.cpp
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
+// SPDX-FileCopyrightText: 2002-2024 PCSX2 Dev Team
// SPDX-License-Identifier: LGPL-3.0+

#if defined(__APPLE__)
@@ -9,14 +9,21 @@
#include <cstring>
#include <cstdlib>
#include <optional>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <time.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <IOKit/pwr_mgt/IOPMLib.h>

#include "common/Pcsx2Types.h"
#include "common/Assertions.h"
#include "common/BitUtils.h"
#include "common/Console.h"
#include "common/Pcsx2Types.h"
#include "common/HostSys.h"
#include "common/Threading.h"
#include "common/WindowInfo.h"
@@ -95,9 +102,9 @@ static std::optional<u32> sysctlbyname_u32(const char* name)

std::string GetOSVersionString()
{
std::string type = sysctl_str(CTL_KERN, KERN_OSTYPE);
std::string release = sysctl_str(CTL_KERN, KERN_OSRELEASE);
std::string arch = sysctl_str(CTL_HW, HW_MACHINE);
return type + " " + release + " " + arch;
}

@@ -162,7 +169,7 @@ std::vector<DarwinMisc::CPUClass> DarwinMisc::GetCPUClasses()
if (!physicalcpu.has_value() || !logicalcpu.has_value())
{
Console.Warning("(DarwinMisc) Perf level %u is missing data on %s cpus!",
i, !physicalcpu.has_value() ? "physical" : "logical");
continue;
}

@@ -181,4 +188,217 @@ std::vector<DarwinMisc::CPUClass> DarwinMisc::GetCPUClasses()
return out;
}

static __ri vm_prot_t MachProt(const PageProtectionMode& mode)
{
vm_prot_t machmode = (mode.CanWrite()) ? VM_PROT_WRITE : 0;
machmode |= (mode.CanRead()) ? VM_PROT_READ : 0;
machmode |= (mode.CanExecute()) ? (VM_PROT_EXECUTE | VM_PROT_READ) : 0;
return machmode;
}

void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
if (mode.IsNone())
return nullptr;

#ifdef __aarch64__
// We can't allocate executable memory with mach_vm_allocate() on Apple Silicon.
// Instead, we need to use MAP_JIT with mmap(), which does not support fixed mappings.
if (mode.CanExecute())
{
if (base)
return nullptr;

const u32 mmap_prot = mode.CanWrite() ? (PROT_READ | PROT_WRITE | PROT_EXEC) : (PROT_READ | PROT_EXEC);
const u32 flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
void* const res = mmap(nullptr, size, mmap_prot, flags, -1, 0);
return (res == MAP_FAILED) ? nullptr : res;
}
#endif

kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&base), size,
base ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
if (ret != KERN_SUCCESS)
{
DEV_LOG("mach_vm_allocate() returned {}", ret);
return nullptr;
}

ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size, false, MachProt(mode));
if (ret != KERN_SUCCESS)
{
DEV_LOG("mach_vm_protect() returned {}", ret);
mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
return nullptr;
}

return base;
}

void HostSys::Munmap(void* base, size_t size)
{
if (!base)
return;

mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
}

void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");

kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
MachProt(mode));
if (res != KERN_SUCCESS) [[unlikely]]
{
ERROR_LOG("mach_vm_protect() failed: {}", res);
pxFailRel("mach_vm_protect() failed");
}
}

std::string HostSys::GetFileMappingName(const char* prefix)
{
// The name is not actually used.
return {};
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
mach_vm_size_t vm_size = size;
mach_port_t port;
const kern_return_t res = mach_make_memory_entry_64(
mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
if (res != KERN_SUCCESS)
{
ERROR_LOG("mach_make_memory_entry_64() failed: {}", res);
return nullptr;
}

return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}

void HostSys::DestroySharedMemory(void* ptr)
{
mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS)
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return nullptr;
}

return reinterpret_cast<void*>(ptr);
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
if (res != KERN_SUCCESS)
pxFailRel("Failed to unmap shared memory");
}

#ifdef _M_ARM64

void HostSys::FlushInstructionCache(void* address, u32 size)
{
__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
: m_base_ptr(base_ptr)
, m_size(size)
, m_num_pages(num_pages)
{
}

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
pxAssertRel(m_num_mappings == 0, "No mappings left");

if (mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
pxFailRel("Failed to release shared memory area");
}


std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");

mach_vm_address_t alloc;
const kern_return_t res =
mach_vm_map(mach_task_self(), &alloc, size, 0, VM_FLAGS_ANYWHERE,
MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS)
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return {};
}

return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(reinterpret_cast<u8*>(alloc), size, size / __pagesize));
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

const kern_return_t res =
mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS) [[unlikely]]
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return nullptr;
}

m_num_mappings++;
return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

const kern_return_t res =
mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS) [[unlikely]]
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return false;
}

m_num_mappings--;
return true;
}

#ifdef _M_ARM64

static thread_local int s_code_write_depth = 0;

void HostSys::BeginCodeWrite()
{
if ((s_code_write_depth++) == 0)
pthread_jit_write_protect_np(0);
}

void HostSys::EndCodeWrite()
{
pxAssert(s_code_write_depth > 0);
if ((--s_code_write_depth) == 0)
pthread_jit_write_protect_np(1);
}

#endif

#endif
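The shared-memory functions above are the least obvious part of the Mach API: mach_make_memory_entry_64() with MAP_MEM_NAMED_CREATE creates a memory-object port, and mach_vm_map() can then map that object any number of times, which is how MapSharedMemory() and SharedMemoryMappingArea::Map() alias the same backing store at different addresses (with VM_FLAGS_OVERWRITE replacing part of the reserved placeholder region in the latter case). Here is a minimal standalone sketch of that round trip; it is not part of the diff, assumes a 64-bit macOS target, and reduces error handling to early returns.

```cpp
#include <cstdio>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/memory_object_types.h> // MAP_MEM_NAMED_CREATE

int main()
{
	// Create an anonymous named memory entry (a port referring to a memory object).
	mach_vm_size_t size = 0x4000;
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr = mach_make_memory_entry_64(mach_task_self(), &size, 0,
		MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return 1;

	// Map the same memory object twice, letting the kernel pick both addresses.
	mach_vm_address_t view1 = 0, view2 = 0;
	kr = mach_vm_map(mach_task_self(), &view1, size, 0, VM_FLAGS_ANYWHERE, entry, 0, FALSE,
		VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (kr != KERN_SUCCESS)
		return 1;
	kr = mach_vm_map(mach_task_self(), &view2, size, 0, VM_FLAGS_ANYWHERE, entry, 0, FALSE,
		VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (kr != KERN_SUCCESS)
		return 1;

	// A write through one view is visible through the other, since both map the same pages.
	*reinterpret_cast<int*>(view1) = 0x2a;
	std::printf("view1=%p view2=%p value through view2=%d\n",
		reinterpret_cast<void*>(view1), reinterpret_cast<void*>(view2),
		*reinterpret_cast<int*>(view2));

	mach_vm_deallocate(mach_task_self(), view1, size);
	mach_vm_deallocate(mach_task_self(), view2, size);
	mach_port_deallocate(mach_task_self(), entry);
	return 0;
}
```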
29 changes: 3 additions & 26 deletions common/Linux/LnxHostSys.cpp
@@ -29,14 +29,6 @@
#define MAP_ANONYMOUS MAP_ANON
#endif

// MacOS does not have MAP_FIXED_NOREPLACE, which means our mappings are
// vulnerable to races with the main/Qt threads. TODO: Investigate using
// mach_vm_allocate()/mach_vm_map() instead of mmap(), but Apple's
// documentation for these routines is non-existent...
#if defined(__APPLE__) && !defined(MAP_FIXED_NOREPLACE)
#define MAP_FIXED_NOREPLACE MAP_FIXED
#endif

// FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
// MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
#if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
@@ -230,6 +222,8 @@ bool PageFaultHandler::Install(Error* error)
return true;
}

#ifndef __APPLE__

static __ri uint LinuxProt(const PageProtectionMode& mode)
{
u32 lnxmode = 0;
@@ -406,23 +400,6 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
return true;
}

#endif

#if defined(_M_ARM64) && defined(__APPLE__)

static thread_local int s_code_write_depth = 0;

void HostSys::BeginCodeWrite()
{
if ((s_code_write_depth++) == 0)
pthread_jit_write_protect_np(0);
}

void HostSys::EndCodeWrite()
{
pxAssert(s_code_write_depth > 0);
if ((--s_code_write_depth) == 0)
pthread_jit_write_protect_np(1);
}
#endif // __APPLE__

#endif
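Finally, the ARM64-only block removed here (and the MAP_JIT branch added to HostSys::Mmap() in DarwinMisc.cpp) implements Apple's JIT write-protection scheme: executable memory comes from mmap() with MAP_JIT, pthread_jit_write_protect_np() toggles the current thread between write and execute access (which is what BeginCodeWrite()/EndCodeWrite() wrap), and the instruction cache is flushed before freshly written code runs. A minimal standalone sketch of that flow, not part of the diff and assuming an Apple Silicon target:

```cpp
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <pthread.h>
#include <libkern/OSCacheControl.h> // sys_icache_invalidate()

int main()
{
	// MAP_JIT gives an RWX-capable region; actual access is gated per thread below.
	const size_t size = 0x4000;
	void* code = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
		MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
	if (code == MAP_FAILED)
		return 1;

	// AArch64 machine code for "mov w0, #42; ret".
	const unsigned char insns[] = {0x40, 0x05, 0x80, 0x52, 0xc0, 0x03, 0x5f, 0xd6};

	pthread_jit_write_protect_np(0); // make MAP_JIT pages writable on this thread
	std::memcpy(code, insns, sizeof(insns));
	pthread_jit_write_protect_np(1); // make them executable again

	// Flush the instruction cache; same job as __builtin___clear_cache() in the diff.
	sys_icache_invalidate(code, sizeof(insns));

	const int ret = reinterpret_cast<int (*)()>(code)();
	std::printf("JIT-compiled function returned %d\n", ret);

	munmap(code, size);
	return 0;
}
```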