diff --git a/src/cpu/idt.rs b/src/cpu/idt.rs index 48b0ea345..8d8d8d217 100644 --- a/src/cpu/idt.rs +++ b/src/cpu/idt.rs @@ -5,6 +5,7 @@ // Author: Joerg Roedel use super::control_regs::read_cr2; +use super::percpu::this_cpu; use super::tss::IST_DF; use super::vc::handle_vc_exception; use super::{X86GeneralRegs, X86InterruptFrame}; @@ -39,6 +40,8 @@ pub const _HV_VECTOR: usize = 28; pub const VC_VECTOR: usize = 29; pub const _SX_VECTOR: usize = 30; +pub const PF_ERROR_WRITE: usize = 2; + #[repr(C, packed)] #[derive(Default, Debug, Clone, Copy)] pub struct X86ExceptionContext { @@ -198,7 +201,11 @@ fn generic_idt_handler(ctx: &mut X86ExceptionContext) { let rip = ctx.frame.rip; let err = ctx.error_code; - if !handle_exception_table(ctx) { + if this_cpu() + .handle_pf(VirtAddr::from(cr2), (err & PF_ERROR_WRITE) != 0) + .is_err() + && !handle_exception_table(ctx) + { panic!( "Unhandled Page-Fault at RIP {:#018x} CR2: {:#018x} error code: {:#018x}", rip, cr2, err diff --git a/src/cpu/percpu.rs b/src/cpu/percpu.rs index e36e37428..9d67458f0 100644 --- a/src/cpu/percpu.rs +++ b/src/cpu/percpu.rs @@ -562,6 +562,10 @@ impl PerCpu { pub fn populate_page_table(&self, pt: &mut PageTableRef) { self.vm_range.populate(pt); } + + pub fn handle_pf(&self, vaddr: VirtAddr, write: bool) -> Result<(), SvsmError> { + self.vm_range.handle_page_fault(vaddr, write) + } } unsafe impl Sync for PerCpu {} diff --git a/src/fs/api.rs b/src/fs/api.rs index fd4218799..bd59ec449 100644 --- a/src/fs/api.rs +++ b/src/fs/api.rs @@ -11,6 +11,7 @@ use alloc::vec::Vec; use core::fmt::Debug; use crate::error::SvsmError; +use crate::mm::PageRef; use crate::string::FixedString; use packit::PackItError; @@ -64,6 +65,7 @@ pub trait File: Debug + Send + Sync { fn write(&self, buf: &[u8], offset: usize) -> Result; fn truncate(&self, size: usize) -> Result; fn size(&self) -> usize; + fn mapping(&self, offset: usize) -> Option; } pub trait Directory: Debug + Send + Sync { diff --git a/src/fs/filesystem.rs 
b/src/fs/filesystem.rs index 216cc7bca..37f4f9e6b 100644 --- a/src/fs/filesystem.rs +++ b/src/fs/filesystem.rs @@ -9,6 +9,7 @@ use super::*; use crate::error::SvsmError; use crate::locking::SpinLock; +use crate::mm::PageRef; use core::cmp::min; @@ -57,6 +58,10 @@ impl RawFileHandle { fn size(&self) -> usize { self.file.size() } + + fn mapping(&self, offset: usize) -> Option { + self.file.mapping(offset) + } } #[derive(Debug)] @@ -97,6 +102,10 @@ impl FileHandle { pub fn position(&self) -> usize { self.handle.lock().current } + + pub fn mapping(&self, offset: usize) -> Option { + self.handle.lock().mapping(offset) + } } #[derive(Debug)] diff --git a/src/fs/ramfs.rs b/src/fs/ramfs.rs index b8d10a4c3..8ca3061db 100644 --- a/src/fs/ramfs.rs +++ b/src/fs/ramfs.rs @@ -9,7 +9,7 @@ use super::*; use crate::error::SvsmError; use crate::locking::RWLock; use crate::mm::{allocate_file_page_ref, PageRef}; -use crate::types::PAGE_SIZE; +use crate::types::{PAGE_SHIFT, PAGE_SIZE}; use crate::utils::{page_align_up, page_offset, zero_mem_region}; extern crate alloc; @@ -164,6 +164,13 @@ impl RawRamFile { fn size(&self) -> usize { self.size } + + fn mapping(&self, offset: usize) -> Option { + if offset > self.size() { + return None; + } + self.pages.get(offset >> PAGE_SHIFT).cloned() + } } #[derive(Debug)] @@ -196,6 +203,10 @@ impl File for RamFile { fn size(&self) -> usize { self.rawfile.lock_read().size() } + + fn mapping(&self, offset: usize) -> Option { + self.rawfile.lock_read().mapping(offset) + } } #[derive(Debug)] @@ -392,4 +403,81 @@ mod tests { let list = ram_dir.list(); assert_eq!(list, [f_name]); } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_ramfs_single_page_mapping() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + + let file = RamFile::new(); + let buf = [0xffu8; 512]; + + file.write(&buf, 0).expect("Failed to write file data"); + + let res = file + .mapping(0) + .expect("Failed to get mapping for ramfs page"); + assert_eq!( + 
res.phys_addr(), + file.rawfile.lock_read().pages[0].phys_addr() + ); + drop(file); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_ramfs_multi_page_mapping() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + + let file = RamFile::new(); + let buf = [0xffu8; 4 * PAGE_SIZE]; + + file.write(&buf, 0).expect("Failed to write file data"); + + for i in 0..4 { + let res = file + .mapping(i * PAGE_SIZE) + .expect("Failed to get mapping for ramfs page"); + assert_eq!( + res.phys_addr(), + file.rawfile.lock_read().pages[i].phys_addr() + ); + } + drop(file); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_ramfs_mapping_unaligned_offset() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + + let file = RamFile::new(); + let buf = [0xffu8; 4 * PAGE_SIZE]; + + file.write(&buf, 0).expect("Failed to write file data"); + + let res = file + .mapping(PAGE_SIZE + 0x123) + .expect("Failed to get mapping for ramfs page"); + assert_eq!( + res.phys_addr(), + file.rawfile.lock_read().pages[1].phys_addr() + ); + drop(file); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_ramfs_mapping_out_of_range() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + + let file = RamFile::new(); + let buf = [0xffu8; 4 * PAGE_SIZE]; + + file.write(&buf, 0).expect("Failed to write file data"); + + let res = file.mapping(4 * PAGE_SIZE); + assert!(res.is_none()); + drop(file); + } } diff --git a/src/mm/vm/mapping/api.rs b/src/mm/vm/mapping/api.rs index eac1946c5..383d49fa9 100644 --- a/src/mm/vm/mapping/api.rs +++ b/src/mm/vm/mapping/api.rs @@ -5,8 +5,10 @@ // Author: Joerg Roedel use crate::address::{PhysAddr, VirtAddr}; +use crate::error::SvsmError; use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard}; use crate::mm::pagetable::PTEntryFlags; +use crate::mm::vm::VMR; use crate::types::{PAGE_SHIFT, PAGE_SIZE}; use intrusive_collections::rbtree::Link; @@ -18,6 +20,16 @@ extern crate 
alloc; use alloc::boxed::Box; use alloc::sync::Arc; +/// Information required to resolve a page fault within a virtual mapping +pub struct VMPageFaultResolution { + /// The physical address of a page that must be mapped to the page fault + /// virtual address to resolve the page fault. + pub paddr: PhysAddr, + + /// The flags to use to map the virtual memory page. + pub flags: PTEntryFlags, +} + pub trait VirtualMapping: core::fmt::Debug { /// Request the size of the virtual memory mapping /// @@ -48,14 +60,23 @@ pub trait VirtualMapping: core::fmt::Debug { // Provide default in case there is nothing to do } - /// Request the PTEntryFlags used for this virtual memory mapping. This is - /// a combination of + /// Request the PTEntryFlags used for this virtual memory mapping. + /// + /// # Arguments /// + /// * 'offset' -> The offset in bytes into the `VirtualMapping`. The flags + /// returned from this function relate to the page at the + /// given offset + /// + /// # Returns + /// + /// A combination of: + /// * PTEntryFlags::WRITABLE /// * PTEntryFlags::NX, /// * PTEntryFlags::ACCESSED /// * PTEntryFlags::DIRTY - fn pt_flags(&self) -> PTEntryFlags; + fn pt_flags(&self, offset: usize) -> PTEntryFlags; /// Request the page size used for mappings /// @@ -78,6 +99,30 @@ pub trait VirtualMapping: core::fmt::Debug { // Shared with the HV - defaults not No false } + + /// Handle a page fault that occurred on a virtual memory address within + /// this mapping. + /// + /// # Arguments + /// + /// * 'vmr' - Virtual memory range that contains the mapping. This + /// [`VirtualMapping`] can use this to insert/remove regions + /// as necessary to handle the page fault. + /// + /// * `offset` - Offset into the virtual mapping that was the subject of + /// the page fault. + /// + /// * 'write' - `true` if the fault was due to a write to the memory + /// location, or 'false' if the fault was due to a read. 
+ /// + fn handle_page_fault( + &mut self, + _vmr: &VMR, + _offset: usize, + _write: bool, + ) -> Result { + Err(SvsmError::Mem) + } } #[derive(Debug)] @@ -186,4 +231,8 @@ impl VMM { pub fn get_mapping_mut(&self) -> WriteLockGuard> { self.mapping.get_mut() } + + pub fn get_mapping_clone(&self) -> Arc { + self.mapping.clone() + } } diff --git a/src/mm/vm/mapping/file_mapping.rs b/src/mm/vm/mapping/file_mapping.rs new file mode 100644 index 000000000..4e199a6f2 --- /dev/null +++ b/src/mm/vm/mapping/file_mapping.rs @@ -0,0 +1,590 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2023 SUSE LLC +// +// Author: Roy Hopkins + +extern crate alloc; + +use core::slice::from_raw_parts_mut; + +#[cfg(not(test))] +use alloc::sync::Arc; + +use alloc::vec::Vec; + +#[cfg(not(test))] +use super::{Mapping, VMPhysMem}; + +use super::{RawAllocMapping, VMPageFaultResolution, VirtualMapping}; +use crate::address::{Address, PhysAddr}; +use crate::error::SvsmError; +use crate::fs::FileHandle; +use crate::mm::vm::VMR; +use crate::mm::PageRef; +use crate::mm::{pagetable::PageTable, PAGE_SIZE}; +use crate::types::PAGE_SHIFT; +use crate::utils::align_up; + +#[derive(Debug)] +struct VMWriteFileMapping(RawAllocMapping); + +impl VMWriteFileMapping { + pub fn get_alloc(&self) -> &RawAllocMapping { + &self.0 + } + + pub fn get_alloc_mut(&mut self) -> &mut RawAllocMapping { + &mut self.0 + } +} + +impl VirtualMapping for VMWriteFileMapping { + fn mapping_size(&self) -> usize { + self.0.mapping_size() + } + + fn map(&self, offset: usize) -> Option { + self.0.map(offset) + } + + fn pt_flags(&self, _offset: usize) -> crate::mm::pagetable::PTEntryFlags { + PageTable::task_data_flags() + } +} + +#[derive(Debug, PartialEq)] +pub enum VMFileMappingPermission { + /// Read-only access to the file + Read, + // Read/Write access to a copy of the files pages + Write, + // Read-only access that allows execution + Execute, +} + +/// Map view of a ramfs file into virtual memory 
+#[derive(Debug)] +pub struct VMFileMapping { + /// The file that this mapping relates to + file: FileHandle, + + /// The size of the mapping in bytes + size: usize, + + /// The permission to apply to the virtual mapping + permission: VMFileMappingPermission, + + /// A vec containing references to mapped pages within the file + pages: Vec>, + + /// A copy of the file pages for mappings with Write permission + write_copy: Option, +} + +impl VMFileMapping { + /// Create a new ['VMFileMapping'] for a file. The file provides the backing + /// pages for the file contents. + /// + /// # Arguments + /// + /// * 'file' - The file to create the mapping for. This instance keeps a + /// reference to the file until it is dropped. + /// + /// * 'offset' - The offset from the start of the file to map. This must be + /// aligned to PAGE_SIZE. + /// + /// * 'size' - The number of bytes to map starting from the offset. This + /// must be a multiple of PAGE_SIZE. + /// + /// # Returns + /// + /// Initialized mapping on success, Err(SvsmError::Mem) on error + pub fn new( + file: FileHandle, + offset: usize, + size: usize, + permission: VMFileMappingPermission, + ) -> Result { + let page_size = align_up(size, PAGE_SIZE); + let file_size = align_up(file.size(), PAGE_SIZE); + if (offset & (PAGE_SIZE - 1)) != 0 { + return Err(SvsmError::Mem); + } + if (page_size + offset) > file_size { + return Err(SvsmError::Mem); + } + + // Take references to the file pages + let count = page_size >> PAGE_SHIFT; + let mut pages = Vec::>::new(); + for page_index in 0..count { + pages.push(file.mapping(offset + page_index * PAGE_SIZE)); + } + // For ranges with write access we need to take a copy of the ram pages + // to allow them to be written to without modifying the contents of the + // file itself and also to prevent pointer aliasing with any other + // FileHandles that may be open on the same file.
+ let write_copy = if permission == VMFileMappingPermission::Write { + Some(VMWriteFileMapping(RawAllocMapping::new(size))) + } else { + None + }; + + Ok(Self { + file, + size: page_size, + permission, + pages, + write_copy, + }) + } +} + +#[cfg(not(test))] +fn copy_page( + vmr: &VMR, + file: &FileHandle, + offset: usize, + paddr_dst: PhysAddr, + page_size: usize, +) -> Result<(), SvsmError> { + let temp_map = VMPhysMem::new(paddr_dst, page_size, true); + let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)))?; + let slice = unsafe { from_raw_parts_mut(vaddr_new_page.bits() as *mut u8, page_size) }; + file.seek(offset); + file.read(slice)?; + vmr.remove(vaddr_new_page)?; + Ok(()) +} + +#[cfg(test)] +fn copy_page( + _vmr: &VMR, + file: &FileHandle, + offset: usize, + paddr_dst: PhysAddr, + page_size: usize, +) -> Result<(), SvsmError> { + // In the test environment the physical address is actually the virtual + // address. We can take advantage of this to copy the file contents into the + // mock physical address without worrying about VMRs and page tables. 
+ let slice = unsafe { from_raw_parts_mut(paddr_dst.bits() as *mut u8, page_size) }; + file.seek(offset); + file.read(slice)?; + Ok(()) +} + +impl VirtualMapping for VMFileMapping { + fn mapping_size(&self) -> usize { + self.size + } + + fn map(&self, offset: usize) -> Option { + let page_index = offset / PAGE_SIZE; + if page_index >= self.pages.len() { + return None; + } + if let Some(write_copy) = &self.write_copy { + let write_addr = write_copy.map(offset); + if write_addr.is_some() { + return write_addr; + } + } + self.pages[page_index].as_ref().map(|p| p.phys_addr()) + } + + fn pt_flags(&self, offset: usize) -> crate::mm::pagetable::PTEntryFlags { + match self.permission { + VMFileMappingPermission::Read => PageTable::task_data_ro_flags(), + VMFileMappingPermission::Write => { + if let Some(write_copy) = &self.write_copy { + if write_copy.get_alloc().present(offset) { + PageTable::task_data_flags() + } else { + PageTable::task_data_ro_flags() + } + } else { + PageTable::task_data_ro_flags() + } + } + VMFileMappingPermission::Execute => PageTable::task_exec_flags(), + } + } + + fn handle_page_fault( + &mut self, + vmr: &VMR, + offset: usize, + write: bool, + ) -> Result { + let page_size = self.page_size(); + if write { + if let Some(write_copy) = self.write_copy.as_mut() { + // This is a writeable region with copy-on-write access. The + // page fault will have occurred because the page has not yet + // been allocated. Allocate a page and copy the readonly source + // page into the new writeable page. 
+ let offset_aligned = offset & !(page_size - 1); + if write_copy + .get_alloc_mut() + .alloc_page(offset_aligned) + .is_ok() + { + let paddr_new_page = write_copy.map(offset_aligned).ok_or(SvsmError::Mem)?; + copy_page(vmr, &self.file, offset_aligned, paddr_new_page, page_size)?; + return Ok(VMPageFaultResolution { + paddr: paddr_new_page, + flags: PageTable::task_data_flags(), + }); + } + } + } + Err(SvsmError::Mem) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + address::{Address, VirtAddr}, + fs::{create, initialize_fs, open, uninitialize_fs, FileHandle}, + mm::{ + alloc::{TestRootMem, DEFAULT_TEST_MEMORY_SIZE}, + pagetable::PageTable, + vm::{VirtualMapping, VMR}, + }, + types::PAGE_SIZE, + }; + + use super::VMFileMapping; + + fn create_512b_test_file() -> FileHandle { + let fh = create("test1").unwrap(); + let buf = [0xffu8; 512]; + fh.write(&buf).expect("File write failed"); + fh + } + + fn create_16k_test_file() -> FileHandle { + let fh = create("test1").unwrap(); + let mut buf = [0xffu8; PAGE_SIZE * 4]; + buf[PAGE_SIZE] = 1; + buf[PAGE_SIZE * 2] = 2; + buf[PAGE_SIZE * 3] = 3; + fh.write(&buf).expect("File write failed"); + fh + } + + fn create_5000b_test_file() -> FileHandle { + let fh = create("test1").unwrap(); + let buf = [0xffu8; 5000]; + fh.write(&buf).expect("File write failed"); + fh + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_create_mapping() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_512b_test_file(); + let vm = VMFileMapping::new(fh, 0, 512, super::VMFileMappingPermission::Read) + .expect("Failed to create new VMFileMapping"); + assert_eq!(vm.mapping_size(), PAGE_SIZE); + assert_eq!(vm.permission, super::VMFileMappingPermission::Read); + assert_eq!(vm.pages.len(), 1); + + uninitialize_fs(); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_create_unaligned_offset() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + 
initialize_fs(); + + // Not page aligned + let offset = PAGE_SIZE + 0x60; + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new( + fh, + offset, + fh2.size() - offset, + super::VMFileMappingPermission::Read, + ); + assert!(vm.is_err()); + + uninitialize_fs(); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_create_size_too_large() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new(fh, 0, fh2.size() + 1, super::VMFileMappingPermission::Read); + assert!(vm.is_err()); + + uninitialize_fs(); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_create_offset_overflow() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new( + fh, + PAGE_SIZE, + fh2.size(), + super::VMFileMappingPermission::Read, + ); + assert!(vm.is_err()); + + uninitialize_fs(); + } + + fn test_map_first_page(permission: super::VMFileMappingPermission) { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_512b_test_file(); + let vm = + VMFileMapping::new(fh, 0, 512, permission).expect("Failed to create new VMFileMapping"); + + let res = vm + .map(0) + .expect("Mapping of first VMFileMapping page failed"); + + let fh2 = open("test1").unwrap(); + assert_eq!( + fh2.mapping(0) + .expect("Failed to get file page mapping") + .phys_addr(), + res + ); + + uninitialize_fs(); + } + + fn test_map_multiple_pages(permission: super::VMFileMappingPermission) { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new(fh, 0, fh2.size(), permission) + .expect("Failed to create new 
VMFileMapping"); + + for i in 0..4 { + let res = vm + .map(i * PAGE_SIZE) + .expect("Mapping of VMFileMapping page failed"); + + assert_eq!( + fh2.mapping(i * PAGE_SIZE) + .expect("Failed to get file page mapping") + .phys_addr(), + res + ); + } + uninitialize_fs(); + } + + fn test_map_unaligned_file_size(permission: super::VMFileMappingPermission) { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_5000b_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new(fh, 0, fh2.size(), permission) + .expect("Failed to create new VMFileMapping"); + + assert_eq!(vm.mapping_size(), PAGE_SIZE * 2); + assert_eq!(vm.pages.len(), 2); + + for i in 0..2 { + let res = vm + .map(i * PAGE_SIZE) + .expect("Mapping of first VMFileMapping page failed"); + + assert_eq!( + fh2.mapping(i * PAGE_SIZE) + .expect("Failed to get file page mapping") + .phys_addr(), + res + ); + } + uninitialize_fs(); + } + + fn test_map_non_zero_offset(permission: super::VMFileMappingPermission) { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let vm = VMFileMapping::new(fh, 2 * PAGE_SIZE, PAGE_SIZE, permission) + .expect("Failed to create new VMFileMapping"); + + assert_eq!(vm.mapping_size(), PAGE_SIZE); + assert_eq!(vm.pages.len(), 1); + + let res = vm + .map(0) + .expect("Mapping of first VMFileMapping page failed"); + + assert_eq!( + fh2.mapping(2 * PAGE_SIZE) + .expect("Failed to get file page mapping") + .phys_addr(), + res + ); + uninitialize_fs(); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_first_page_readonly() { + test_map_first_page(super::VMFileMappingPermission::Read) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_multiple_pages_readonly() { + test_map_multiple_pages(super::VMFileMappingPermission::Read) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = 
"FIXME")] + fn test_map_unaligned_file_size_readonly() { + test_map_unaligned_file_size(super::VMFileMappingPermission::Read) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_non_zero_offset_readonly() { + test_map_non_zero_offset(super::VMFileMappingPermission::Read) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_first_page_readwrite() { + test_map_first_page(super::VMFileMappingPermission::Write) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_multiple_pages_readwrite() { + test_map_multiple_pages(super::VMFileMappingPermission::Write) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_unaligned_file_size_readwrite() { + test_map_unaligned_file_size(super::VMFileMappingPermission::Write) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_map_non_zero_offset_readwrite() { + test_map_non_zero_offset(super::VMFileMappingPermission::Write) + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_handle_page_fault() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let mut vm = VMFileMapping::new(fh, 0, fh2.size(), super::VMFileMappingPermission::Write) + .expect("Failed to create new VMFileMapping"); + + let vmr = VMR::new( + VirtAddr::from(0usize), + VirtAddr::from(16usize * PAGE_SIZE), + PageTable::data_flags(), + ); + let res = vm + .handle_page_fault(&vmr, PAGE_SIZE, true) + .expect("handle_page_fault() failed"); + assert!(vm.write_copy.is_some()); + assert_eq!( + vm.write_copy.as_ref().unwrap().0.mapping_size(), + vm.mapping_size() + ); + assert_eq!( + res.paddr, + vm.write_copy + .as_ref() + .unwrap() + .0 + .map(PAGE_SIZE) + .expect("Page not allocated") + ); + // create_16k_test_file() populates the first byte of each 4K page with + // the page number. 
We can use this to check if the copy from the file + // page to the writeable page worked correctly. + assert_eq!(unsafe { (res.paddr.bits() as *const u8).read() }, 1); + + assert_eq!( + vm.map(PAGE_SIZE).expect("Failed to map file page"), + res.paddr + ); + + uninitialize_fs(); + } + + #[test] + #[cfg_attr(test_in_svsm, ignore = "FIXME")] + fn test_handle_page_fault_unaligned_addr() { + let _test_mem = TestRootMem::setup(DEFAULT_TEST_MEMORY_SIZE); + initialize_fs(); + + let fh = create_16k_test_file(); + let fh2 = open("test1").unwrap(); + let mut vm = VMFileMapping::new(fh, 0, fh2.size(), super::VMFileMappingPermission::Write) + .expect("Failed to create new VMFileMapping"); + + let vmr = VMR::new( + VirtAddr::from(0usize), + VirtAddr::from(16usize * PAGE_SIZE), + PageTable::data_flags(), + ); + let res = vm + .handle_page_fault(&vmr, PAGE_SIZE * 2 + 1, true) + .expect("handle_page_fault() failed"); + assert_eq!( + res.paddr, + vm.write_copy + .as_ref() + .unwrap() + .0 + .map(PAGE_SIZE * 2) + .expect("Page not allocated") + ); + // create_16k_test_file() populates the first byte of each 4K page with + // the page number. We can use this to check if the copy from the file + // page to the writeable page worked correctly. 
+ assert_eq!(unsafe { (res.paddr.bits() as *const u8).read() }, 2); + + assert_eq!( + vm.map(PAGE_SIZE * 2).expect("Failed to map file page"), + res.paddr + ); + + uninitialize_fs(); + } +} diff --git a/src/mm/vm/mapping/kernel_stack.rs b/src/mm/vm/mapping/kernel_stack.rs index 74a75d501..2dd5386f6 100644 --- a/src/mm/vm/mapping/kernel_stack.rs +++ b/src/mm/vm/mapping/kernel_stack.rs @@ -116,7 +116,7 @@ impl VirtualMapping for VMKernelStack { } } - fn pt_flags(&self) -> PTEntryFlags { + fn pt_flags(&self, _offset: usize) -> PTEntryFlags { PTEntryFlags::WRITABLE | PTEntryFlags::NX | PTEntryFlags::ACCESSED | PTEntryFlags::DIRTY } } diff --git a/src/mm/vm/mapping/mod.rs b/src/mm/vm/mapping/mod.rs index 0b75696a1..e6541e956 100644 --- a/src/mm/vm/mapping/mod.rs +++ b/src/mm/vm/mapping/mod.rs @@ -5,13 +5,15 @@ // Author: Joerg Roedel pub mod api; +pub mod file_mapping; pub mod kernel_stack; pub mod phys_mem; pub mod rawalloc; pub mod reserved; pub mod vmalloc; -pub use api::{Mapping, VMMAdapter, VirtualMapping, VMM}; +pub use api::{Mapping, VMMAdapter, VMPageFaultResolution, VirtualMapping, VMM}; +pub use file_mapping::{VMFileMapping, VMFileMappingPermission}; pub use kernel_stack::VMKernelStack; pub use phys_mem::VMPhysMem; pub use rawalloc::RawAllocMapping; diff --git a/src/mm/vm/mapping/phys_mem.rs b/src/mm/vm/mapping/phys_mem.rs index 84b64debd..a86017413 100644 --- a/src/mm/vm/mapping/phys_mem.rs +++ b/src/mm/vm/mapping/phys_mem.rs @@ -69,7 +69,7 @@ impl VirtualMapping for VMPhysMem { } } - fn pt_flags(&self) -> PTEntryFlags { + fn pt_flags(&self, _offset: usize) -> PTEntryFlags { PTEntryFlags::NX | PTEntryFlags::ACCESSED | if self.writable { diff --git a/src/mm/vm/mapping/rawalloc.rs b/src/mm/vm/mapping/rawalloc.rs index 65f10b7a1..dbc87484f 100644 --- a/src/mm/vm/mapping/rawalloc.rs +++ b/src/mm/vm/mapping/rawalloc.rs @@ -4,6 +4,8 @@ // // Author: Joerg Roedel +use core::iter; + use crate::address::PhysAddr; use crate::error::SvsmError; use 
crate::mm::alloc::{allocate_file_page_ref, PageRef}; @@ -18,7 +20,7 @@ use alloc::vec::Vec; #[derive(Default, Debug)] pub struct RawAllocMapping { /// A vec containing references to PageFile allocations - pages: Vec, + pages: Vec>, /// Number of pages required in [`pages`] count: usize, @@ -36,22 +38,38 @@ impl RawAllocMapping { /// New instance of RawAllocMapping. Still needs to call `alloc_pages()` on it before it can be used. pub fn new(size: usize) -> Self { let count = align_up(size, PAGE_SIZE) >> PAGE_SHIFT; - RawAllocMapping { - pages: Vec::new(), - count, + let pages: Vec> = iter::repeat(None).take(count).collect(); + RawAllocMapping { pages, count } + } + + /// Allocates a single backing page of type PageFile if the page has not already + /// been allocated + /// + /// # Argument + /// + /// * 'offset' - The offset in bytes from the start of the mapping + /// + /// # Returns + /// + /// `Ok(())` if the page has been allocated, `Err(SvsmError::Mem)` otherwise + pub fn alloc_page(&mut self, offset: usize) -> Result<(), SvsmError> { + let index = offset >> PAGE_SHIFT; + if index < self.count { + let entry = self.pages.get_mut(index).ok_or(SvsmError::Mem)?; + entry.get_or_insert(allocate_file_page_ref()?); } + Ok(()) } - /// Allocates the backing pages of type PageFile + /// Allocates a full set of backing pages of type PageFile /// /// # Returns /// /// `Ok(())` when all pages could be allocated, `Err(SvsmError::Mem)` otherwise pub fn alloc_pages(&mut self) -> Result<(), SvsmError> { - for _ in 0..self.count { - self.pages.push(allocate_file_page_ref()?); + for index in 0..self.count { + self.alloc_page(index * PAGE_SIZE)?; } - Ok(()) } @@ -75,7 +93,9 @@ impl RawAllocMapping { /// Physical address to map for the given offset. 
pub fn map(&self, offset: usize) -> Option { let pfn = offset >> PAGE_SHIFT; - self.pages.get(pfn).map(|r| r.phys_addr()) + self.pages + .get(pfn) + .and_then(|r| r.as_ref().map(|r| r.phys_addr())) } /// Unmap call-back - currently nothing to do in this function @@ -86,4 +106,19 @@ impl RawAllocMapping { pub fn unmap(&self, _offset: usize) { // Nothing to do for now } + + /// Check if a page has been allocated + /// + /// # Arguments + /// + /// * 'offset' - Byte offset into the mapping + /// + /// # Returns + /// + /// 'true' if the page containing the offset has been allocated + /// otherwise 'false'. + pub fn present(&self, offset: usize) -> bool { + let pfn = offset >> PAGE_SHIFT; + self.pages.get(pfn).and_then(|r| r.as_ref()).is_some() + } } diff --git a/src/mm/vm/mapping/reserved.rs b/src/mm/vm/mapping/reserved.rs index 69cd9745b..7dcbf4266 100644 --- a/src/mm/vm/mapping/reserved.rs +++ b/src/mm/vm/mapping/reserved.rs @@ -55,7 +55,7 @@ impl VirtualMapping for VMReserved { None } - fn pt_flags(&self) -> PTEntryFlags { + fn pt_flags(&self, _offset: usize) -> PTEntryFlags { PTEntryFlags::NX | PTEntryFlags::ACCESSED | PTEntryFlags::WRITABLE | PTEntryFlags::DIRTY } } diff --git a/src/mm/vm/mapping/vmalloc.rs b/src/mm/vm/mapping/vmalloc.rs index 2b9471393..41a3090fb 100644 --- a/src/mm/vm/mapping/vmalloc.rs +++ b/src/mm/vm/mapping/vmalloc.rs @@ -70,7 +70,7 @@ impl VirtualMapping for VMalloc { self.alloc.unmap(offset); } - fn pt_flags(&self) -> PTEntryFlags { + fn pt_flags(&self, _offset: usize) -> PTEntryFlags { PTEntryFlags::WRITABLE | PTEntryFlags::NX | PTEntryFlags::ACCESSED | PTEntryFlags::DIRTY } } diff --git a/src/mm/vm/mod.rs b/src/mm/vm/mod.rs index b8f661354..1c0d8baf2 100644 --- a/src/mm/vm/mod.rs +++ b/src/mm/vm/mod.rs @@ -8,7 +8,7 @@ mod mapping; mod range; pub use mapping::{ - Mapping, RawAllocMapping, VMKernelStack, VMMAdapter, VMPhysMem, VMReserved, VMalloc, - VirtualMapping, VMM, + Mapping, RawAllocMapping, VMFileMapping, VMFileMappingPermission, 
VMKernelStack, VMMAdapter, + VMPhysMem, VMReserved, VMalloc, VirtualMapping, VMM, }; pub use range::{VMRMapping, VMR, VMR_GRANULE}; diff --git a/src/mm/vm/range.rs b/src/mm/vm/range.rs index cccdb7d8b..90788dea2 100644 --- a/src/mm/vm/range.rs +++ b/src/mm/vm/range.rs @@ -155,7 +155,6 @@ impl VMR { let (vmm_start, vmm_end) = vmm.range(); let mut pgtbl_parts = self.pgtbl_parts.lock_write(); let mapping = vmm.get_mapping(); - let pt_flags = self.pt_flags | mapping.pt_flags() | PTEntryFlags::PRESENT; let mut offset: usize = 0; let page_size = mapping.page_size(); let shared = mapping.shared(); @@ -163,6 +162,7 @@ impl VMR { while vmm_start + offset < vmm_end { let idx = PageTable::index::<3>(VirtAddr::from(vmm_start - rstart)); if let Some(paddr) = mapping.map(offset) { + let pt_flags = self.pt_flags | mapping.pt_flags(offset) | PTEntryFlags::PRESENT; if page_size == PAGE_SIZE { pgtbl_parts[idx].map_4k(vmm_start + offset, paddr, pt_flags, shared)?; } else if page_size == PAGE_SIZE_2M { @@ -373,6 +373,68 @@ impl VMR { ); } } + + /// Notify the range that a page fault has occurred. This should be called from + /// the page fault handler. The mappings within this virtual memory region are + /// examined and if they overlap with the page fault address then + /// [`VirtualMemoryRange::handle_page_fault()`] is called to handle the page + /// fault within that range. + /// + /// # Arguments + /// + /// * `vaddr` - Virtual memory address that was the subject of the page fault + /// + /// * 'write' - 'true' if a write was attempted. 'false' if a read was attempted. + /// + /// # Returns + /// + /// '()' if the page fault was successfully handled. + /// + /// 'SvsmError::Mem' if the page fault should propagate to the next handler. + pub fn handle_page_fault(&self, vaddr: VirtAddr, write: bool) -> Result<(), SvsmError> { + // Get the mapping that contains the faulting address.
This needs to + // be done as a separate step, returning a reference to the mapping to + // avoid issues with the mapping page fault handler needing mutable access + // to `self.tree` via `insert()`. + let pf_mapping = { + let tree = self.tree.lock_read(); + let addr = vaddr.pfn(); + let cursor = tree.find(&addr); + if let Some(node) = cursor.get() { + let (start, end) = node.range(); + if vaddr >= start && vaddr < end { + Some((node.get_mapping_clone(), start)) + } else { + None + } + } else { + None + } + }; + + if let Some((pf_mapping, start)) = pf_mapping { + let resolution = pf_mapping + .get_mut() + .handle_page_fault(self, vaddr - start, write)?; + // The handler has resolved the page fault by allocating a new page. + // Update the page table accordingly. + let vaddr = vaddr.page_align(); + let page_size = pf_mapping.get().page_size(); + let shared = pf_mapping.get().shared(); + let mut pgtbl_parts = self.pgtbl_parts.lock_write(); + + let (rstart, _) = self.virt_range(); + let idx = PageTable::index::<3>(VirtAddr::from(vaddr - rstart)); + if page_size == PAGE_SIZE { + pgtbl_parts[idx].map_4k(vaddr, resolution.paddr, resolution.flags, shared)?; + } else if page_size == PAGE_SIZE_2M { + pgtbl_parts[idx].map_2m(vaddr, resolution.paddr, resolution.flags, shared)?; + } + Ok(()) + } else { + Err(SvsmError::Mem) + } + } } #[derive(Debug)] diff --git a/src/task/tasks.rs b/src/task/tasks.rs index a74471b9c..99da39975 100644 --- a/src/task/tasks.rs +++ b/src/task/tasks.rs @@ -254,6 +254,10 @@ impl Task { self.affinity = affinity; } + pub fn handle_pf(&self, vaddr: VirtAddr, write: bool) -> Result<(), SvsmError> { + self.vm_kernel_range.handle_page_fault(vaddr, write) + } + fn allocate_stack(entry: extern "C" fn()) -> Result<(Arc, VirtAddr), SvsmError> { let stack = VMKernelStack::new()?; let offset = stack.top_of_stack(VirtAddr::from(0u64));