Skip to content

Commit

Permalink
Merge pull request #340 from 00xc/clippy
Browse files Browse the repository at this point in the history
Fix nightly clippy warnings
  • Loading branch information
joergroedel authored May 10, 2024
2 parents 13acb6a + 75ce1e2 commit f83fa40
Show file tree
Hide file tree
Showing 8 changed files with 50 additions and 27 deletions.
4 changes: 4 additions & 0 deletions kernel/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@
// Author: Joerg Roedel <[email protected]>

fn main() {
// Extra cfgs
println!("cargo::rustc-check-cfg=cfg(fuzzing)");
println!("cargo::rustc-check-cfg=cfg(test_in_svsm)");

// Stage 2
println!("cargo:rustc-link-arg-bin=stage2=-nostdlib");
println!("cargo:rustc-link-arg-bin=stage2=--build-id=none");
Expand Down
26 changes: 17 additions & 9 deletions kernel/src/cpu/percpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,8 +109,11 @@ impl VmsaRef {
}
}

/// Returns a mutable reference to the [`VMSA`] this reference points at.
///
/// # Panics
///
/// Panics if `self.vaddr` converts to a null pointer (`as_mut()` would
/// return `None`).
// Clippy suggests `&self` here, but `&mut self` is deliberate: exclusive
// access to `self` is what makes handing out `&mut VMSA` sound (see the
// SAFETY comment below).
#[allow(clippy::needless_pass_by_ref_mut)]
pub fn vmsa(&mut self) -> &mut VMSA {
let ptr = self.vaddr.as_mut_ptr::<VMSA>();
// SAFETY: this function takes &mut self, so only one mutable
// reference to the underlying VMSA can exist.
unsafe { ptr.as_mut().unwrap() }
}
}
Expand Down Expand Up @@ -178,8 +181,11 @@ impl GuestVmsaRef {
self.caa
}

/// Returns a mutable reference to the guest [`VMSA`] mapped at
/// `SVSM_PERCPU_VMSA_BASE`.
///
/// # Panics
///
/// Panics if no VMSA has been assigned (`self.vmsa` is `None`).
// `&mut self` is intentional despite clippy's lint: exclusive access to
// `self` is the soundness argument for the SAFETY comment below.
#[allow(clippy::needless_pass_by_ref_mut)]
pub fn vmsa(&mut self) -> &mut VMSA {
assert!(self.vmsa.is_some());
// SAFETY: this function takes &mut self, so only one mutable
// reference to the underlying VMSA can exist.
unsafe { SVSM_PERCPU_VMSA_BASE.as_mut_ptr::<VMSA>().as_mut().unwrap() }
}

Expand Down Expand Up @@ -385,20 +391,20 @@ impl PerCpu {
self.apic_id
}

fn allocate_page_table(&mut self) -> Result<(), SvsmError> {
/// Initializes this CPU's VM range and installs a page table cloned
/// from the initial shared page table.
///
/// Returns `Err(SvsmError)` if either the range initialization or the
/// page-table clone fails.
fn allocate_page_table(&self) -> Result<(), SvsmError> {
    self.vm_range.initialize()?;
    // Clone the init page table and make it this CPU's active reference.
    self.set_pgtable(get_init_pgtable_locked().clone_shared()?);
    Ok(())
}

pub fn set_pgtable(&mut self, pgtable: PageTableRef) {
/// Replaces this CPU's page-table reference with `pgtable`.
pub fn set_pgtable(&self, pgtable: PageTableRef) {
    // Assign through the guard returned by get_pgtable(); the guard is
    // dropped at the end of the statement.
    *self.get_pgtable() = pgtable;
}

fn allocate_stack(&mut self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
fn allocate_stack(&self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
let stack = VMKernelStack::new()?;
let top_of_stack = stack.top_of_stack(base);
let mapping = Arc::new(Mapping::new(stack));
Expand Down Expand Up @@ -450,15 +456,15 @@ impl PerCpu {
self.tss.ist_stacks[IST_DF] = double_fault_stack;
}

pub fn map_self_stage2(&mut self) -> Result<(), SvsmError> {
/// Maps this CPU's private data page at `SVSM_PERCPU_BASE` in the
/// stage-2 page table, using the standard data-page flags.
///
/// Returns `Err(SvsmError)` if the 4k mapping cannot be created.
pub fn map_self_stage2(&self) -> Result<(), SvsmError> {
    // Translate the per-CPU structure's virtual address to physical.
    let paddr = virt_to_phys(VirtAddr::from(self.cpu_unsafe));
    self.get_pgtable()
        .map_4k(SVSM_PERCPU_BASE, paddr, PTEntryFlags::data())
}

pub fn map_self(&mut self) -> Result<(), SvsmError> {
pub fn map_self(&self) -> Result<(), SvsmError> {
let vaddr = VirtAddr::from(self.cpu_unsafe);
let paddr = virt_to_phys(vaddr);

Expand All @@ -468,7 +474,7 @@ impl PerCpu {
Ok(())
}

fn initialize_vm_ranges(&mut self) -> Result<(), SvsmError> {
fn initialize_vm_ranges(&self) -> Result<(), SvsmError> {
let size_4k = SVSM_PERCPU_TEMP_END_4K - SVSM_PERCPU_TEMP_BASE_4K;
let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k));
self.vm_range
Expand All @@ -482,7 +488,7 @@ impl PerCpu {
Ok(())
}

fn finish_page_table(&mut self) {
/// Populates this CPU's page table with the mappings recorded in its
/// VM range.
fn finish_page_table(&self) {
    // The page-table guard is a temporary; it lives until the end of
    // this statement, which covers the whole populate() call.
    self.vm_range.populate(&mut self.get_pgtable());
}
Expand Down Expand Up @@ -532,10 +538,12 @@ impl PerCpu {
Ok(())
}

pub fn load_pgtable(&mut self) {
/// Loads this CPU's page table by delegating to the table's own
/// `load()` routine.
pub fn load_pgtable(&self) {
self.get_pgtable().load();
}

/// Loads this CPU's TSS into the GDT.
///
/// Takes `&mut self` (despite clippy's suggestion to use `&self`) to
/// statically rule out concurrent callers on the same `PerCpu`.
// Ensure this function does not have multiple concurrent callers.
#[allow(clippy::needless_pass_by_ref_mut)]
pub fn load_tss(&mut self) {
gdt_mut().load_tss(&self.tss);
}
Expand Down Expand Up @@ -608,7 +616,7 @@ impl PerCpu {
self.shared().guest_vmsa.lock()
}

pub fn alloc_guest_vmsa(&mut self) -> Result<(), SvsmError> {
pub fn alloc_guest_vmsa(&self) -> Result<(), SvsmError> {
let vaddr = allocate_new_vmsa(RMPFlags::GUEST_VMPL)?;
let paddr = virt_to_phys(vaddr);

Expand Down
2 changes: 1 addition & 1 deletion kernel/src/fs/filesystem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ impl RawFileHandle {
result
}

fn truncate(&mut self, offset: usize) -> Result<usize, SvsmError> {
/// Truncates the backing file to `offset`, forwarding directly to the
/// file object's own `truncate` implementation.
// NOTE(review): the returned usize presumably reflects the resulting
// size — confirm against the underlying file implementation.
fn truncate(&self, offset: usize) -> Result<usize, SvsmError> {
self.file.truncate(offset)
}

Expand Down
8 changes: 4 additions & 4 deletions kernel/src/greq/msg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ impl SnpGuestRequestMsg {
/// before the object is dropped. Shared pages should not be freed
/// (returned to the allocator)
pub fn set_shared(&mut self) -> Result<(), SvsmReqError> {
let vaddr = VirtAddr::from(addr_of!(*self));
let vaddr = VirtAddr::from(addr_of_mut!(*self));
this_cpu_mut()
.get_pgtable()
.set_shared_4k(vaddr)
Expand All @@ -257,7 +257,7 @@ impl SnpGuestRequestMsg {

/// Set the C-bit (memory encryption bit) for the Self page
pub fn set_encrypted(&mut self) -> Result<(), SvsmReqError> {
let vaddr = VirtAddr::from(addr_of!(*self));
let vaddr = VirtAddr::from(addr_of_mut!(*self));
this_cpu_mut()
.get_pgtable()
.set_encrypted_4k(vaddr)
Expand Down Expand Up @@ -495,14 +495,14 @@ impl SnpGuestRequestExtData {
/// before the object is dropped. Shared pages should not be freed
/// (returned to the allocator)
pub fn set_shared(&mut self) -> Result<(), SvsmReqError> {
let start = VirtAddr::from(addr_of!(*self));
let start = VirtAddr::from(addr_of_mut!(*self));
let end = start + size_of::<Self>();
set_shared_region_4k(start, end)
}

/// Set the C-bit (memory encryption bit) for the Self pages
pub fn set_encrypted(&mut self) -> Result<(), SvsmReqError> {
let start = VirtAddr::from(addr_of!(*self));
let start = VirtAddr::from(addr_of_mut!(*self));
let end = start + size_of::<Self>();
set_encrypted_region_4k(start, end)
}
Expand Down
21 changes: 17 additions & 4 deletions kernel/src/mm/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -414,17 +414,29 @@ impl MemoryRegion {
Some(self.start_phys + offset)
}

/// Gets a pointer to the page information for a given page frame number.
/// Gets a mutable pointer to the page information for a given page frame
/// number.
///
/// # Safety
///
/// The caller must provide a valid pfn, otherwise the returned pointer is
/// undefined, as the compiler is allowed to optimize assuming there will
/// be no arithmetic overflows.
unsafe fn page_info_mut_ptr(&mut self, pfn: usize) -> *mut PageStorageType {
// Page-info entries form an array of `PageStorageType` starting at
// `start_virt`; offset by `pfn` elements to reach this frame's slot.
self.start_virt.as_mut_ptr::<PageStorageType>().add(pfn)
}

/// Gets a pointer to the page information for a given page frame number.
///
/// # Safety
///
/// The caller must provide a valid pfn, otherwise the returned pointer is
/// undefined, as the compiler is allowed to optimize assuming there will
/// be no arithmetic overflows.
unsafe fn page_info_ptr(&self, pfn: usize) -> *const PageStorageType {
// Read-only counterpart of `page_info_mut_ptr`: same indexing, but the
// pointer is derived from `&self` and is `*const`.
self.start_virt.as_ptr::<PageStorageType>().add(pfn)
}

/// Checks if a page frame number is valid.
///
/// # Panics
Expand All @@ -442,12 +454,12 @@ impl MemoryRegion {
}

/// Writes page information for a given page frame number.
fn write_page_info(&self, pfn: usize, pi: PageInfo) {
fn write_page_info(&mut self, pfn: usize, pi: PageInfo) {
// Validates the frame number; check_pfn() panics on an invalid pfn.
self.check_pfn(pfn);

// Serialize the page info into its in-memory representation before
// storing it into this frame's slot.
let info: PageStorageType = pi.to_mem();
// SAFETY: we have checked that the pfn is valid via check_pfn() above.
unsafe { self.page_info_mut_ptr(pfn).write(info) };
}

/// Reads page information for a given page frame number.
Expand Down Expand Up @@ -1139,6 +1151,7 @@ impl SlabPage {
}

/// Free the memory (destroy) the [`SlabPage`]
#[allow(clippy::needless_pass_by_ref_mut)]
fn destroy(&mut self) {
if self.vaddr.is_null() {
return;
Expand Down
6 changes: 3 additions & 3 deletions kernel/src/mm/vm/range.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ impl VMR {
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
fn initialize_common(&mut self, lazy: bool) -> Result<(), SvsmError> {
fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
assert!(start < end && start.is_aligned(VMR_GRANULE) && end.is_aligned(VMR_GRANULE));
Expand All @@ -153,7 +153,7 @@ impl VMR {
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
pub fn initialize(&mut self) -> Result<(), SvsmError> {
pub fn initialize(&self) -> Result<(), SvsmError> {
// Eager variant: delegates to `initialize_common` with `lazy = false`.
self.initialize_common(false)
}

Expand All @@ -162,7 +162,7 @@ impl VMR {
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
pub fn initialize_lazy(&mut self) -> Result<(), SvsmError> {
pub fn initialize_lazy(&self) -> Result<(), SvsmError> {
// Lazy variant: delegates to `initialize_common` with `lazy = true`.
self.initialize_common(true)
}

Expand Down
2 changes: 1 addition & 1 deletion kernel/src/sev/ghcb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ impl GHCB {
}

pub fn shutdown(&mut self) -> Result<(), SvsmError> {
let vaddr = VirtAddr::from(self as *const GHCB);
let vaddr = VirtAddr::from(ptr::from_mut(self));
let paddr = virt_to_phys(vaddr);

// Re-encrypt page
Expand Down
8 changes: 3 additions & 5 deletions kernel/src/task/tasks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -173,8 +173,7 @@ impl Task {

cpu.populate_page_table(&mut pgtable);

let mut vm_kernel_range =
VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
vm_kernel_range.initialize()?;

let (stack, raw_bounds, rsp_offset) = Self::allocate_ktask_stack(cpu, entry)?;
Expand Down Expand Up @@ -214,16 +213,15 @@ impl Task {

cpu.populate_page_table(&mut pgtable);

let mut vm_kernel_range =
VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
vm_kernel_range.initialize()?;

let (stack, raw_bounds, stack_offset) = Self::allocate_utask_stack(cpu, user_entry)?;
vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;

vm_kernel_range.populate(&mut pgtable);

let mut vm_user_range = VMR::new(USER_MEM_START, USER_MEM_END, PTEntryFlags::USER);
let vm_user_range = VMR::new(USER_MEM_START, USER_MEM_END, PTEntryFlags::USER);
vm_user_range.initialize_lazy()?;

// Remap at the per-task offset
Expand Down

0 comments on commit f83fa40

Please sign in to comment.