kernel/cpu: add safety comments #584

Open · wants to merge 3 commits into main
11 changes: 9 additions & 2 deletions kernel/src/cpu/efer.rs
@@ -28,7 +28,14 @@ pub fn read_efer() -> EFERFlags {
EFERFlags::from_bits_truncate(read_msr(EFER))
}

pub fn write_efer(efer: EFERFlags) {
/// # Safety
///
/// The caller must ensure that the new value written to the EFER MSR does
/// not break memory safety.
pub unsafe fn write_efer(efer: EFERFlags) {
let val = efer.bits();
write_msr(EFER, val);
// SAFETY: requirements must be upheld by the caller.
unsafe {
write_msr(EFER, val);
}
}
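For illustration, a hypothetical caller of the new API discharges the obligation with its own SAFETY comment (enable_nx and the EFERFlags::NXE flag are assumptions for this sketch, not part of the diff):

fn enable_nx() {
    let mut efer = read_efer();
    efer.insert(EFERFlags::NXE);
    // SAFETY: setting EFER.NXE only restricts execute permissions on
    // pages; it cannot invalidate existing references or mappings.
    unsafe { write_efer(efer) };
}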
22 changes: 17 additions & 5 deletions kernel/src/cpu/gdt.rs
@@ -80,7 +80,18 @@ impl GDT {

let tss_entries = &self.entries[idx..idx + 1].as_mut_ptr();

// SAFETY:
// For add():
// - the byte offsets for entries idx and idx + 1 don't overflow isize.
// - the borrow checker guarantees that self is still allocated.
// - self.entries[idx..idx + 2] fits in self's allocation.
// For write_volatile():
// - the borrow checker guarantees that self.entries is allocated.
// - alignment is checked by the assertions below.
unsafe {
assert_eq!(align_of_val(&tss_entries.add(0)), align_of::<GDTEntry>());
assert_eq!(align_of_val(&tss_entries.add(1)), align_of::<GDTEntry>());

tss_entries.add(0).write_volatile(desc0);
tss_entries.add(1).write_volatile(desc1);
}
@@ -93,11 +104,12 @@ impl GDT {
pub fn load_tss(&mut self, tss: &X86Tss) {
let (desc0, desc1) = tss.to_gdt_entry();

unsafe {
self.set_tss_entry(desc0, desc1);
asm!("ltr %ax", in("ax") SVSM_TSS, options(att_syntax));
self.clear_tss_entry()
}
self.set_tss_entry(desc0, desc1);
// SAFETY: loading the task register must be done in assembly.
// It's safe to do so as long as a global GDT is in use and still
// allocated, which is always the case here.
Comment on lines +109 to +110

msft-jlange (Collaborator) commented on Dec 20, 2024:
Actually, it is not necessarily the case that a global GDT is in use here. However, the lifetime of the GDT doesn't matter, because once the task register is loaded, only the TSS needs to remain live. For that reason, either this whole function should be unsafe (because the lifetime of the tss parameter cannot be proven by the compiler) or the tss parameter should be declared as &'static. A quick experiment suggests that the latter approach is workable as long as we modify PerCpu::load() and PerCpu::load_isst() to take &'static self. Once that is done, this comment should be updated to indicate that the assembly here is safe because tss has a static lifetime.
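A minimal sketch of the reviewer's second option, assuming PerCpu::load() and PerCpu::load_isst() are adjusted to take &'static self as described:

pub fn load_tss(&mut self, tss: &'static X86Tss) {
    let (desc0, desc1) = tss.to_gdt_entry();
    self.set_tss_entry(desc0, desc1);
    // SAFETY: `ltr` is safe because `tss` has a static lifetime, so the
    // TSS referenced by the task register can never be deallocated.
    unsafe { asm!("ltr %ax", in("ax") SVSM_TSS, options(att_syntax)) };
    self.clear_tss_entry()
}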

unsafe { asm!("ltr %ax", in("ax") SVSM_TSS, options(att_syntax)) };
self.clear_tss_entry()
}

pub fn kernel_cs(&self) -> GDTEntry {
7 changes: 6 additions & 1 deletion kernel/src/cpu/msr.rs
@@ -27,10 +27,15 @@ pub fn read_msr(msr: u32) -> u64 {
(eax as u64) | (edx as u64) << 32
}

pub fn write_msr(msr: u32, val: u64) {
/// # Safety
///
/// The caller must ensure that the new value written to the target MSR does
/// not break memory safety.
pub unsafe fn write_msr(msr: u32, val: u64) {
let eax = (val & 0x0000_0000_ffff_ffff) as u32;
let edx = (val >> 32) as u32;

// SAFETY: requirements must be upheld by the caller.
unsafe {
asm!("wrmsr",
in("ecx") msr,
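As a usage sketch (update_msr is hypothetical, not part of this diff), the unsafe contract composes naturally with read-modify-write helpers that forward the obligation to their callers:

/// # Safety
///
/// The caller must ensure that the value produced by `f` keeps the target
/// MSR in a memory-safe configuration.
pub unsafe fn update_msr(msr: u32, f: impl FnOnce(u64) -> u64) {
    let new = f(read_msr(msr));
    // SAFETY: delegated to the caller per this function's contract.
    unsafe { write_msr(msr, new) };
}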
3 changes: 2 additions & 1 deletion kernel/src/cpu/percpu.rs
@@ -800,7 +800,8 @@ impl PerCpu {

pub fn load_isst(&self) {
let isst = self.isst.as_ptr();
write_msr(ISST_ADDR, isst as u64);
// SAFETY: ISST is already set up when this is called.
unsafe { write_msr(ISST_ADDR, isst as u64) };
}

pub fn load(&self) {
4 changes: 3 additions & 1 deletion kernel/src/cpu/vc.rs
@@ -577,7 +577,9 @@ mod tests {
fn test_wrmsr_tsc_aux() {
if is_qemu_test_env() && is_test_platform_type(SvsmPlatformType::Snp) {
let test_val = 0x1234;
verify_ghcb_gets_altered(|| write_msr(MSR_TSC_AUX, test_val));
verify_ghcb_gets_altered(||
// SAFETY: writing to TSC MSR doesn't break memory safety.
Collaborator commented: Should say TSC_AUX MSR.

unsafe { write_msr(MSR_TSC_AUX, test_val) });
let readback = verify_ghcb_gets_altered(|| read_msr(MSR_TSC_AUX));
assert_eq!(test_val, readback);
}
2 changes: 2 additions & 0 deletions kernel/src/cpu/x86/smap.rs
@@ -12,6 +12,7 @@ use core::arch::asm;
#[inline(always)]
pub fn clac() {
if !cfg!(feature = "nosmap") {
// SAFETY: `clac` instruction doesn't break memory safety.
unsafe { asm!("clac", options(att_syntax, nomem, nostack, preserves_flags)) }
}
}
@@ -22,6 +23,7 @@ pub fn clac()
#[inline(always)]
pub fn stac() {
if !cfg!(feature = "nosmap") {
// SAFETY: `stac` instruction doesn't break memory safety.
unsafe { asm!("stac", options(att_syntax, nomem, nostack, preserves_flags)) }
}
}
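Since clac() and stac() are safe functions, a caller can pair them in an RAII guard so SMAP is re-enabled on every exit path. This guard is a hypothetical sketch, not part of this diff:

struct UserAccessGuard;

impl UserAccessGuard {
    fn new() -> Self {
        // Allow supervisor access to user pages while the guard lives.
        stac();
        UserAccessGuard
    }
}

impl Drop for UserAccessGuard {
    fn drop(&mut self) {
        // Re-enable SMAP protection.
        clac();
    }
}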
7 changes: 5 additions & 2 deletions kernel/src/hyperv/hv.rs
@@ -190,12 +190,15 @@ pub fn hyperv_setup_hypercalls() -> Result<(), SvsmError> {
.expect("Hypercall code page already allocated");

// Set the guest OS ID. The value is arbitrary.
write_msr(HyperVMsr::GuestOSID.into(), 0xC0C0C0C0);
// SAFETY: writing to the HV Guest OS ID MSR doesn't break memory safety.
unsafe { write_msr(HyperVMsr::GuestOSID.into(), 0xC0C0C0C0) };

// Set the hypercall code page address to the physical address of the
// allocated page, and mark it enabled.
let pa = virt_to_phys(page);
write_msr(HyperVMsr::Hypercall.into(), u64::from(pa) | 1);
// SAFETY: we trust the page allocator to allocate a valid page to which pa
// points.
unsafe { write_msr(HyperVMsr::Hypercall.into(), u64::from(pa) | 1) };

// Obtain the current VTL for use in future hypercalls.
let vsm_status_value = get_vp_register(hyperv::HvRegisterName::VsmVpStatus)?;
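For reference, the Hyper-V TLFS defines the hypercall MSR layout as: bit 0 = enable, bits 11:1 reserved, bits 63:12 = guest page frame number. Because pa is page aligned, pa | 1 encodes both fields in a single write; a sketch of the encoding (hypercall_msr_value is a hypothetical helper):

fn hypercall_msr_value(pa: u64) -> u64 {
    // The GPFN occupies bits 63:12; a page-aligned address already has
    // its low 12 bits clear, so OR-ing in the enable bit is sufficient.
    debug_assert_eq!(pa & 0xfff, 0, "hypercall page must be page aligned");
    pa | 1
}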
6 changes: 4 additions & 2 deletions kernel/src/platform/native.rs
@@ -156,12 +156,14 @@ impl SvsmPlatform for NativePlatform {
}

fn post_irq(&self, icr: u64) -> Result<(), SvsmError> {
write_msr(APIC_MSR_ICR, icr);
// SAFETY: writing to ICR MSR doesn't break memory safety.
unsafe { write_msr(APIC_MSR_ICR, icr) };
Ok(())
}

fn eoi(&self) {
write_msr(APIC_MSR_EOI, 0);
// SAFETY: writing to EOI MSR doesn't break memory safety.
unsafe { write_msr(APIC_MSR_EOI, 0) };
}

fn is_external_interrupt(&self, _vector: usize) -> bool {
5 changes: 4 additions & 1 deletion kernel/src/sev/ghcb.rs
@@ -377,7 +377,10 @@ impl GHCB {
// Disable interrupts between writing the MSR and making the GHCB call
// to prevent reentrant use of the GHCB MSR.
let guard = IrqGuard::new();
write_msr(SEV_GHCB, ghcb_pa);
// SAFETY: GHCB is already allocated and set up so this is safe.
unsafe {
write_msr(SEV_GHCB, ghcb_pa);
}
raw_vmgexit();
drop(guard);

13 changes: 8 additions & 5 deletions kernel/src/sev/msr_protocol.rs
@@ -78,7 +78,8 @@ pub fn verify_ghcb_version() {
// used.
assert!(!irqs_enabled());
// Request SEV information.
write_msr(SEV_GHCB, GHCBMsr::SEV_INFO_REQ);
// SAFETY: Requesting info through the GHCB MSR protocol is safe.
unsafe { write_msr(SEV_GHCB, GHCBMsr::SEV_INFO_REQ) };
raw_vmgexit();
let sev_info = read_msr(SEV_GHCB);
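The write/vmgexit/read sequence under IrqGuard recurs throughout this file; factored out as a hypothetical helper (not part of this diff), the pattern looks like:

fn ghcb_msr_exchange(request: u64) -> u64 {
    // Keep interrupts disabled across the exchange so a handler cannot
    // clobber the GHCB MSR between the request and the response.
    let guard = IrqGuard::new();
    // SAFETY: `request` is a well-formed GHCB MSR protocol value.
    unsafe { write_msr(SEV_GHCB, request) };
    raw_vmgexit();
    let response = read_msr(SEV_GHCB);
    drop(guard);
    response
}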

@@ -107,7 +108,8 @@ pub fn hypervisor_ghcb_features() -> GHCBHvFeatures {

pub fn init_hypervisor_ghcb_features() -> Result<(), GhcbMsrError> {
let guard = IrqGuard::new();
write_msr(SEV_GHCB, GHCBMsr::SNP_HV_FEATURES_REQ);
// SAFETY: Requesting HV features through the GHCB MSR protocol is safe.
unsafe { write_msr(SEV_GHCB, GHCBMsr::SNP_HV_FEATURES_REQ) };
raw_vmgexit();
let result = read_msr(SEV_GHCB);
drop(guard);
@@ -143,7 +145,7 @@ pub fn register_ghcb_gpa_msr(addr: PhysAddr) -> Result<(), GhcbMsrError> {

info |= GHCBMsr::SNP_REG_GHCB_GPA_REQ;
let guard = IrqGuard::new();
write_msr(SEV_GHCB, info);
// SAFETY: registering the GHCB GPA through the MSR protocol is safe.
unsafe { write_msr(SEV_GHCB, info) };
raw_vmgexit();
info = read_msr(SEV_GHCB);
drop(guard);
@@ -170,7 +172,7 @@ fn set_page_valid_status_msr(addr: PhysAddr, valid: bool) -> Result<(), GhcbMsrError> {

info |= GHCBMsr::SNP_STATE_CHANGE_REQ;
let guard = IrqGuard::new();
write_msr(SEV_GHCB, info);
// SAFETY: requesting a page state change through the MSR protocol is safe.
unsafe { write_msr(SEV_GHCB, info) };
raw_vmgexit();
let response = read_msr(SEV_GHCB);
drop(guard);
@@ -201,7 +203,8 @@ pub fn request_termination_msr() -> ! {
// no reason to preserve interrupt state. Interrupts can be disabled
// outright prior to shutdown.
raw_irqs_disable();
write_msr(SEV_GHCB, info);
// SAFETY: Requesting termination doesn't break memory safety.
unsafe { write_msr(SEV_GHCB, info) };
raw_vmgexit();
loop {
halt();
6 changes: 5 additions & 1 deletion kernel/src/task/schedule.rs
@@ -396,7 +396,11 @@ pub fn schedule() {
this_cpu().set_tss_rsp0(next.stack_bounds.end());
}
if is_cet_ss_supported() {
write_msr(PL0_SSP, next.exception_shadow_stack.bits() as u64);
// SAFETY: Task::exception_shadow_stack is always initialized when
// creating a new Task.
unsafe {
write_msr(PL0_SSP, next.exception_shadow_stack.bits() as u64);
}
}

// Get task-pointers, consuming the Arcs and release their reference