From a5d13a965765f117f7401c87ef9e6c547bf9a0a0 Mon Sep 17 00:00:00 2001
From: Jon Lange <jlange@microsoft.com>
Date: Thu, 13 Jun 2024 16:19:40 -0700
Subject: [PATCH 1/2] cpu: Support TPR when handling SVSM interrupts

The priority scheme inherently associated with interrupts permits masking
interrupts of lower priority classes while handling higher-priority
interrupts. This makes it possible to enable interrupts while in an
interrupt handler, allowing high-priority interrupts to be handled while
masking lower-priority interrupts. By raising TPR during interrupt handling,
an interrupt handler can safely be preempted by higher-priority interrupts.

This change also defines a mechanism where code unrelated to interrupt
handlers can temporarily raise TPR to mask low-priority interrupts while
permitting the delivery of high-priority interrupts. In the future, it will
be possible to associate spin locks or other locks with TPR so that a lock
can safely be acquired, preventing reentrancy by low-priority interrupts
while still permitting the handling of high-priority interrupts.

Signed-off-by: Jon Lange <jlange@microsoft.com>
---
 kernel/src/cpu/idt/svsm.rs    |  26 ++++-
 kernel/src/cpu/irq_state.rs   | 201 +++++++++++++++++++++++++++++++---
 kernel/src/cpu/mod.rs         |   4 +-
 kernel/src/cpu/percpu.rs      |  48 +++++++-
 kernel/src/sev/hv_doorbell.rs |  40 +++++--
 kernel/src/task/schedule.rs   |   6 +-
 6 files changed, 293 insertions(+), 32 deletions(-)

diff --git a/kernel/src/cpu/idt/svsm.rs b/kernel/src/cpu/idt/svsm.rs
index cd8b92fe3..6dd1d2f58 100644
--- a/kernel/src/cpu/idt/svsm.rs
+++ b/kernel/src/cpu/idt/svsm.rs
@@ -16,6 +16,7 @@ use super::common::{
     TS_VECTOR, UD_VECTOR, VC_VECTOR, XF_VECTOR,
 };
 use crate::address::VirtAddr;
+use crate::cpu::irq_state::{raw_get_tpr, raw_set_tpr, tpr_from_vector};
 use crate::cpu::registers::RFlags;
 use crate::cpu::shadow_stack::IS_CET_SUPPORTED;
 use crate::cpu::X86ExceptionContext;
@@ -351,11 +352,28 @@ pub extern "C" fn common_isr_handler_entry(vector: usize) {
     cpu.irqs_pop_nesting();
 }
 
-pub fn common_isr_handler(_vector: usize) {
-    // Interrupt injection requests currently require no processing; they occur
-    // simply to ensure an exit from the guest.
+pub fn common_isr_handler(vector: usize) {
+    // Set TPR based on the vector being handled and reenable interrupts to
+    // permit delivery of higher priority interrupts. Because this routine
+    // dispatches interrupts which should only be observable if interrupts
+    // are enabled, the IRQ nesting count must be zero at this point.
+    let previous_tpr = raw_get_tpr();
+    raw_set_tpr(tpr_from_vector(vector));
 
-    // Treat any unhandled interrupt as a spurious interrupt.
+    let cpu = this_cpu();
+    cpu.irqs_enable();
+
+    // Treat any unhandled interrupt as a spurious interrupt. Interrupt
+    // injection requests currently require no processing; they occur simply
+    // to ensure an exit from the guest.
+
+    // Disable interrupts before restoring TPR.
+    cpu.irqs_disable();
+    raw_set_tpr(previous_tpr);
+
+    // Perform the EOI cycle after the interrupt processing state has been
+    // restored so that recurrent interrupts will not introduce recursion at
+    // this point.
     SVSM_PLATFORM.eoi();
 }

diff --git a/kernel/src/cpu/irq_state.rs b/kernel/src/cpu/irq_state.rs
index 65db8b2b0..0fd18de5a 100644
--- a/kernel/src/cpu/irq_state.rs
+++ b/kernel/src/cpu/irq_state.rs
@@ -5,10 +5,10 @@
 // Author: Joerg Roedel
 
 use crate::cpu::percpu::this_cpu;
-use crate::cpu::{irqs_disable, irqs_enable};
+use crate::cpu::{irqs_disable, irqs_enable, lower_tpr, raise_tpr};
 use core::arch::asm;
 use core::marker::PhantomData;
-use core::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicI32, Ordering};
 
 /// Interrupt flag in RFLAGS register
 pub const EFLAGS_IF: usize = 1 << 9;
@@ -51,9 +51,9 @@ pub fn raw_irqs_enable() {
 #[inline(always)]
 #[must_use = "Unused irqs_enabled() result - meant to be irq_enable()?"]
 pub fn irqs_enabled() -> bool {
+    let state: usize;
     // SAFETY: The inline assembly just reads the processors RFLAGS register
     // and does not change any state.
-    let state: usize;
     unsafe {
         asm!("pushfq",
              "popq {}",
@@ -75,6 +75,50 @@ pub fn irqs_disabled() -> bool {
     !irqs_enabled()
 }
 
+/// Converts an interrupt vector to a TPR value.
+#[inline(always)]
+pub fn tpr_from_vector(vector: usize) -> usize {
+    // TPR is the high four bits of the vector number.
+    vector >> 4
+}
+
+/// Unconditionally set TPR.
+///
+/// Callers need to ensure that the selected TPR is appropriate for the
+/// current context.
+///
+/// * `tpr_value` - the new TPR value.
+#[inline(always)]
+pub fn raw_set_tpr(tpr_value: usize) {
+    // SAFETY: Inline assembly to change TPR, which does not change any state
+    // related to memory safety.
+    unsafe {
+        asm!("mov {tpr}, %cr8",
+             tpr = in(reg) tpr_value,
+             options(att_syntax));
+    }
+}
+
+/// Query TPR on the current CPU.
+///
+/// # Returns
+///
+/// The current TPR.
+#[inline(always)]
+pub fn raw_get_tpr() -> usize {
+    // SAFETY: The inline assembly just reads the TPR register and does not
+    // change any state.
+    unsafe {
+        let mut ret: usize;
+        asm!("movq %cr8, {tpr}",
+             tpr = out(reg) ret,
+             options(att_syntax));
+        ret
+    }
+}
+
+const TPR_LIMIT: usize = 16;
+
 /// This structure keeps track of PerCpu IRQ states. It tracks the original IRQ
 /// state and how deep IRQ-disable calls have been nested. The use of atomics
 /// is necessary for interior mutability and to make state modifications safe
@@ -87,8 +131,10 @@ pub fn irqs_disabled() -> bool {
 pub struct IrqState {
     /// IRQ state when count was `0`
     state: AtomicBool,
-    /// Depth of IRQ-disabled nesting
-    count: AtomicIsize,
+    /// Depth of IRQ-disabled nesting. Index 0 specifies the count of
+    /// IRQ disables and the remaining indices specify the nesting count
+    /// for each raised TPR level.
+    counts: [AtomicI32; TPR_LIMIT],
     /// Make the type !Send + !Sync
     phantom: PhantomData<*const ()>,
 }
@@ -98,7 +144,7 @@ impl IrqState {
     pub fn new() -> Self {
         Self {
             state: AtomicBool::new(false),
-            count: AtomicIsize::new(0),
+            counts: Default::default(),
             phantom: PhantomData,
         }
     }
@@ -120,7 +166,7 @@ impl IrqState {
     /// The previous nesting level.
     pub fn push_nesting(&self, was_enabled: bool) {
         debug_assert!(irqs_disabled());
-        let val = self.count.fetch_add(1, Ordering::Relaxed);
+        let val = self.counts[0].fetch_add(1, Ordering::Relaxed);
 
         assert!(val >= 0);
 
@@ -151,10 +197,10 @@ impl IrqState {
     /// # Returns
     ///
     /// The new IRQ nesting level.
- pub fn pop_nesting(&self) -> isize { + pub fn pop_nesting(&self) -> i32 { debug_assert!(irqs_disabled()); - let val = self.count.fetch_sub(1, Ordering::Relaxed); + let val = self.counts[0].fetch_sub(1, Ordering::Relaxed); assert!(val > 0); @@ -181,8 +227,8 @@ impl IrqState { /// # Returns /// /// Levels of IRQ-disable nesting currently active - pub fn count(&self) -> isize { - self.count.load(Ordering::Relaxed) + pub fn count(&self) -> i32 { + self.counts[0].load(Ordering::Relaxed) } /// Changes whether interrupts will be enabled when the nesting count @@ -192,17 +238,66 @@ impl IrqState { /// and must ensure that the specified value is appropriate for the /// current environment. pub fn set_restore_state(&self, enabled: bool) { - assert!(self.count.load(Ordering::Relaxed) != 0); + assert!(self.counts[0].load(Ordering::Relaxed) != 0); self.state.store(enabled, Ordering::Relaxed); } + + /// Increments TPR. + /// + /// The caller must ensure that a `raise_tpr()` call is followed by a + /// matching call to `lower_tpr()`. + /// + /// * `tpr_value` - The new TPR value. Must be greater than or equal to + /// the current TPR value. + #[inline(always)] + pub fn raise_tpr(&self, tpr_value: usize) { + assert!(tpr_value > 0 && tpr_value >= raw_get_tpr()); + raw_set_tpr(tpr_value); + + // Increment the count of requests to raise to this TPR to indicate + // the number of execution contexts that require this TPR. + self.counts[tpr_value].fetch_add(1, Ordering::Relaxed); + } + + /// Decrements TPR. + /// + /// The caller must ensure that a `lower` call balances a preceding + /// `raise` call to the indicated level. + /// + /// * `tpr_value` - The TPR from which the caller would like to lower. + /// Must be less than or equal to the current TPR. + #[inline(always)] + pub fn lower_tpr(&self, tpr_value: usize) { + let current_tpr = raw_get_tpr(); + debug_assert!(tpr_value <= current_tpr); + + // Decrement the count of execution contexts requiring this raised + // TPR. + let count = self.counts[tpr_value].fetch_sub(1, Ordering::Relaxed); + debug_assert!(count > 0); + + if count == 1 && tpr_value >= current_tpr { + // Find the highest TPR that is still required. + for new_tpr in (0..tpr_value).rev() { + if self.counts[new_tpr].load(Ordering::Relaxed) != 0 { + raw_set_tpr(new_tpr); + return; + } + } + + // No TPR is still in use, so lower to zero. + raw_set_tpr(0); + } + } } impl Drop for IrqState { /// This struct should never be dropped. Add a debug check in case it is /// dropped anyway. fn drop(&mut self) { - let count = self.count.load(Ordering::Relaxed); - assert_eq!(count, 0); + for count in &self.counts { + assert_eq!(count.load(Ordering::Relaxed), 0); + } } } @@ -212,7 +307,7 @@ impl Drop for IrqState { /// /// The struct implements the `Default` and `Drop` traits for easy use. #[derive(Debug)] -#[must_use = "if unused previous IRQ state will be immediatly restored"] +#[must_use = "if unused previous IRQ state will be immediately restored"] pub struct IrqGuard { /// Make the type !Send + !Sync phantom: PhantomData<*const ()>, @@ -244,9 +339,43 @@ impl Drop for IrqGuard { } } +/// A TPR guard which raises TPR upon creation. When the guard goes out of +/// scope, TPR is lowered to the highest active TPR. +/// +/// The struct implements the `Drop` trait for easy use. 
+#[derive(Debug, Default)] +#[must_use = "if unused previous TPR will be immediately restored"] +pub struct TprGuard { + tpr_value: usize, + + /// Make the type !Send + !Sync + phantom: PhantomData<*const ()>, +} + +impl TprGuard { + pub fn raise(tpr_value: usize) -> Self { + // SAFETY: Safe because the struct implements `Drop, which restores + // TPR state. + raise_tpr(tpr_value); + + Self { + tpr_value, + phantom: PhantomData, + } + } +} + +impl Drop for TprGuard { + fn drop(&mut self) { + // Lower TPR from the value to which it was raised. + lower_tpr(self.tpr_value); + } +} + #[cfg(test)] mod tests { use super::*; + use crate::platform::SVSM_PLATFORM; #[test] #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")] @@ -293,4 +422,46 @@ mod tests { raw_irqs_disable(); } } + + #[test] + #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")] + fn tpr_test() { + if SVSM_PLATFORM.use_interrupts() { + assert_eq!(raw_get_tpr(), 0); + raise_tpr(7); + assert_eq!(raw_get_tpr(), 7); + raise_tpr(8); + assert_eq!(raw_get_tpr(), 8); + lower_tpr(8); + assert_eq!(raw_get_tpr(), 7); + lower_tpr(7); + assert_eq!(raw_get_tpr(), 0); + } + } + + #[test] + #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")] + fn tpr_guard_test() { + if SVSM_PLATFORM.use_interrupts() { + assert_eq!(raw_get_tpr(), 0); + // Test in-order raise/lower. + let g1 = TprGuard::raise(8); + assert_eq!(raw_get_tpr(), 8); + let g2 = TprGuard::raise(9); + assert_eq!(raw_get_tpr(), 9); + drop(g2); + assert_eq!(raw_get_tpr(), 8); + drop(g1); + assert_eq!(raw_get_tpr(), 0); + // Test out-of-order raise/lower. + let g1 = TprGuard::raise(8); + assert_eq!(raw_get_tpr(), 8); + let g2 = TprGuard::raise(9); + assert_eq!(raw_get_tpr(), 9); + drop(g1); + assert_eq!(raw_get_tpr(), 9); + drop(g2); + assert_eq!(raw_get_tpr(), 0); + } + } } diff --git a/kernel/src/cpu/mod.rs b/kernel/src/cpu/mod.rs index c699d756a..b6e6d7828 100644 --- a/kernel/src/cpu/mod.rs +++ b/kernel/src/cpu/mod.rs @@ -29,7 +29,7 @@ pub mod x86; pub use apic::LocalApic; pub use idt::common::X86ExceptionContext; -pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState}; -pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable}; +pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState, TprGuard}; +pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable, lower_tpr, raise_tpr}; pub use registers::{X86GeneralRegs, X86InterruptFrame, X86SegmentRegs}; pub use tlb::*; diff --git a/kernel/src/cpu/percpu.rs b/kernel/src/cpu/percpu.rs index ea2212f23..8e5d60acf 100644 --- a/kernel/src/cpu/percpu.rs +++ b/kernel/src/cpu/percpu.rs @@ -457,10 +457,32 @@ impl PerCpu { /// # Returns /// /// Current nesting depth of irq_disable() calls. - pub fn irq_nesting_count(&self) -> isize { + pub fn irq_nesting_count(&self) -> i32 { self.irq_state.count() } + /// Raises TPR on the current CPU. Keeps track of the nesting level. + /// + /// The caller must ensure that every `raise_tpr()` call is followed by a + /// matching call to `lower_tpr()`. + #[inline(always)] + pub fn raise_tpr(&self, tpr_value: usize) { + self.irq_state.raise_tpr(tpr_value); + } + + /// Lowers TPR from the current level to the new level required by the + /// current nesting state. + /// + /// The caller must ensure that a `lower_tpr()` call balances a preceding + /// `raise_tpr()` call to the indicated level. + /// + /// * `tpr_value` - The TPR from which the caller would like to lower. + /// Must be less than or equal to the current TPR. 
+ #[inline(always)] + pub fn lower_tpr(&self, tpr_value: usize) { + self.irq_state.lower_tpr(tpr_value); + } + /// Sets up the CPU-local GHCB page. pub fn setup_ghcb(&self) -> Result<(), SvsmError> { let page = GhcbPage::new()?; @@ -1105,10 +1127,32 @@ pub fn irqs_enable() { /// # Returns /// /// Current nesting depth of irq_disable() calls. -pub fn irq_nesting_count() -> isize { +pub fn irq_nesting_count() -> i32 { this_cpu().irq_nesting_count() } +/// Raises TPR on the current CPU. Keeps track of the nesting level. +/// +/// The caller must ensure that every `raise_tpr()` call is followed by a +/// matching call to `lower_tpr()`. +#[inline(always)] +pub fn raise_tpr(tpr_value: usize) { + this_cpu().raise_tpr(tpr_value); +} + +/// Lowers TPR from the current level to the new level required by the +/// current nesting state. +/// +/// The caller must ensure that a `lower_tpr()` call balances a preceding +/// `raise_tpr()` call to the indicated level. +/// +/// * `tpr_value` - The TPR from which the caller would like to lower. +/// Must be less than or equal to the current TPR. +#[inline(always)] +pub fn lower_tpr(tpr_value: usize) { + this_cpu().lower_tpr(tpr_value); +} + /// Gets the GHCB for this CPU. /// /// # Panics diff --git a/kernel/src/sev/hv_doorbell.rs b/kernel/src/sev/hv_doorbell.rs index fd10f9ff0..a332dbb57 100644 --- a/kernel/src/sev/hv_doorbell.rs +++ b/kernel/src/sev/hv_doorbell.rs @@ -2,6 +2,7 @@ // Author: Jon Lange (jlange@microsoft.com) use crate::cpu::idt::svsm::common_isr_handler; +use crate::cpu::irq_state::{raw_get_tpr, tpr_from_vector}; use crate::cpu::percpu::this_cpu; use crate::cpu::IrqState; use crate::error::SvsmError; @@ -98,24 +99,49 @@ impl HVDoorbell { panic!("#MC exception delivered via #HV"); } - // Consume interrupts as long as they are available. + // Consume interrupts as long as they are available and as long as + // they are appropriate for the current task priority. + let tpr = raw_get_tpr(); + let mut vector = self.vector.load(Ordering::Relaxed); loop { - // Consume any interrupt that may be present. - let vector = self.vector.swap(0, Ordering::Relaxed); - if vector == 0 { + // Check whether an interrupt is present. If it is at or below + // the current task priority, then it will not be dispatched. + // If the interrupt is not dispatched, then the vector must remain + // in the #HV doorbell page so the hypervisor knows it has not been + // placed into service. + if (tpr_from_vector(vector as usize)) <= tpr { break; } - common_isr_handler(vector as usize); + match self + .vector + .compare_exchange_weak(vector, 0, Ordering::Relaxed, Ordering::Relaxed) + { + Ok(_) => common_isr_handler(vector as usize), + Err(new) => vector = new, + } } // Ignore per-VMPL events; these will be consumed when APIC emulation // is performed. } + fn events_to_process(&self) -> bool { + // If NoFurtherSignal is set, it means the hypervisor has posted a + // new event, and there must be an event that requires processing. If + // NoFurtherSignal is not set, then processing is required if the + // pending vector is a higher priority than the current TPR. + let flags = HVDoorbellFlags::from(self.flags.load(Ordering::Relaxed)); + if flags.no_further_signal() { + true + } else { + let min_vector = (raw_get_tpr() + 1) << 4; + self.vector.load(Ordering::Relaxed) as usize >= min_vector + } + } + /// This function must always be called with interrupts enabled. 
     pub fn process_if_required(&self, irq_state: &IrqState) {
-        let flags = HVDoorbellFlags::from(self.flags.load(Ordering::Relaxed));
-        while flags.no_further_signal() {
+        while self.events_to_process() {
             // #HV event processing must always be performed with interrupts
             // disabled.
             irq_state.disable();
diff --git a/kernel/src/task/schedule.rs b/kernel/src/task/schedule.rs
index 55b13b6a8..da6b6ee61 100644
--- a/kernel/src/task/schedule.rs
+++ b/kernel/src/task/schedule.rs
@@ -33,16 +33,17 @@ extern crate alloc;
 use super::INITIAL_TASK_ID;
 use super::{Task, TaskListAdapter, TaskPointer, TaskRunListAdapter};
 use crate::address::{Address, VirtAddr};
+use crate::cpu::irq_state::raw_get_tpr;
 use crate::cpu::msr::write_msr;
 use crate::cpu::percpu::{irq_nesting_count, this_cpu};
 use crate::cpu::shadow_stack::{is_cet_ss_supported, IS_CET_SUPPORTED, PL0_SSP};
-use crate::cpu::sse::sse_restore_context;
-use crate::cpu::sse::sse_save_context;
+use crate::cpu::sse::{sse_restore_context, sse_save_context};
 use crate::cpu::IrqGuard;
 use crate::error::SvsmError;
 use crate::fs::Directory;
 use crate::locking::SpinLock;
 use crate::mm::{STACK_TOTAL_SIZE, SVSM_CONTEXT_SWITCH_SHADOW_STACK, SVSM_CONTEXT_SWITCH_STACK};
+use crate::platform::SVSM_PLATFORM;
 use alloc::string::String;
 use alloc::sync::Arc;
 use core::arch::{asm, global_asm};
@@ -365,6 +366,7 @@ pub fn schedule_init() {
 
 fn preemption_checks() {
     assert!(irq_nesting_count() == 0);
+    assert!(raw_get_tpr() == 0 || !SVSM_PLATFORM.use_interrupts());
 }
 
 /// Perform a task switch and hand the CPU over to the next task on the

From 3a47166efa84fe0dd42f5acb1059fdf78fcc1af2 Mon Sep 17 00:00:00 2001
From: Jon Lange <jlange@microsoft.com>
Date: Sun, 24 Nov 2024 11:26:04 -0800
Subject: [PATCH 2/2] locking: define lock types that use TPR for reentrancy
 protection

Using TPR for reentrancy protection means that higher priority interrupts
can continue to be delivered while locks are held. Each TPR-protected lock
defines the TPR with which it is associated, so any attempt to acquire the
lock from a higher-priority interrupt context will panic due to TPR
inversion.

Signed-off-by: Jon Lange <jlange@microsoft.com>
---
 kernel/src/locking/common.rs   | 58 ++++++++++++++++++++++++----------
 kernel/src/locking/mod.rs      | 11 ++++---
 kernel/src/locking/rwlock.rs   | 48 ++++++++++++++++++++++++----
 kernel/src/locking/spinlock.rs | 36 +++++++++++++++++----
 kernel/src/types.rs            |  3 ++
 5 files changed, 122 insertions(+), 34 deletions(-)

diff --git a/kernel/src/locking/common.rs b/kernel/src/locking/common.rs
index 229ff3e92..b041f7900 100644
--- a/kernel/src/locking/common.rs
+++ b/kernel/src/locking/common.rs
@@ -3,41 +3,46 @@
 // Copyright (c) 2024 SUSE LLC
 //
 // Author: Joerg Roedel
-use crate::cpu::IrqGuard;
+use crate::cpu::{IrqGuard, TprGuard};
 use core::marker::PhantomData;
 
-/// Abstracts IRQ state handling when taking and releasing locks. There are two
-/// implemenations:
+/// Abstracts TPR and interrupt state handling when taking and releasing
+/// locks. There are three implementations:
 ///
 /// * [IrqUnsafeLocking] implements the methods as no-ops and does not change
-///   any IRQ state.
-/// * [IrqSafeLocking] actually disables and enables IRQs in the methods,
-///   making a lock IRQ-safe by using this structure.
+///   any IRQ or TPR state.
+/// * [IrqGuardLocking] actually disables and enables IRQs in the methods,
+///   ensuring that no interrupt can be taken while the lock is held.
+/// * [TprGuardLocking] raises and lowers TPR while the lock is held,
+///   ensuring that no interrupt at or below the lock's priority can be taken
+///   while the lock is held. This will panic when attempting to acquire a
+///   lower priority lock from a higher priority interrupt context.
 pub trait IrqLocking {
-    /// Associated helper function to disable IRQs and create an instance of
-    /// the implementing struct. This is used by lock implementations.
+    /// Associated helper function to modify TPR/interrupt state when a lock
+    /// is acquired. This is used by lock implementations and will return an
+    /// instance of the object.
     ///
     /// # Returns
    ///
     /// New instance of implementing struct.
-    fn irqs_disable() -> Self;
+    fn acquire_lock() -> Self;
 }
 
-/// Implements the IRQ state handling methods as no-ops. For use it IRQ-unsafe
-/// locks.
+/// Implements the IRQ state handling methods as no-ops. Locks defined with
+/// this state handler are not safe with respect to reentrancy due to
+/// interrupt delivery.
 #[derive(Debug, Default)]
 pub struct IrqUnsafeLocking;
 
 impl IrqLocking for IrqUnsafeLocking {
-    fn irqs_disable() -> Self {
+    fn acquire_lock() -> Self {
         Self {}
     }
 }
 
-/// Properly implements the IRQ state handling methods. For use it IRQ-safe
-/// locks.
+/// Implements the state handling methods for locks that disable interrupts.
 #[derive(Debug, Default)]
-pub struct IrqSafeLocking {
+pub struct IrqGuardLocking {
     /// IrqGuard to keep track of IRQ state. IrqGuard implements Drop, which
     /// will re-enable IRQs when the struct goes out of scope.
     _guard: IrqGuard,
@@ -45,11 +50,30 @@ pub struct IrqSafeLocking {
     phantom: PhantomData<*const ()>,
 }
 
-impl IrqLocking for IrqSafeLocking {
-    fn irqs_disable() -> Self {
+impl IrqLocking for IrqGuardLocking {
+    fn acquire_lock() -> Self {
         Self {
             _guard: IrqGuard::new(),
             phantom: PhantomData,
         }
     }
 }
+
+/// Implements the state handling methods for locks that raise and lower TPR.
+#[derive(Debug, Default)]
+pub struct TprGuardLocking<const TPR: usize> {
+    /// TprGuard to keep track of TPR state. TprGuard implements Drop, which
+    /// will lower TPR as required when the struct goes out of scope.
+    _guard: TprGuard,
+    /// Make type explicitly !Send + !Sync
+    phantom: PhantomData<*const ()>,
+}
+
+impl<const TPR: usize> IrqLocking for TprGuardLocking<TPR> {
+    fn acquire_lock() -> Self {
+        Self {
+            _guard: TprGuard::raise(TPR),
+            phantom: PhantomData,
+        }
+    }
+}
diff --git a/kernel/src/locking/mod.rs b/kernel/src/locking/mod.rs
index 3c2fbcb71..38f2f0046 100644
--- a/kernel/src/locking/mod.rs
+++ b/kernel/src/locking/mod.rs
@@ -8,9 +8,12 @@ pub mod common;
 pub mod rwlock;
 pub mod spinlock;
 
-pub use common::{IrqLocking, IrqSafeLocking, IrqUnsafeLocking};
+pub use common::{IrqGuardLocking, IrqLocking, TprGuardLocking};
 pub use rwlock::{
-    RWLock, RWLockIrqSafe, ReadLockGuard, ReadLockGuardIrqSafe, WriteLockGuard,
-    WriteLockGuardIrqSafe,
+    RWLock, RWLockAnyTpr, RWLockIrqSafe, RWLockTpr, ReadLockGuard, ReadLockGuardAnyTpr,
+    ReadLockGuardIrqSafe, WriteLockGuard, WriteLockGuardAnyTpr, WriteLockGuardIrqSafe,
+};
+pub use spinlock::{
+    LockGuard, LockGuardAnyTpr, LockGuardIrqSafe, RawLockGuard, SpinLock, SpinLockAnyTpr,
+    SpinLockIrqSafe, SpinLockTpr,
 };
-pub use spinlock::{LockGuard, LockGuardIrqSafe, RawLockGuard, SpinLock, SpinLockIrqSafe};
diff --git a/kernel/src/locking/rwlock.rs b/kernel/src/locking/rwlock.rs
index 0ef2ecaf6..f999f0c4e 100644
--- a/kernel/src/locking/rwlock.rs
+++ b/kernel/src/locking/rwlock.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel
 
 use super::common::*;
+use crate::types::TPR_LOCK;
 use core::cell::UnsafeCell;
 use core::marker::PhantomData;
 use core::ops::{Deref, DerefMut};
@@ -41,7 +42,9 @@ impl Deref for RawReadLockGuard<'_, T, I> {
 }
 
 pub type ReadLockGuard<'a, T> = RawReadLockGuard<'a, T, IrqUnsafeLocking>;
-pub type ReadLockGuardIrqSafe<'a, T> = RawReadLockGuard<'a, T, IrqSafeLocking>;
+pub type ReadLockGuardIrqSafe<'a, T> = RawReadLockGuard<'a, T, IrqGuardLocking>;
+pub type ReadLockGuardAnyTpr<'a, T, const TPR: usize> =
+    RawReadLockGuard<'a, T, TprGuardLocking<TPR>>;
 
 /// A guard that provides exclusive write access to the data protected by [`RWLock`]
 #[derive(Debug)]
@@ -81,7 +84,9 @@ impl DerefMut for RawWriteLockGuard<'_, T, I> {
 }
 
 pub type WriteLockGuard<'a, T> = RawWriteLockGuard<'a, T, IrqUnsafeLocking>;
-pub type WriteLockGuardIrqSafe<'a, T> = RawWriteLockGuard<'a, T, IrqSafeLocking>;
+pub type WriteLockGuardIrqSafe<'a, T> = RawWriteLockGuard<'a, T, IrqGuardLocking>;
+pub type WriteLockGuardAnyTpr<'a, T, const TPR: usize> =
+    RawWriteLockGuard<'a, T, TprGuardLocking<TPR>>;
 
 /// A simple Read-Write Lock (RWLock) that allows multiple readers or
 /// one exclusive writer.
@@ -216,7 +221,7 @@ impl RawRWLock {
     ///
     /// A [`ReadLockGuard`] that provides read access to the protected data.
     pub fn lock_read(&self) -> RawReadLockGuard<'_, T, I> {
-        let irq_state = I::irqs_disable();
+        let irq_state = I::acquire_lock();
         loop {
             let val = self.wait_for_writers();
             let (readers, _) = split_val(val);
@@ -246,7 +251,7 @@ impl RawRWLock {
     ///
     /// A [`WriteLockGuard`] that provides write access to the protected data.
     pub fn lock_write(&self) -> RawWriteLockGuard<'_, T, I> {
-        let irq_state = I::irqs_disable();
+        let irq_state = I::acquire_lock();
 
         // Waiting for current writer to finish
         loop {
@@ -277,7 +282,9 @@ impl RawRWLock {
 }
 
 pub type RWLock<T> = RawRWLock<T, IrqUnsafeLocking>;
-pub type RWLockIrqSafe<T> = RawRWLock<T, IrqSafeLocking>;
+pub type RWLockIrqSafe<T> = RawRWLock<T, IrqGuardLocking>;
+pub type RWLockAnyTpr<T, const TPR: usize> = RawRWLock<T, TprGuardLocking<TPR>>;
+pub type RWLockTpr<T> = RWLockAnyTpr<T, TPR_LOCK>;
 
 mod tests {
     #[test]
@@ -380,7 +387,7 @@ mod tests {
 
         // Lock for read
         let guard = lock.lock_read();
-        // IRQs must still be enabled;
+        // IRQs must be disabled
         assert!(irqs_disabled());
         // Unlock
         drop(guard);
@@ -391,4 +398,33 @@ mod tests {
             raw_irqs_disable();
         }
     }
+
+    #[test]
+    #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
+    fn rw_lock_tpr() {
+        use crate::cpu::irq_state::raw_get_tpr;
+        use crate::locking::*;
+        use crate::types::TPR_LOCK;
+
+        assert_eq!(raw_get_tpr(), 0);
+        let lock = RWLockTpr::new(0);
+
+        // Lock for write
+        let guard = lock.lock_write();
+        // TPR must be raised
+        assert_eq!(raw_get_tpr(), TPR_LOCK);
+        // Unlock
+        drop(guard);
+        // TPR must be restored
+        assert_eq!(raw_get_tpr(), 0);
+
+        // Lock for read
+        let guard = lock.lock_read();
+        // TPR must be raised
+        assert_eq!(raw_get_tpr(), TPR_LOCK);
+        // Unlock
+        drop(guard);
+        // TPR must be restored
+        assert_eq!(raw_get_tpr(), 0);
+    }
 }
diff --git a/kernel/src/locking/spinlock.rs b/kernel/src/locking/spinlock.rs
index 215fd9b17..8d8cb9ec4 100644
--- a/kernel/src/locking/spinlock.rs
+++ b/kernel/src/locking/spinlock.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel
 
 use super::common::*;
+use crate::types::TPR_LOCK;
 use core::cell::UnsafeCell;
 use core::marker::PhantomData;
 use core::ops::{Deref, DerefMut};
@@ -29,7 +30,7 @@ use core::sync::atomic::{AtomicU64, Ordering};
 /// ```
 #[derive(Debug)]
 #[must_use = "if unused the SpinLock will immediately unlock"]
-pub struct RawLockGuard<'a, T, I = IrqUnsafeLocking> {
+pub struct RawLockGuard<'a, T, I> {
     holder: &'a AtomicU64,
     data: &'a mut T,
     #[expect(dead_code)]
@@ -64,7 +65,8 @@ impl DerefMut for RawLockGuard<'_, T, I> {
 }
 
 pub type LockGuard<'a, T> = RawLockGuard<'a, T, IrqUnsafeLocking>;
-pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqSafeLocking>;
+pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqGuardLocking>;
+pub type LockGuardAnyTpr<'a, T, const TPR: usize> = RawLockGuard<'a, T, TprGuardLocking<TPR>>;
 
 /// A simple ticket-spinlock implementation for protecting concurrent data
 /// access.
@@ -95,7 +97,7 @@ pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqSafeLocking>;
 /// };
 /// ```
 #[derive(Debug, Default)]
-pub struct RawSpinLock<T, I = IrqUnsafeLocking> {
+pub struct RawSpinLock<T, I> {
     /// This atomic counter is incremented each time a thread attempts to
     /// acquire the lock. It helps to determine the order in which threads
     /// acquire the lock.
@@ -150,7 +152,7 @@ impl RawSpinLock {
     /// }; // Lock is automatically released when `guard` goes out of scope.
     /// ```
     pub fn lock(&self) -> RawLockGuard<'_, T, I> {
-        let irq_state = I::irqs_disable();
+        let irq_state = I::acquire_lock();
 
         let ticket = self.current.fetch_add(1, Ordering::Relaxed);
 
         loop {
@@ -172,7 +174,7 @@ impl RawSpinLock {
     /// successfully acquired, it returns a [`LockGuard`] that automatically
     /// releases the lock when it goes out of scope.
     pub fn try_lock(&self) -> Option<RawLockGuard<'_, T, I>> {
-        let irq_state = I::irqs_disable();
+        let irq_state = I::acquire_lock();
 
         let current = self.current.load(Ordering::Relaxed);
         let holder = self.holder.load(Ordering::Acquire);
@@ -198,13 +200,16 @@ impl RawSpinLock {
 }
 
 pub type SpinLock<T> = RawSpinLock<T, IrqUnsafeLocking>;
-pub type SpinLockIrqSafe<T> = RawSpinLock<T, IrqSafeLocking>;
+pub type SpinLockIrqSafe<T> = RawSpinLock<T, IrqGuardLocking>;
+pub type SpinLockAnyTpr<T, const TPR: usize> = RawSpinLock<T, TprGuardLocking<TPR>>;
+pub type SpinLockTpr<T> = SpinLockAnyTpr<T, TPR_LOCK>;
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::cpu::irq_state::{raw_irqs_disable, raw_irqs_enable};
+    use crate::cpu::irq_state::{raw_get_tpr, raw_irqs_disable, raw_irqs_enable};
     use crate::cpu::{irqs_disabled, irqs_enabled};
+    use crate::types::TPR_LOCK;
 
     #[test]
     fn test_spin_lock() {
@@ -277,4 +282,21 @@ mod tests {
             raw_irqs_disable();
         }
     }
+
+    #[test]
+    #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
+    fn spin_trylock_tpr() {
+        assert_eq!(raw_get_tpr(), 0);
+
+        let spin_lock = SpinLockTpr::new(0);
+
+        // TPR is zero - taking the lock must succeed and raise TPR.
+        let g1 = spin_lock.try_lock();
+        assert!(g1.is_some());
+        assert_eq!(raw_get_tpr(), TPR_LOCK);
+
+        // Release lock and check if that resets TPR.
+        drop(g1);
+        assert_eq!(raw_get_tpr(), 0);
+    }
 }
diff --git a/kernel/src/types.rs b/kernel/src/types.rs
index 415598063..13427468b 100644
--- a/kernel/src/types.rs
+++ b/kernel/src/types.rs
@@ -93,3 +93,6 @@ impl TryFrom<usize> for Bytes {
         }
     }
 }
+
+pub const TPR_NORMAL: usize = 0;
+pub const TPR_LOCK: usize = 2;
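
Reviewer note (not part of the patches): a minimal usage sketch of the
TPR-based locking introduced above, assuming the types added by this series
(TprGuard, SpinLockTpr, TPR_LOCK). The functions below are hypothetical and
only illustrate the intended pattern.

    use crate::cpu::TprGuard;
    use crate::locking::SpinLockTpr;
    use crate::types::TPR_LOCK;

    // Hypothetical shared state touched from task context and from
    // low-priority interrupt handlers.
    static COUNTER: SpinLockTpr<u64> = SpinLockTpr::new(0);

    fn bump_counter() {
        // Acquiring the lock raises TPR to TPR_LOCK, masking interrupts at
        // or below that priority while still permitting delivery of
        // higher-priority interrupts.
        let mut counter = COUNTER.lock();
        *counter += 1;
        // Dropping the guard lowers TPR back to the highest level still
        // required by any enclosing raise_tpr() caller.
    }

    fn masked_section() {
        // TprGuard can also be used directly, without a lock, to mask
        // low-priority interrupts for a short critical section.
        let _tpr = TprGuard::raise(TPR_LOCK);
        // ... work that must not be reentered by low-priority interrupts ...
    } // TPR is restored when _tpr goes out of scope.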