kernel: use TPR for interrupt reentrancy protection #568

Merged 2 commits on Dec 18, 2024
Changes from all commits
26 changes: 22 additions & 4 deletions kernel/src/cpu/idt/svsm.rs
@@ -16,6 +16,7 @@ use super::common::{
TS_VECTOR, UD_VECTOR, VC_VECTOR, XF_VECTOR,
};
use crate::address::VirtAddr;
use crate::cpu::irq_state::{raw_get_tpr, raw_set_tpr, tpr_from_vector};
use crate::cpu::registers::RFlags;
use crate::cpu::shadow_stack::IS_CET_SUPPORTED;
use crate::cpu::X86ExceptionContext;
@@ -351,11 +352,28 @@ pub extern "C" fn common_isr_handler_entry(vector: usize) {
cpu.irqs_pop_nesting();
}

pub fn common_isr_handler(_vector: usize) {
// Interrupt injection requests currently require no processing; they occur
// simply to ensure an exit from the guest.
pub fn common_isr_handler(vector: usize) {
// Set TPR based on the vector being handled and reenable interrupts to
// permit delivery of higher priority interrupts. Because this routine
// dispatches interrupts which should only be observable if interrupts
// are enabled, the IRQ nesting count must be zero at this point.
let previous_tpr = raw_get_tpr();
raw_set_tpr(tpr_from_vector(vector));

// Treat any unhandled interrupt as a spurious interrupt.
let cpu = this_cpu();
cpu.irqs_enable();

// Treat any unhandled interrupt as a spurious interrupt. Interrupt
// injection requests currently require no processing; they occur simply
// to ensure an exit from the guest.

// Disable interrupts before restoring TPR.
cpu.irqs_disable();
raw_set_tpr(previous_tpr);

// Perform the EOI cycle after the interrupt processing state has been
// restored so that recurrent interrupts will not introduce recursion at
// this point.
SVSM_PLATFORM.eoi();
}
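
To make the reentrancy argument above concrete, here is a small standalone model (illustrative only, not part of this change) of the delivery rule the handler relies on: after raising TPR to the handled vector's priority class and re-enabling interrupts, only vectors in a strictly higher class can preempt the running handler.

```rust
/// Standalone model of the delivery rule; mirrors `tpr_from_vector()` from
/// kernel/src/cpu/irq_state.rs below.
fn tpr_from_vector(vector: usize) -> usize {
    vector >> 4
}

/// True if `incoming` could preempt a handler that raised TPR for `running`
/// and then re-enabled interrupts, as `common_isr_handler()` now does.
fn can_preempt(running: usize, incoming: usize) -> bool {
    tpr_from_vector(incoming) > tpr_from_vector(running)
}

fn main() {
    // While a vector-0x65 handler runs at TPR 6, a class-7 vector such as
    // 0x72 may still be delivered, but class-6 vectors (0x60..=0x6F),
    // including 0x65 itself, are held pending until TPR is lowered again.
    assert!(can_preempt(0x65, 0x72));
    assert!(!can_preempt(0x65, 0x60));
    assert!(!can_preempt(0x65, 0x65));
}
```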

201 changes: 186 additions & 15 deletions kernel/src/cpu/irq_state.rs
@@ -5,10 +5,10 @@
// Author: Joerg Roedel <[email protected]>

use crate::cpu::percpu::this_cpu;
use crate::cpu::{irqs_disable, irqs_enable};
use crate::cpu::{irqs_disable, irqs_enable, lower_tpr, raise_tpr};
use core::arch::asm;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
use core::sync::atomic::{AtomicBool, AtomicI32, Ordering};

/// Interrupt flag in RFLAGS register
pub const EFLAGS_IF: usize = 1 << 9;
@@ -51,9 +51,9 @@ pub fn raw_irqs_enable() {
#[inline(always)]
#[must_use = "Unused irqs_enabled() result - meant to be irq_enable()?"]
pub fn irqs_enabled() -> bool {
let state: usize;
// SAFETY: The inline assembly just reads the processor's RFLAGS register
// and does not change any state.
let state: usize;
unsafe {
asm!("pushfq",
"popq {}",
@@ -75,6 +75,50 @@ pub fn irqs_disabled() -> bool {
!irqs_enabled()
}

/// Converts an interrupt vector to a TPR value.
#[inline(always)]
pub fn tpr_from_vector(vector: usize) -> usize {
// TPR is the high four bits of the vector number.
vector >> 4
}
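
A possible unit test for this fold (not included in the change; shown only to make the vector-to-class mapping concrete):

```rust
#[test]
fn tpr_from_vector_classes() {
    assert_eq!(tpr_from_vector(0x0F), 0); // architectural exception range
    assert_eq!(tpr_from_vector(0x20), 2); // first class above the exceptions
    assert_eq!(tpr_from_vector(0x7F), 7);
    assert_eq!(tpr_from_vector(0xFF), 15); // highest priority class
}
```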

/// Unconditionally set TPR.
///
/// Callers need to ensure that the selected TPR is appropriate for the
/// current context.
///
/// * `tpr_value` - the new TPR value.
#[inline(always)]
pub fn raw_set_tpr(tpr_value: usize) {
// SAFETY: Inline assembly to change TPR, which does not change any state
// related to memory safety.
unsafe {
asm!("mov {tpr}, %cr8",
tpr = in(reg) tpr_value,
options(att_syntax));
}
}

/// Query TPR on the current CPU
///
/// # Returns
///
/// The current TPR.
#[inline(always)]
pub fn raw_get_tpr() -> usize {
// SAFETY: The inline assembly just reads the TPR register and does not
// change any state.
unsafe {
let mut ret: usize;
asm!("movq %cr8, {tpr}",
tpr = out(reg) ret,
options(att_syntax));
ret
}
}

const TPR_LIMIT: usize = 16;

/// This structure keeps track of PerCpu IRQ states. It tracks the original IRQ
/// state and how deep IRQ-disable calls have been nested. The use of atomics
/// is necessary for interior mutability and to make state modifications safe
@@ -87,8 +131,10 @@ pub fn irqs_disabled() -> bool {
pub struct IrqState {
/// IRQ state when count was `0`
state: AtomicBool,
/// Depth of IRQ-disabled nesting
count: AtomicIsize,
/// Depth of IRQ-disabled nesting. Index 0 specifies the count of
/// IRQ disables and the remaining indices specify the nesting count
/// for each raised TPR level.
counts: [AtomicI32; TPR_LIMIT],
/// Make the type !Send + !Sync
phantom: PhantomData<*const ()>,
}
@@ -98,7 +144,7 @@ impl IrqState {
pub fn new() -> Self {
Self {
state: AtomicBool::new(false),
count: AtomicIsize::new(0),
counts: Default::default(),
phantom: PhantomData,
}
}
@@ -120,7 +166,7 @@
/// The previous nesting level.
pub fn push_nesting(&self, was_enabled: bool) {
debug_assert!(irqs_disabled());
let val = self.count.fetch_add(1, Ordering::Relaxed);
let val = self.counts[0].fetch_add(1, Ordering::Relaxed);

assert!(val >= 0);

@@ -151,10 +197,10 @@ impl IrqState {
/// # Returns
///
/// The new IRQ nesting level.
pub fn pop_nesting(&self) -> isize {
pub fn pop_nesting(&self) -> i32 {
debug_assert!(irqs_disabled());

let val = self.count.fetch_sub(1, Ordering::Relaxed);
let val = self.counts[0].fetch_sub(1, Ordering::Relaxed);

assert!(val > 0);

@@ -181,8 +227,8 @@ impl IrqState {
/// # Returns
///
/// Levels of IRQ-disable nesting currently active
pub fn count(&self) -> isize {
self.count.load(Ordering::Relaxed)
pub fn count(&self) -> i32 {
self.counts[0].load(Ordering::Relaxed)
}

/// Changes whether interrupts will be enabled when the nesting count
Expand All @@ -192,17 +238,66 @@ impl IrqState {
/// and must ensure that the specified value is appropriate for the
/// current environment.
pub fn set_restore_state(&self, enabled: bool) {
assert!(self.count.load(Ordering::Relaxed) != 0);
assert!(self.counts[0].load(Ordering::Relaxed) != 0);
self.state.store(enabled, Ordering::Relaxed);
}

/// Increments TPR.
///
/// The caller must ensure that a `raise_tpr()` call is followed by a
/// matching call to `lower_tpr()`.
///
/// * `tpr_value` - The new TPR value. Must be greater than or equal to
/// the current TPR value.
#[inline(always)]
pub fn raise_tpr(&self, tpr_value: usize) {
assert!(tpr_value > 0 && tpr_value >= raw_get_tpr());
raw_set_tpr(tpr_value);

// Increment the count of requests to raise to this TPR to indicate
// the number of execution contexts that require this TPR.
self.counts[tpr_value].fetch_add(1, Ordering::Relaxed);
}

/// Decrements TPR.
///
/// The caller must ensure that a `lower` call balances a preceding
/// `raise` call to the indicated level.
///
/// * `tpr_value` - The TPR from which the caller would like to lower.
/// Must be less than or equal to the current TPR.
#[inline(always)]
pub fn lower_tpr(&self, tpr_value: usize) {
let current_tpr = raw_get_tpr();
debug_assert!(tpr_value <= current_tpr);

// Decrement the count of execution contexts requiring this raised
// TPR.
let count = self.counts[tpr_value].fetch_sub(1, Ordering::Relaxed);
debug_assert!(count > 0);

if count == 1 && tpr_value >= current_tpr {
// Find the highest TPR that is still required.
for new_tpr in (0..tpr_value).rev() {
if self.counts[new_tpr].load(Ordering::Relaxed) != 0 {
raw_set_tpr(new_tpr);
return;
}
}

// No TPR is still in use, so lower to zero.
raw_set_tpr(0);
}
}
}
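
The interplay of `raise_tpr()` and `lower_tpr()` above is easiest to see with a worked example. The following self-contained sketch (plain user-space Rust, not kernel code; CR8 is replaced with an ordinary field) mimics the `counts[]` bookkeeping to show why an out-of-order lower leaves TPR at the highest level that still has users:

```rust
// Standalone model of the per-TPR nesting counts. Index 0, which the real
// IrqState uses for the IRQ-disable nesting count, is simply left unused.
const TPR_LIMIT: usize = 16;

struct Model {
    counts: [u32; TPR_LIMIT],
    tpr: usize, // stands in for CR8
}

impl Model {
    fn raise(&mut self, tpr: usize) {
        assert!(tpr > 0 && tpr >= self.tpr);
        self.tpr = tpr;
        self.counts[tpr] += 1;
    }

    fn lower(&mut self, tpr: usize) {
        assert!(self.counts[tpr] > 0);
        self.counts[tpr] -= 1;
        if self.counts[tpr] == 0 && tpr >= self.tpr {
            // Fall back to the highest class that still has users, else 0.
            let new_tpr = (0..tpr).rev().find(|&t| self.counts[t] != 0).unwrap_or(0);
            self.tpr = new_tpr;
        }
    }
}

fn main() {
    let mut m = Model { counts: [0; TPR_LIMIT], tpr: 0 };
    m.raise(8);
    m.raise(9);
    m.lower(8); // out of order: TPR must stay at 9, which is still in use
    assert_eq!(m.tpr, 9);
    m.lower(9); // last user gone: TPR drops back to 0
    assert_eq!(m.tpr, 0);
}
```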

impl Drop for IrqState {
/// This struct should never be dropped. Add a debug check in case it is
/// dropped anyway.
fn drop(&mut self) {
let count = self.count.load(Ordering::Relaxed);
assert_eq!(count, 0);
for count in &self.counts {
assert_eq!(count.load(Ordering::Relaxed), 0);
}
}
}

@@ -212,7 +307,7 @@ impl Drop for IrqState {
///
/// The struct implements the `Default` and `Drop` traits for easy use.
#[derive(Debug)]
#[must_use = "if unused previous IRQ state will be immediatly restored"]
#[must_use = "if unused previous IRQ state will be immediately restored"]
pub struct IrqGuard {
/// Make the type !Send + !Sync
phantom: PhantomData<*const ()>,
@@ -244,9 +339,43 @@ impl Drop for IrqGuard {
}
}

/// A TPR guard which raises TPR upon creation. When the guard goes out of
/// scope, TPR is lowered to the highest active TPR.
///
/// The struct implements the `Drop` trait for easy use.
#[derive(Debug, Default)]
#[must_use = "if unused previous TPR will be immediately restored"]
pub struct TprGuard {
tpr_value: usize,

/// Make the type !Send + !Sync
phantom: PhantomData<*const ()>,
}

impl TprGuard {
pub fn raise(tpr_value: usize) -> Self {
// SAFETY: Safe because the struct implements `Drop`, which restores
// TPR state.
raise_tpr(tpr_value);

Self {
tpr_value,
phantom: PhantomData,
}
}
}

impl Drop for TprGuard {
fn drop(&mut self) {
// Lower TPR from the value to which it was raised.
lower_tpr(self.tpr_value);
}
}
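
As a usage sketch (hypothetical call site, not part of the diff; `MY_DEVICE_VECTOR` and `update_shared_device_state` are made-up names), code that must not be preempted by its own interrupt vector, while still allowing higher-priority vectors through, can hold a `TprGuard` for the duration of a scope:

```rust
use crate::cpu::irq_state::{tpr_from_vector, TprGuard};

/// Placeholder vector owned by some hypothetical driver.
const MY_DEVICE_VECTOR: usize = 0x65;

fn update_shared_device_state() {
    // Block delivery of MY_DEVICE_VECTOR (and any vector in the same or a
    // lower priority class) for the rest of this scope.
    let _guard = TprGuard::raise(tpr_from_vector(MY_DEVICE_VECTOR));

    // ... mutate state shared with the interrupt handler ...

    // TPR falls back to the highest still-active level when _guard drops.
}
```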

#[cfg(test)]
mod tests {
use super::*;
use crate::platform::SVSM_PLATFORM;

#[test]
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
@@ -293,4 +422,46 @@ mod tests {
raw_irqs_disable();
}
}

#[test]
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn tpr_test() {
if SVSM_PLATFORM.use_interrupts() {
assert_eq!(raw_get_tpr(), 0);
raise_tpr(7);
assert_eq!(raw_get_tpr(), 7);
raise_tpr(8);
assert_eq!(raw_get_tpr(), 8);
lower_tpr(8);
assert_eq!(raw_get_tpr(), 7);
lower_tpr(7);
assert_eq!(raw_get_tpr(), 0);
}
}

#[test]
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn tpr_guard_test() {
if SVSM_PLATFORM.use_interrupts() {
assert_eq!(raw_get_tpr(), 0);
// Test in-order raise/lower.
let g1 = TprGuard::raise(8);
assert_eq!(raw_get_tpr(), 8);
let g2 = TprGuard::raise(9);
assert_eq!(raw_get_tpr(), 9);
drop(g2);
assert_eq!(raw_get_tpr(), 8);
drop(g1);
assert_eq!(raw_get_tpr(), 0);
// Test out-of-order raise/lower.
let g1 = TprGuard::raise(8);
assert_eq!(raw_get_tpr(), 8);
let g2 = TprGuard::raise(9);
assert_eq!(raw_get_tpr(), 9);
drop(g1);
assert_eq!(raw_get_tpr(), 9);
drop(g2);
assert_eq!(raw_get_tpr(), 0);
}
}
}
4 changes: 2 additions & 2 deletions kernel/src/cpu/mod.rs
@@ -29,7 +29,7 @@ pub mod x86;

pub use apic::LocalApic;
pub use idt::common::X86ExceptionContext;
pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState};
pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable};
pub use irq_state::{irqs_disabled, irqs_enabled, IrqGuard, IrqState, TprGuard};
pub use percpu::{irq_nesting_count, irqs_disable, irqs_enable, lower_tpr, raise_tpr};
pub use registers::{X86GeneralRegs, X86InterruptFrame, X86SegmentRegs};
pub use tlb::*;
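
With these re-exports in place, other kernel code can reach the TPR primitives directly through `crate::cpu`. A minimal hypothetical call site using the explicit, non-guard form (the raise and lower must balance exactly; the function name is a placeholder):

```rust
use crate::cpu::{lower_tpr, raise_tpr};

fn touch_percpu_data_briefly() {
    // Keep class-2-and-below vectors from preempting this short section.
    raise_tpr(2);
    // ... short critical section ...
    lower_tpr(2);
}
```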