platform: abstract tlb flush
Different platforms implement TLB flushes in different ways, so TLB flush
operations should be abstracted behind the platform object.

Signed-off-by: Jon Lange <[email protected]>
msft-jlange committed Dec 19, 2024
1 parent 08c12fd commit d35cc10
Showing 6 changed files with 149 additions and 46 deletions.
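Before the per-file diffs, here is a self-contained sketch of the shape of the change: a platform trait with a default, IPI-based flush that SEV-SNP overrides with its INVLPGB/TLBSYNC path. The `ipi_flush` and `invlpgb_flush` stand-ins are hypothetical placeholders for the real `cpu::tlb::flush_tlb` and `sev::tlb::flush_tlb_scope` shown below; only the type and method names mirror the commit.

```rust
/// Scope of a TLB flush request (mirrors cpu::tlb::TlbFlushScope).
#[derive(Copy, Clone, Debug)]
pub enum TlbFlushScope {
    AllGlobal,
    AllNonGlobal,
}

/// Platform abstraction: the default flush asks every processor to flush
/// locally; platforms with a hardware broadcast mechanism override it.
pub trait SvsmPlatform {
    fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
        ipi_flush(flush_scope); // stand-in for the IPI-based default
    }
}

/// SEV-SNP can use the INVLPGB/TLBSYNC broadcast instructions instead.
pub struct SnpPlatform;

impl SvsmPlatform for SnpPlatform {
    fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
        invlpgb_flush(flush_scope); // stand-in for sev::tlb::flush_tlb_scope
    }
}

// Hypothetical stand-ins so the sketch compiles on its own.
fn ipi_flush(scope: &TlbFlushScope) {
    println!("IPI-based flush, scope = {scope:?}");
}

fn invlpgb_flush(scope: &TlbFlushScope) {
    println!("INVLPGB/TLBSYNC flush, scope = {scope:?}");
}

fn main() {
    SnpPlatform.flush_tlb(&TlbFlushScope::AllGlobal);
}
```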
kernel/src/cpu/tlb.rs: 100 changes (54 additions, 46 deletions)
@@ -5,55 +5,69 @@
// Author: Joerg Roedel <[email protected]>

use crate::address::{Address, VirtAddr};
use crate::cpu::control_regs::{read_cr4, write_cr4, CR4Flags};
use crate::cpu::control_regs::{read_cr3, read_cr4, write_cr3, write_cr4, CR4Flags};
use crate::cpu::ipi::{send_multicast_ipi, IpiMessage, IpiTarget};
use crate::platform::SVSM_PLATFORM;

use core::arch::asm;
use core::sync::atomic::{AtomicBool, Ordering};

const INVLPGB_VALID_VA: u64 = 1u64 << 0;
//const INVLPGB_VALID_PCID: u64 = 1u64 << 1;
const INVLPGB_VALID_ASID: u64 = 1u64 << 2;
const INVLPGB_VALID_GLOBAL: u64 = 1u64 << 3;
static FLUSH_SMP: AtomicBool = AtomicBool::new(false);

#[inline]
fn do_invlpgb(rax: u64, rcx: u64, rdx: u64) {
    // SAFETY: Inline assembly to invalidate TLB Entries, which does not change
    // any state related to memory safety.
    unsafe {
        asm!("invlpgb",
             in("rax") rax,
             in("rcx") rcx,
             in("rdx") rdx,
             options(att_syntax));
    }
/// Defines the scope of a TLB flush.
/// * [AllGlobal] means that all addresses must be flushed on all processors,
/// including global addresses.
/// * [AllNonGlobal] means that all addresses must be flushed on all
/// processors, excluding global addresses.
#[derive(Copy, Clone, Debug)]
pub enum TlbFlushScope {
    AllGlobal,
    AllNonGlobal,
}

#[inline]
fn do_tlbsync() {
    // SAFETY: Inline assembly to synchronize TLB invalidations. It does not
    // change any state.
    unsafe {
        asm!("tlbsync", options(att_syntax));
impl TlbFlushScope {
    pub fn flush_percpu(&self) {
        match self {
            Self::AllGlobal => flush_tlb_global_percpu(),
            Self::AllNonGlobal => flush_tlb_percpu(),
        }
    }

    pub fn flush_all(&self) {
        // If SMP has not yet been started, then perform all flushes as local only.
        // Prior to SMP startup, there is no need to reach into other processors,
        // and the SVSM platform object may not even exist when flushes are
        // attempted prior to SMP startup.
        if FLUSH_SMP.load(Ordering::Relaxed) {
            SVSM_PLATFORM.flush_tlb(self);
        } else {
            self.flush_percpu();
        }
    }
}

pub fn flush_tlb() {
    let rax: u64 = INVLPGB_VALID_ASID;
    do_invlpgb(rax, 0, 0);
/// # Safety
/// The TlbFlushScope structure contains no references and can safely rely on
/// the default implementation of the IPI message copy routines.
unsafe impl IpiMessage for TlbFlushScope {
    fn invoke(&self) {
        self.flush_percpu();
    }
}

pub fn flush_tlb_sync() {
    flush_tlb();
    do_tlbsync();
pub fn flush_tlb(flush_scope: &TlbFlushScope) {
    // SAFETY: the TLB flush scope structure correctly implements cross-context
    // address safety because it contains no references.
    send_multicast_ipi(IpiTarget::All, flush_scope);
}

pub fn flush_tlb_global() {
    let rax: u64 = INVLPGB_VALID_ASID | INVLPGB_VALID_GLOBAL;
    do_invlpgb(rax, 0, 0);
pub fn set_tlb_flush_smp() {
    FLUSH_SMP.store(true, Ordering::Relaxed);
}

pub fn flush_tlb_global_sync() {
    flush_tlb_global();
    do_tlbsync();
    let flush_scope = TlbFlushScope::AllGlobal;
    flush_scope.flush_all();
}

pub fn flush_tlb_global_percpu() {
@@ -66,6 +80,13 @@ pub fn flush_tlb_global_percpu() {
    }
}

pub fn flush_tlb_percpu() {
    // SAFETY: reloading CR3 with its current value is always safe.
    unsafe {
        write_cr3(read_cr3());
    }
}

pub fn flush_address_percpu(va: VirtAddr) {
    let va: u64 = va.page_align().bits() as u64;
    // SAFETY: Inline assembly to invalidate TLB Entries, which does not change
@@ -76,16 +97,3 @@ pub fn flush_address_percpu(va: VirtAddr) {
             options(att_syntax));
    }
}

pub fn flush_address(va: VirtAddr) {
    let rax: u64 = (va.page_align().bits() as u64)
        | INVLPGB_VALID_VA
        | INVLPGB_VALID_ASID
        | INVLPGB_VALID_GLOBAL;
    do_invlpgb(rax, 0, 0);
}

pub fn flush_address_sync(va: VirtAddr) {
    flush_address(va);
    do_tlbsync();
}
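To show how the reworked kernel/src/cpu/tlb.rs API above is meant to be called, a hypothetical in-kernel usage sketch; the helper function is invented for illustration, and only the imported names come from the commit.

```rust
use crate::cpu::tlb::{flush_tlb_global_sync, TlbFlushScope};

/// Hypothetical helper: after editing kernel page tables, request a
/// system-wide flush that includes global entries.
fn after_kernel_mapping_change() {
    // Routed through SVSM_PLATFORM.flush_tlb() once set_tlb_flush_smp() has
    // been called; performed locally before SMP startup.
    flush_tlb_global_sync();

    // Equivalent, with the scope spelled out explicitly.
    TlbFlushScope::AllGlobal.flush_all();
}
```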
kernel/src/platform/mod.rs: 6 changes (6 additions, 0 deletions)
@@ -22,6 +22,7 @@ use crate::address::{PhysAddr, VirtAddr};
use crate::config::SvsmConfig;
use crate::cpu::cpuid::CpuidResult;
use crate::cpu::percpu::PerCpu;
use crate::cpu::tlb::{flush_tlb, TlbFlushScope};
use crate::error::SvsmError;
use crate::hyperv;
use crate::io::IOPort;
@@ -137,6 +138,11 @@ pub trait SvsmPlatform {
        op: PageValidateOp,
    ) -> Result<(), SvsmError>;

    /// Performs a system-wide TLB flush.
    fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
        flush_tlb(flush_scope);
    }

    /// Configures the use of alternate injection as requested.
    fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError>;

kernel/src/platform/snp.rs: 6 changes (6 additions, 0 deletions)
@@ -12,6 +12,7 @@ use crate::config::SvsmConfig;
use crate::console::init_svsm_console;
use crate::cpu::cpuid::{cpuid_table, CpuidResult};
use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpu};
use crate::cpu::tlb::TlbFlushScope;
use crate::error::ApicError::Registration;
use crate::error::SvsmError;
use crate::greq::driver::guest_request_driver_init;
@@ -26,6 +27,7 @@ use crate::sev::msr_protocol::{
hypervisor_ghcb_features, request_termination_msr, verify_ghcb_version, GHCBHvFeatures,
};
use crate::sev::status::{sev_restricted_injection, vtom_enabled};
use crate::sev::tlb::flush_tlb_scope;
use crate::sev::{
init_hypervisor_ghcb_features, pvalidate_range, sev_status_init, sev_status_verify, PvalidateOp,
};
@@ -237,6 +239,10 @@ impl SvsmPlatform for SnpPlatform {
        pvalidate_range(region, PvalidateOp::from(op))
    }

    fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
        flush_tlb_scope(flush_scope);
    }

    fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError> {
        if !alt_inj_requested {
            return Ok(());
kernel/src/sev/mod.rs: 1 change (1 addition, 0 deletions)
@@ -9,6 +9,7 @@ pub mod hv_doorbell;
pub mod msr_protocol;
pub mod secrets_page;
pub mod status;
pub mod tlb;
pub mod vmsa;

pub mod utils;
kernel/src/sev/tlb.rs: 77 changes (77 additions, 0 deletions)
@@ -0,0 +1,77 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022-2023 SUSE LLC
//
// Author: Joerg Roedel <[email protected]>

use crate::address::{Address, VirtAddr};
use crate::cpu::tlb::TlbFlushScope;

use core::arch::asm;

const INVLPGB_VALID_VA: u64 = 1u64 << 0;
//const INVLPGB_VALID_PCID: u64 = 1u64 << 1;
const INVLPGB_VALID_ASID: u64 = 1u64 << 2;
const INVLPGB_VALID_GLOBAL: u64 = 1u64 << 3;

#[inline]
fn do_invlpgb(rax: u64, rcx: u64, rdx: u64) {
    // SAFETY: Inline assembly to invalidate TLB Entries, which does not change
    // any state related to memory safety.
    unsafe {
        asm!("invlpgb",
             in("rax") rax,
             in("rcx") rcx,
             in("rdx") rdx,
             options(att_syntax));
    }
}

#[inline]
fn do_tlbsync() {
    // SAFETY: Inline assembly to synchronize TLB invalidations. It does not
    // change any state.
    unsafe {
        asm!("tlbsync", options(att_syntax));
    }
}

pub fn flush_tlb() {
    let rax: u64 = INVLPGB_VALID_ASID;
    do_invlpgb(rax, 0, 0);
}

pub fn flush_tlb_sync() {
    flush_tlb();
    do_tlbsync();
}

pub fn flush_tlb_global() {
    let rax: u64 = INVLPGB_VALID_ASID | INVLPGB_VALID_GLOBAL;
    do_invlpgb(rax, 0, 0);
}

pub fn flush_tlb_global_sync() {
    flush_tlb_global();
    do_tlbsync();
}

pub fn flush_address(va: VirtAddr) {
    let rax: u64 = (va.page_align().bits() as u64)
        | INVLPGB_VALID_VA
        | INVLPGB_VALID_ASID
        | INVLPGB_VALID_GLOBAL;
    do_invlpgb(rax, 0, 0);
}

pub fn flush_address_sync(va: VirtAddr) {
    flush_address(va);
    do_tlbsync();
}

pub fn flush_tlb_scope(flush_scope: &TlbFlushScope) {
    match flush_scope {
        TlbFlushScope::AllGlobal => flush_tlb_global_sync(),
        TlbFlushScope::AllNonGlobal => flush_tlb_sync(),
    }
}
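As a quick, standalone sanity check of the rax encoding that flush_address() builds above; the example address is arbitrary, and the bit constants mirror the ones defined at the top of this file.

```rust
const INVLPGB_VALID_VA: u64 = 1u64 << 0;
const INVLPGB_VALID_ASID: u64 = 1u64 << 2;
const INVLPGB_VALID_GLOBAL: u64 = 1u64 << 3;

fn main() {
    // Hypothetical virtual address; page-align it as flush_address() does.
    let va: u64 = 0xffff_8000_0012_3456 & !0xfff;
    let rax = va | INVLPGB_VALID_VA | INVLPGB_VALID_ASID | INVLPGB_VALID_GLOBAL;
    assert_eq!(rax, 0xffff_8000_0012_300d);
    println!("invlpgb rax = {rax:#x}");
}
```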
kernel/src/svsm.rs: 5 changes (5 additions, 0 deletions)
@@ -27,6 +27,7 @@ use svsm::cpu::shadow_stack::{
};
use svsm::cpu::smp::start_secondary_cpus;
use svsm::cpu::sse::sse_init;
use svsm::cpu::tlb::set_tlb_flush_smp;
use svsm::debug::gdbstub::svsm_gdbstub::{debug_break, gdbstub_start};
use svsm::debug::stacktrace::print_stack;
use svsm::enable_shadow_stacks;
@@ -311,6 +312,10 @@ pub extern "C" fn svsm_main() {

    log::info!("{} CPU(s) present", nr_cpus);

    // Advise the TLB package that future TLB flushes will have to be done with
    // SMP scope.
    set_tlb_flush_smp();

    start_secondary_cpus(&**SVSM_PLATFORM, &cpus);

    if let Err(e) = SVSM_PLATFORM.prepare_fw(&config, new_kernel_region(&LAUNCH_INFO)) {
