platform: abstract tlb flush #582

Merged 1 commit on Jan 21, 2025
98 changes: 52 additions & 46 deletions kernel/src/cpu/tlb.rs
@@ -5,55 +5,67 @@
// Author: Joerg Roedel <[email protected]>

use crate::address::{Address, VirtAddr};
use crate::cpu::control_regs::{read_cr4, write_cr4, CR4Flags};
use crate::cpu::control_regs::{read_cr3, read_cr4, write_cr3, write_cr4, CR4Flags};
use crate::cpu::ipi::{send_multicast_ipi, IpiMessage, IpiTarget};
use crate::platform::SVSM_PLATFORM;

use core::arch::asm;
use core::sync::atomic::{AtomicBool, Ordering};

const INVLPGB_VALID_VA: u64 = 1u64 << 0;
//const INVLPGB_VALID_PCID: u64 = 1u64 << 1;
const INVLPGB_VALID_ASID: u64 = 1u64 << 2;
const INVLPGB_VALID_GLOBAL: u64 = 1u64 << 3;
static FLUSH_SMP: AtomicBool = AtomicBool::new(false);

#[inline]
fn do_invlpgb(rax: u64, rcx: u64, rdx: u64) {
// SAFETY: Inline assembly to invalidate TLB Entries, which does not change
// any state related to memory safety.
unsafe {
asm!("invlpgb",
in("rax") rax,
in("rcx") rcx,
in("rdx") rdx,
options(att_syntax));
}
/// Defines the scope of a TLB flush.
#[derive(Copy, Clone, Debug)]
pub enum TlbFlushScope {
/// Indicates that all addresses must be flushed on all processors,
/// including global addresses.
AllGlobal,

/// Indicates that all addresses must be flushed on all processors,
/// excluding global addresses.
AllNonGlobal,
}

#[inline]
fn do_tlbsync() {
// SAFETY: Inline assembly to synchronize TLB invalidations. It does not
// change any state.
unsafe {
asm!("tlbsync", options(att_syntax));
impl TlbFlushScope {
pub fn flush_percpu(&self) {
match self {
Self::AllGlobal => flush_tlb_global_percpu(),
Self::AllNonGlobal => flush_tlb_percpu(),
}
}

pub fn flush_all(&self) {
// If SMP has not yet been started, then perform all flushes as local only.
// Prior to SMP startup, there is no need to reach into other processors,
// and the SVSM platform object may not even exist yet.
if FLUSH_SMP.load(Ordering::Relaxed) {
SVSM_PLATFORM.flush_tlb(self);
} else {
self.flush_percpu();
}
}
}

pub fn flush_tlb() {
let rax: u64 = INVLPGB_VALID_ASID;
do_invlpgb(rax, 0, 0);
// SAFETY: The TlbFlushScope structure contains no references and can safely
// rely on the default implementation of the IPI message copy routines.
unsafe impl IpiMessage for TlbFlushScope {
fn invoke(&self) {
self.flush_percpu();
}
}

pub fn flush_tlb_sync() {
flush_tlb();
do_tlbsync();
pub fn flush_tlb(flush_scope: &TlbFlushScope) {
send_multicast_ipi(IpiTarget::All, flush_scope);
}

pub fn flush_tlb_global() {
let rax: u64 = INVLPGB_VALID_ASID | INVLPGB_VALID_GLOBAL;
do_invlpgb(rax, 0, 0);
pub fn set_tlb_flush_smp() {
FLUSH_SMP.store(true, Ordering::Relaxed);
}

pub fn flush_tlb_global_sync() {
flush_tlb_global();
do_tlbsync();
let flush_scope = TlbFlushScope::AllGlobal;
flush_scope.flush_all();
}

pub fn flush_tlb_global_percpu() {
@@ -66,6 +78,13 @@ pub fn flush_tlb_global_percpu() {
}
}

pub fn flush_tlb_percpu() {
// SAFETY: reloading CR3 with its current value is always safe.
unsafe {
write_cr3(read_cr3());
}
}

pub fn flush_address_percpu(va: VirtAddr) {
let va: u64 = va.page_align().bits() as u64;
// SAFETY: Inline assembly to invalidate TLB Entries, which does not change
@@ -76,16 +95,3 @@ pub fn flush_address_percpu(va: VirtAddr) {
options(att_syntax));
}
}

pub fn flush_address(va: VirtAddr) {
let rax: u64 = (va.page_align().bits() as u64)
| INVLPGB_VALID_VA
| INVLPGB_VALID_ASID
| INVLPGB_VALID_GLOBAL;
do_invlpgb(rax, 0, 0);
}

pub fn flush_address_sync(va: VirtAddr) {
flush_address(va);
do_tlbsync();
}
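For orientation, here is a minimal sketch of how callers are expected to use the new scope-based API. The function below is hypothetical and not part of this diff; it only illustrates how the SMP gate changes the behavior of TlbFlushScope::flush_all().

// Illustrative sketch only; example_flush_sequence() is hypothetical.
use svsm::cpu::tlb::{flush_tlb_global_sync, set_tlb_flush_smp, TlbFlushScope};

fn example_flush_sequence() {
    // Before set_tlb_flush_smp(): flush_all() falls back to flush_percpu(),
    // i.e. a purely local flush (CR3 reload, or a CR4.PGE toggle for the
    // global variant), since no other processors are running yet.
    TlbFlushScope::AllNonGlobal.flush_all();

    // Advise the TLB code that secondary processors are about to start.
    set_tlb_flush_smp();

    // From now on flush_all() routes through SVSM_PLATFORM.flush_tlb(), which
    // either multicasts the scope as an IPI (default) or uses INVLPGB on SNP.
    flush_tlb_global_sync();
}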
6 changes: 6 additions & 0 deletions kernel/src/platform/mod.rs
@@ -22,6 +22,7 @@ use crate::address::{PhysAddr, VirtAddr};
use crate::config::SvsmConfig;
use crate::cpu::cpuid::CpuidResult;
use crate::cpu::percpu::PerCpu;
use crate::cpu::tlb::{flush_tlb, TlbFlushScope};
use crate::error::SvsmError;
use crate::hyperv;
use crate::io::IOPort;
@@ -140,6 +141,11 @@ pub trait SvsmPlatform {
op: PageValidateOp,
) -> Result<(), SvsmError>;

/// Performs a system-wide TLB flush.
fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
flush_tlb(flush_scope);
}

/// Configures the use of alternate injection as requested.
fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError>;

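As a usage note (illustrative, not part of this change): callers hand a TlbFlushScope to the platform object instead of choosing a flush mechanism themselves. The hypothetical helper below shows the dispatch path through the trait.

// Hypothetical helper, shown only to illustrate the dispatch path.
use svsm::cpu::tlb::TlbFlushScope;
use svsm::platform::{SvsmPlatform, SVSM_PLATFORM};

fn example_platform_flush() {
    // On SEV-SNP this resolves to SnpPlatform::flush_tlb() (INVLPGB + TLBSYNC);
    // on platforms without an override it falls back to the trait default,
    // which multicasts the scope as an IPI to all processors.
    SVSM_PLATFORM.flush_tlb(&TlbFlushScope::AllGlobal);
}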
6 changes: 6 additions & 0 deletions kernel/src/platform/snp.rs
@@ -12,6 +12,7 @@ use crate::config::SvsmConfig;
use crate::console::init_svsm_console;
use crate::cpu::cpuid::{cpuid_table, CpuidResult};
use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpu};
use crate::cpu::tlb::TlbFlushScope;
use crate::error::ApicError::Registration;
use crate::error::SvsmError;
use crate::greq::driver::guest_request_driver_init;
@@ -26,6 +27,7 @@ use crate::sev::msr_protocol::{
hypervisor_ghcb_features, request_termination_msr, verify_ghcb_version, GHCBHvFeatures,
};
use crate::sev::status::vtom_enabled;
use crate::sev::tlb::flush_tlb_scope;
use crate::sev::{
init_hypervisor_ghcb_features, pvalidate_range, sev_status_init, sev_status_verify, PvalidateOp,
};
@@ -231,6 +233,10 @@ impl SvsmPlatform for SnpPlatform {
pvalidate_range(region, PvalidateOp::from(op))
}

fn flush_tlb(&self, flush_scope: &TlbFlushScope) {
flush_tlb_scope(flush_scope);
}

fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError> {
if !alt_inj_requested {
return Ok(());
1 change: 1 addition & 0 deletions kernel/src/sev/mod.rs
@@ -9,6 +9,7 @@ pub mod hv_doorbell;
pub mod msr_protocol;
pub mod secrets_page;
pub mod status;
pub mod tlb;
pub mod vmsa;

pub mod utils;
77 changes: 77 additions & 0 deletions kernel/src/sev/tlb.rs
@@ -0,0 +1,77 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022-2023 SUSE LLC
//
// Author: Joerg Roedel <[email protected]>

use crate::address::{Address, VirtAddr};
use crate::cpu::tlb::TlbFlushScope;

use core::arch::asm;

const INVLPGB_VALID_VA: u64 = 1u64 << 0;
//const INVLPGB_VALID_PCID: u64 = 1u64 << 1;
const INVLPGB_VALID_ASID: u64 = 1u64 << 2;
const INVLPGB_VALID_GLOBAL: u64 = 1u64 << 3;

#[inline]
fn do_invlpgb(rax: u64, rcx: u64, rdx: u64) {
// SAFETY: Inline assembly to invalidate TLB Entries, which does not change
// any state related to memory safety.
unsafe {
asm!("invlpgb",
in("rax") rax,
in("rcx") rcx,
in("rdx") rdx,
options(att_syntax));
}
}

#[inline]
fn do_tlbsync() {
// SAFETY: Inline assembly to synchronize TLB invalidations. It does not
// change any state.
unsafe {
asm!("tlbsync", options(att_syntax));
}
}

pub fn flush_tlb() {
let rax: u64 = INVLPGB_VALID_ASID;
do_invlpgb(rax, 0, 0);
}

pub fn flush_tlb_sync() {
flush_tlb();
do_tlbsync();
}

pub fn flush_tlb_global() {
let rax: u64 = INVLPGB_VALID_ASID | INVLPGB_VALID_GLOBAL;
do_invlpgb(rax, 0, 0);
}

pub fn flush_tlb_global_sync() {
flush_tlb_global();
do_tlbsync();
}

pub fn flush_address(va: VirtAddr) {
let rax: u64 = (va.page_align().bits() as u64)
| INVLPGB_VALID_VA
| INVLPGB_VALID_ASID
| INVLPGB_VALID_GLOBAL;
do_invlpgb(rax, 0, 0);
}

pub fn flush_address_sync(va: VirtAddr) {
flush_address(va);
do_tlbsync();
}

pub fn flush_tlb_scope(flush_scope: &TlbFlushScope) {
match flush_scope {
TlbFlushScope::AllGlobal => flush_tlb_global_sync(),
TlbFlushScope::AllNonGlobal => flush_tlb_sync(),
}
}
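For completeness, a small sketch (the function name is hypothetical) of what each scope means for the SNP backend, based on the constants and functions above.

// Illustrative only: maps each TlbFlushScope variant to its INVLPGB request.
use svsm::cpu::tlb::TlbFlushScope;
use svsm::sev::tlb::flush_tlb_scope;

fn example_snp_flush() {
    // INVLPGB with VALID_ASID | VALID_GLOBAL in RAX, followed by TLBSYNC to
    // wait for completion on all processors.
    flush_tlb_scope(&TlbFlushScope::AllGlobal);

    // INVLPGB with only VALID_ASID set, leaving global translations intact,
    // again followed by TLBSYNC.
    flush_tlb_scope(&TlbFlushScope::AllNonGlobal);
}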
5 changes: 5 additions & 0 deletions kernel/src/svsm.rs
@@ -27,6 +27,7 @@ use svsm::cpu::shadow_stack::{
};
use svsm::cpu::smp::start_secondary_cpus;
use svsm::cpu::sse::sse_init;
use svsm::cpu::tlb::set_tlb_flush_smp;
use svsm::debug::gdbstub::svsm_gdbstub::{debug_break, gdbstub_start};
use svsm::debug::stacktrace::print_stack;
use svsm::enable_shadow_stacks;
@@ -311,6 +312,10 @@ pub extern "C" fn svsm_main() {

log::info!("{} CPU(s) present", nr_cpus);

// Advise the TLB package that future TLB flushes will have to be done with
// SMP scope.
set_tlb_flush_smp();

start_secondary_cpus(&**SVSM_PLATFORM, &cpus);

if let Err(e) = SVSM_PLATFORM.prepare_fw(&config, new_kernel_region(&LAUNCH_INFO)) {