diff --git a/src/arch/armv8/armv8-a/inc/arch/smmuv3.h b/src/arch/armv8/armv8-a/inc/arch/smmuv3.h
new file mode 100644
index 00000000..551d7e42
--- /dev/null
+++ b/src/arch/armv8/armv8-a/inc/arch/smmuv3.h
@@ -0,0 +1,475 @@
+/**
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) Bao Project and Contributors. All rights reserved.
+ */
+
+/*
+ ? Questions:
+    - Target platform to determine microarchitecture-dependent details
+    - Ask whether Bao supports MSI interrupts. The SMMUv3 generates MSIs in a standardized manner.
+      Wired interrupts are generated in an implementation-defined way.
+*/
+
+#ifndef __ARCH_SMMUV3_H__
+#define __ARCH_SMMUV3_H__
+
+#include <bao.h>
+#include <bit.h>
+#include <platform.h>
+#include <vm.h>
+
+// #-------------
+// # Register Map
+// #-------------
+
+// # Page 0
+
+/* IDR0 */
+#define SMMUV3_IDR0_S2P_BIT (0x1UL << 0) // Stage-2 translation support
+#define SMMUV3_IDR0_TTF_OFF (2) // Translation tables format support //? Check target platform requirements
+#define SMMUV3_IDR0_TTF_LEN (2)
+#define SMMUV3_IDR0_COHACC_BIT (0x1UL << 4) // Coherent access support
+#define SMMUV3_IDR0_BTM_BIT (0x1UL << 5) // Broadcast TLB Maintenance support
+// A and D bits update support ignored
+// Dormhint support ignored (to check if the SMMU has any cached data)
+// Hyp Stage-1 context support ignored
+// ATS ignored
+// ASID ignored
+// MSI generation support ignored
+// ATOS support ignored (Register-based address translation, like debug reg IF in RISC-V)
+// PRI ignored
+#define SMMUV3_IDR0_VMW_BIT (0x1UL << 17) // VMID matching for invalidation
+#define SMMUV3_IDR0_VMID16_BIT (0x1UL << 18) // 16-bit VMID support
+// 2-lvl CDT ignored
+// VATOS ignored
+// endianness ignored
+// stall model support ignored
+// terminate model behavior is ignored since stage-2 faulting transactions always abort
+#define SMMUV3_IDR0_ST_LEVEL_OFF (27) // Multi-lvl ST support
+#define SMMUV3_IDR0_ST_LEVEL_LEN (2)
+
+/* IDR1 */
+#define SMMUV3_IDR1_SIDSIZE_OFF (0) // Supported max width of the StreamID (max 32)
+#define SMMUV3_IDR1_SIDSIZE_LEN (6)
+// SubstreamID ignored
+// PRI queue ignored
+#define SMMUV3_IDR1_EVENTQS_OFF (16) // Max number of EVTQ entries as log2(N)
+#define SMMUV3_IDR1_EVENTQS_LEN (5)
+#define SMMUV3_IDR1_CMDQS_OFF (21) // Max number of CMDQ entries as log2(N)
+#define SMMUV3_IDR1_CMDQS_LEN (5)
+// Memory attributes override ignored
+#define SMMUV3_IDR1_REL_BIT (1UL << 28)
+#define SMMUV3_IDR1_QUEUES_PRESET_BIT (1UL << 29)
+#define SMMUV3_IDR1_TABLES_PRESET_BIT (1UL << 30)
+// Enhanced CQ Interface support ignored
+
+/* IDR2 */
+// VATOS support ignored
+
+/* IDR3 */
+// All ignored
+
+/* IDR4 */
+// Implementation defined. Ignored
+
+/* IDR5 */
+#define SMMUV3_IDR5_OAS_OFF (0) // Output Address Size
+#define SMMUV3_IDR5_OAS_LEN (3)
+#define SMMUV3_IDR5_GRAN4K_BIT (0x1UL << 4) // 4 KiB translation granule support
+// 16k and 64k translation granule support ignored (//? check whether Bao uses these)
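For illustration, a minimal sketch of how these ID-register masks are meant to be consumed, assuming bao's `bit32_extract()` from `bit.h` and the `smmuv3_regmap_p0_hw` struct defined further down; the `smmuv3_caps` struct and helper name are illustrative, not part of the patch:

```c
// Illustrative only: decoding a couple of IDR fields with the masks above.
struct smmuv3_caps {
    bool s2p;        // stage-2 translation supported (IDR0.S2P)
    size_t sid_bits; // max StreamID width (IDR1.SIDSIZE)
};

static struct smmuv3_caps smmuv3_read_caps(volatile struct smmuv3_regmap_p0_hw* r)
{
    struct smmuv3_caps caps;
    caps.s2p = (r->IDR0 & SMMUV3_IDR0_S2P_BIT) != 0;
    caps.sid_bits = bit32_extract(r->IDR1, SMMUV3_IDR1_SIDSIZE_OFF, SMMUV3_IDR1_SIDSIZE_LEN);
    return caps;
}
```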
+// VAX and STALL_MAX ignored
+
+/* IIDR */
+// All ignored
+
+/* AIDR */
+// Minor arch version ignored
+#define SMMUV3_AIDR_MAJOR_OFF (4) // Major SMMU arch version (must be 0 => SMMUv3)
+#define SMMUV3_AIDR_MAJOR_LEN (4)
+
+/* CR0 */
+#define SMMUV3_CR0_SMMUEN_BIT (0x1UL << 0)
+// PRIQ ignored
+#define SMMUV3_CR0_EVENTQEN_BIT (0x1UL << 2)
+#define SMMUV3_CR0_CMDQEN_BIT (0x1UL << 3)
+// ATS check ignored
+// VMID grouping for invalidations ignored
+
+/* CR0 ACK */
+#define SMMUV3_CR0ACK_SMMUEN_BIT (0x1UL << 0)
+// PRIQ ignored
+#define SMMUV3_CR0ACK_EVENTQEN_BIT (0x1UL << 2)
+#define SMMUV3_CR0ACK_CMDQEN_BIT (0x1UL << 3)
+
+/* CR1 */
+// Queue and Table Cacheability and Shareability ignored
+
+/* CR2 */
+// EL2-E2H translation regime enable ignored
+#define SMMUV3_CR2_RECINVSID_BIT (0x1UL << 1) // Generate fault records for invalid StreamIDs
+#define SMMUV3_CR2_PTM_BIT (0x1UL << 2) // Private TLB maintenance (ignore broadcast TLB operations)
+// ATS fault records ignored
+
+/* STATUSR */
+// Dormant ignored
+
+/* GBPA */
+#define SMMUV3_GBPA_MEMATTR_OFF (0) // Memory type
+#define SMMUV3_GBPA_MEMATTR_LEN (4)
+#define SMMUV3_GBPA_MTCFG_BIT (0x1UL << 4) // Memory type override (select input or override)
+#define SMMUV3_GBPA_ALLOCCFG_OFF (8) // Alloc config
+#define SMMUV3_GBPA_ALLOCCFG_LEN (4)
+#define SMMUV3_GBPA_SHCFG_OFF (12) // Shareability config
+#define SMMUV3_GBPA_SHCFG_LEN (2)
+#define SMMUV3_GBPA_PRIVCFG_OFF (16) // Privileged config
+#define SMMUV3_GBPA_PRIVCFG_LEN (2)
+#define SMMUV3_GBPA_INSTCFG_OFF (18) // Instruction/Data config
+#define SMMUV3_GBPA_INSTCFG_LEN (2)
+#define SMMUV3_GBPA_ABORT_BIT (0x1UL << 20) // Do not bypass transactions in the SMMU
+#define SMMUV3_GBPA_UPDATE_BIT (0x1UL << 31) // Set to 1 when updating the register; HW clears it
+
+/* AGBPA */
+// Ignored
+
+/* IRQ_CTRL */
+#define SMMUV3_IRQCTRL_GERROR_BIT (0x1UL << 0) // Global errors interrupts enable
+// PRI IRQ control ignored
+#define SMMUV3_IRQCTRL_EVENTQ_BIT (0x1UL << 2) // Event queue interrupts enable
+
+/* IRQ_CTRL_ACK */
+#define SMMUV3_IRQCTRLACK_GERROR_BIT (0x1UL << 0) // Global errors interrupts enable ACK
+#define SMMUV3_IRQCTRLACK_EVENTQ_BIT (0x1UL << 2) // Event queue interrupts enable ACK
+
+/* GERROR */
+// unknown CMD errors ignored since we are not issuing commands
+#define SMMUV3_GERROR_EVENTQ_ABT_BIT (0x1UL << 2) // An access to the EQ was terminated with abort
+// PRI ignored
+// CMD sync MSI write error ignored
+// EVENTQ MSI write error ignored (we will use WSI)
+// PRI MSI write ignored
+// GERROR MSI write notification error ignored
+#define SMMUV3_GERROR_SFM_ERR_BIT (0x1UL << 8) // The SMMU entered Service Failure Mode. Traffic stopped
+// Extended command queue errors ignored
+
+/* GERROR_N (to acknowledge global errors by toggling bits) */
+#define SMMUV3_GERROR_N_EVENTQ_ABT_BIT (0x1UL << 2) // An access to the EQ was terminated with abort
+#define SMMUV3_GERROR_N_SFM_ERR_BIT (0x1UL << 8) // The SMMU entered Service Failure Mode. Traffic stopped
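The GBPA fields above are written with the spec's update protocol: software sets the new field values together with UPDATE (the renamed bit-31 define), then polls until the SMMU clears UPDATE. A sketch, with a hypothetical helper name:

```c
// Sketch: force incoming transactions to abort while the SMMU is still disabled.
static void smmuv3_gbpa_set_abort(volatile struct smmuv3_regmap_p0_hw* r)
{
    r->GBPA = SMMUV3_GBPA_ABORT_BIT | SMMUV3_GBPA_UPDATE_BIT;
    while (r->GBPA & SMMUV3_GBPA_UPDATE_BIT) {
        // HW clears UPDATE once the new bypass configuration takes effect
    }
}
```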
+
+/* GERROR_IRQ_CFG0 (MSI Addr) */
+// Ignored since we are not using MSIs to notify global errors
+
+/* GERROR_IRQ_CFG1 (MSI Data) */
+// Ignored since we are not using MSIs to notify global errors
+
+/* GERROR_IRQ_CFG2 (MSI Shareability and Mem type) */
+// Ignored since we are not using MSIs to notify global errors
+
+/* STRTAB_BASE (64-bit) */
+#define SMMUV3_STRTAB_BASE_ADDR_OFF (6) // Base address of the ST
+#define SMMUV3_STRTAB_BASE_ADDR_LEN (46)
+#define SMMUV3_STRTAB_BASE_ADDR_MASK \
+    BIT64_MASK(SMMUV3_STRTAB_BASE_ADDR_OFF, SMMUV3_STRTAB_BASE_ADDR_LEN)
+#define SMMUV3_STRTAB_BASE_RA_BIT (1ULL << 62) // Read-allocate hint
+
+/* STRTAB_BASE_CFG */
+#define SMMUV3_STRTAB_BASE_CFG_LOG2SZ_OFF (0) // Size of the ST as log2(N)
+#define SMMUV3_STRTAB_BASE_CFG_LOG2SZ_LEN (6)
+#define SMMUV3_STRTAB_BASE_CFG_LOG2SZ_MASK \
+    BIT32_MASK(SMMUV3_STRTAB_BASE_CFG_LOG2SZ_OFF, SMMUV3_STRTAB_BASE_CFG_LOG2SZ_LEN)
+#define SMMUV3_STRTAB_BASE_CFG_SPLIT_OFF (6) // StreamID split point for 2-lvl STs
+#define SMMUV3_STRTAB_BASE_CFG_SPLIT_LEN (5)
+#define SMMUV3_STRTAB_BASE_CFG_SPLIT_MASK \
+    BIT32_MASK(SMMUV3_STRTAB_BASE_CFG_SPLIT_OFF, SMMUV3_STRTAB_BASE_CFG_SPLIT_LEN)
+#define SMMUV3_STRTAB_BASE_CFG_FMT_OFF (16) // ST format
+#define SMMUV3_STRTAB_BASE_CFG_FMT_LEN (2)
+#define SMMUV3_STRTAB_BASE_CFG_FMT_MASK \
+    BIT32_MASK(SMMUV3_STRTAB_BASE_CFG_FMT_OFF, SMMUV3_STRTAB_BASE_CFG_FMT_LEN)
+#define SMMUV3_STRTAB_BASE_CFG_FMT_LINEAR (0x0UL << SMMUV3_STRTAB_BASE_CFG_FMT_OFF)
+#define SMMUV3_STRTAB_BASE_CFG_FMT_2LVL (0x1UL << SMMUV3_STRTAB_BASE_CFG_FMT_OFF)
+
+/* CMDQ BASE, PROD AND CONS */
+// We are not using the CMDQ
+
+/* EVENTQ_BASE */
+#define SMMUV3_EVENTQ_BASE_LOG2SZ_OFF (0) // Size of the EVENTQ as log2(N)
+#define SMMUV3_EVENTQ_BASE_LOG2SZ_LEN (5)
+#define SMMUV3_EVENTQ_BASE_LOG2SZ_MASK \
+    BIT64_MASK(SMMUV3_EVENTQ_BASE_LOG2SZ_OFF, SMMUV3_EVENTQ_BASE_LOG2SZ_LEN)
+#define SMMUV3_EVENTQ_BASE_ADDR_OFF (5) // Base address of the EVENTQ
+#define SMMUV3_EVENTQ_BASE_ADDR_LEN (47)
+#define SMMUV3_EVENTQ_BASE_ADDR_MASK \
+    BIT64_MASK(SMMUV3_EVENTQ_BASE_ADDR_OFF, SMMUV3_EVENTQ_BASE_ADDR_LEN)
+#define SMMUV3_EVENTQ_BASE_WA_BIT (1ULL << 62) // Write-allocate hint
+
+/* Page-0 alias of EVENTQ_PROD */
+// not implemented
+
+/* Page-0 alias of EVENTQ_CONS */
+// not implemented
+
+/* EVENTQ_IRQ_CFG0 (MSI Addr) */
+// Ignored since we are not using MSIs to notify EVENTQ interrupts
+
+/* EVENTQ_IRQ_CFG1 (MSI Data) */
+// Ignored since we are not using MSIs to notify EVENTQ interrupts
+
+/* EVENTQ_IRQ_CFG2 (MSI Shareability and Mem type) */
+// Ignored since we are not using MSIs to notify EVENTQ interrupts
+
+/* PRIQ BASE, PROD AND CONS */
+// Ignored. We are not using the PRIQ
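Note that the ADDR fields of STRTAB_BASE and EVENTQ_BASE hold physical-address bits [51:6] and [51:5] *in place*, so a suitably aligned base address is masked into the register rather than shifted. A sketch (helper name illustrative):

```c
// Sketch: composing STRTAB_BASE for a stream table at st_paddr (64-byte aligned).
static uint64_t smmuv3_strtab_base_val(paddr_t st_paddr)
{
    // ADDR holds PA bits [51:6] in place; RA is only an allocation hint
    return (st_paddr & SMMUV3_STRTAB_BASE_ADDR_MASK) | SMMUV3_STRTAB_BASE_RA_BIT;
}
```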
+
+/* GATOS registers */
+// Ignored. We are not using the ATOS interface
+
+/* MPAMIDR and VATOS regs */
+// We are not using MPAM support
+
+// All registers associated to the Enhanced CMDQ IF are ignored
+
+// # Page 1
+
+/* EVENTQ_PROD */
+#define SMMUV3_EVENTQ_PROD_WR_OFF (0) // Write index of the EVENTQ
+#define SMMUV3_EVENTQ_PROD_WR_LEN (20)
+#define SMMUV3_EVENTQ_PROD_OVFLG_BIT (1UL << 31) // Overflow flag (to be compared with CONS flag)
+
+/* EVENTQ_CONS */
+#define SMMUV3_EVENTQ_CONS_RD_OFF (0) // Read index of the EVENTQ
+#define SMMUV3_EVENTQ_CONS_RD_LEN (20)
+#define SMMUV3_EVENTQ_CONS_OVACK_BIT (1UL << 31) // Overflow ACK flag
+
+// # Register Map: Page 0 (4 KiB) (Offset 0x0)
+// Reserved gaps of the SMMUv3 memory map are padded explicitly so that every
+// register field sits at its architected offset
+struct smmuv3_regmap_p0_hw {
+    uint32_t IDR0;                   // 0x000
+    uint32_t IDR1;
+    uint32_t IDR2;
+    uint32_t IDR3;
+    uint32_t IDR4;
+    uint32_t IDR5;
+    uint32_t IIDR;                   // 0x018
+    uint32_t AIDR;                   // 0x01C
+    uint32_t CR0;                    // 0x020
+    uint32_t CR0_ACK;                // 0x024
+    uint32_t CR1;
+    uint32_t CR2;                    // 0x02C
+    uint8_t __res0[0x40 - 0x30];
+    uint32_t STATUSR;                // 0x040
+    uint32_t GBPA;                   // 0x044
+    uint32_t AGBPA;                  // 0x048
+    uint8_t __res1[0x50 - 0x4C];
+    uint32_t IRQ_CTRL;               // 0x050
+    uint32_t IRQ_CTRLACK;            // 0x054
+    uint8_t __res2[0x60 - 0x58];
+    uint32_t GERROR;                 // 0x060
+    uint32_t GERROR_N;               // 0x064
+    uint64_t GERROR_IRQ_CFG0;        // 0x068
+    uint32_t GERROR_IRQ_CFG1;        // 0x070
+    uint32_t GERROR_IRQ_CFG2;        // 0x074
+    uint8_t __res3[0x80 - 0x78];
+    uint64_t STRTAB_BASE;            // 0x080
+    uint32_t STRTAB_BASE_CFG;        // 0x088
+    uint8_t __res4[0x90 - 0x8C];
+    uint64_t CMDQ_BASE;              // 0x090
+    uint32_t CMDQ_PROD;              // 0x098
+    uint32_t CMDQ_CONS;              // 0x09C
+    uint64_t EVENTQ_BASE;            // 0x0A0
+    uint32_t __EVENTQ_PROD;          // 0x0A8 (alias of Page 1)
+    uint32_t __EVENTQ_CONS;          // 0x0AC (alias of Page 1)
+    uint64_t EVENTQ_IRQ_CFG0;        // 0x0B0
+    uint32_t EVENTQ_IRQ_CFG1;        // 0x0B8
+    uint32_t EVENTQ_IRQ_CFG2;        // 0x0BC
+    uint64_t PRIQ_BASE;              // 0x0C0
+    uint32_t __PRIQ_PROD;            // 0x0C8 (alias of Page 1)
+    uint32_t __PRIQ_CONS;            // 0x0CC (alias of Page 1)
+    uint64_t PRIQ_IRQ_CFG0;          // 0x0D0
+    uint32_t PRIQ_IRQ_CFG1;          // 0x0D8
+    uint32_t PRIQ_IRQ_CFG2;          // 0x0DC
+    uint8_t __res5[0x100 - 0xE0];
+    uint32_t GATOS_CTRL;             // 0x100
+    uint8_t __res6[0x108 - 0x104];
+    uint64_t GATOS_SID;              // 0x108
+    uint64_t GATOS_ADDR;             // 0x110
+    uint64_t GATOS_PAR;              // 0x118
+    uint8_t __res7[0x130 - 0x120];
+    uint32_t MPAMIDR;                // 0x130
+    uint8_t __res8[0x138 - 0x134];
+    uint32_t GMPAM;                  // 0x138
+    uint32_t GBPMPAM;                // 0x13C
+    uint8_t __res9[0x180 - 0x140];
+    uint32_t VATOS_SEL;              // 0x180
+    uint8_t __res10[0x190 - 0x184];
+    uint32_t IDR6;                   // 0x190
+    uint8_t __res11[0xE00 - 0x194];  // reserved + Enhanced CMDQ IF
+    uint8_t __impl1[0xF00 - 0xE00];  // IMPLEMENTATION DEFINED
+    uint8_t __res12[0xFD0 - 0xF00];
+    uint8_t ID_REGS[0x1000 - 0xFD0]; // 0xFD0
+} __attribute__((__packed__, __aligned__(0x10000))); // Aligned to 64 KiB
+
+// Enhanced CMDQ not used
+
+// Secure Register Interface not used
+
+// # Register Map: Page 1 (4 KiB) (Offset 0x10000)
+struct smmuv3_regmap_p1_hw {
+    uint8_t __pad1[0xA8 - 0x00];
+    uint32_t EVENTQ_PROD;
+    uint32_t EVENTQ_CONS;
+    uint8_t __pad2[0xC8 - 0xB0];
+    uint32_t PRIQ_PROD;
+    uint32_t PRIQ_CONS;
+    uint8_t __pad3[0x1000 - 0x00D0];
+} __attribute__((__packed__, __aligned__(0x10000))); // Aligned to 64 KiB
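Since the register map now encodes the reserved gaps explicitly, compile-time checks are cheap insurance that the structs match the architected offsets. These asserts are a suggestion (not in the original patch); offsets follow the SMMUv3 spec, and `offsetof` needs `<stddef.h>`:

```c
#include <stddef.h>

_Static_assert(offsetof(struct smmuv3_regmap_p0_hw, CR0) == 0x20, "CR0 offset");
_Static_assert(offsetof(struct smmuv3_regmap_p0_hw, GERROR) == 0x60, "GERROR offset");
_Static_assert(offsetof(struct smmuv3_regmap_p0_hw, STRTAB_BASE) == 0x80, "STRTAB_BASE offset");
_Static_assert(offsetof(struct smmuv3_regmap_p0_hw, GATOS_CTRL) == 0x100, "GATOS_CTRL offset");
_Static_assert(offsetof(struct smmuv3_regmap_p1_hw, EVENTQ_PROD) == 0xA8, "EVENTQ_PROD offset");
```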
+
+// #-------------
+// # Stream Table
+// #-------------
+
+/* DW 1 */
+#define SMMUV3_STE_VALID_BIT (1ULL << 0)
+#define SMMUV3_STE_CONFIG_OFF (1)
+#define SMMUV3_STE_CONFIG_LEN (3)
+#define SMMUV3_STE_CONFIG_MASK BIT64_MASK(SMMUV3_STE_CONFIG_OFF, SMMUV3_STE_CONFIG_LEN)
+#define SMMUV3_STE_CONFIG_ABORT ((0b000ULL << SMMUV3_STE_CONFIG_OFF) & SMMUV3_STE_CONFIG_MASK)
+#define SMMUV3_STE_CONFIG_BYPASS ((0b100ULL << SMMUV3_STE_CONFIG_OFF) & SMMUV3_STE_CONFIG_MASK)
+#define SMMUV3_STE_CONFIG_S1ONLY ((0b101ULL << SMMUV3_STE_CONFIG_OFF) & SMMUV3_STE_CONFIG_MASK)
+#define SMMUV3_STE_CONFIG_S2ONLY ((0b110ULL << SMMUV3_STE_CONFIG_OFF) & SMMUV3_STE_CONFIG_MASK)
+#define SMMUV3_STE_CONFIG_2STAGE ((0b111ULL << SMMUV3_STE_CONFIG_OFF) & SMMUV3_STE_CONFIG_MASK)
+// Stage 1 format and contexts ignored
+
+/* DW 2 */
+// Default SubstreamID ignored
+// S1 ctx ptr attributes ignored (inner and outer cacheability/allocation, shareability)
+// Page-based HW attributes ignored (bits 59, 60, 61 and 62)
+// Destructive Reads ignored
+#define SMMUV3_STE_CONT_OFF (77 - 64) // May be used to group equal STEs for different devices
+#define SMMUV3_STE_CONT_LEN (4)
+#define SMMUV3_STE_CONT_MASK BIT64_MASK(SMMUV3_STE_CONT_OFF, SMMUV3_STE_CONT_LEN)
+// DCP ignored
+// PPAR ignored
+// Fault records merging ignored
+// Stage 2 control of attributes ignored
+// Stage 1 MPAM ignored
+// Stage 1 stalls disable ignored
+// We do not give support for ATS
+
+#define SMMUV3_STE_MEMATTR_OFF (96 - 96) // Memory type to be overridden
+#define SMMUV3_STE_MEMATTR_LEN (4)
+#define SMMUV3_STE_MEMATTR_MASK BIT64_MASK(SMMUV3_STE_MEMATTR_OFF, SMMUV3_STE_MEMATTR_LEN)
+#define SMMUV3_STE_MEMATTR_IWB_OWB ((0b1111UL << SMMUV3_STE_MEMATTR_OFF) & SMMUV3_STE_MEMATTR_MASK)
+#define SMMUV3_STE_MTCFG_BIT (1ULL << (100 - 96)) // Memory type override enable
+#define SMMUV3_STE_ALLOCCFG_OFF (101 - 96) // Allocation hints override
+#define SMMUV3_STE_ALLOCCFG_LEN (4)
+#define SMMUV3_STE_ALLOCCFG_MASK BIT64_MASK(SMMUV3_STE_ALLOCCFG_OFF, SMMUV3_STE_ALLOCCFG_LEN)
+#define SMMUV3_STE_ALLOCCFG_RA_WA ((0b1110UL << SMMUV3_STE_ALLOCCFG_OFF) & SMMUV3_STE_ALLOCCFG_MASK)
+#define SMMUV3_STE_SHCFG_OFF (108 - 96) // Shareability config override
+#define SMMUV3_STE_SHCFG_LEN (2)
+#define SMMUV3_STE_SHCFG_MASK BIT64_MASK(SMMUV3_STE_SHCFG_OFF, SMMUV3_STE_SHCFG_LEN)
+#define SMMUV3_STE_SHCFG_NONSH ((0b00UL << SMMUV3_STE_SHCFG_OFF) & SMMUV3_STE_SHCFG_MASK)
+#define SMMUV3_STE_SHCFG_USEIN ((0b01UL << SMMUV3_STE_SHCFG_OFF) & SMMUV3_STE_SHCFG_MASK)
+#define SMMUV3_STE_SHCFG_OUTSH ((0b10UL << SMMUV3_STE_SHCFG_OFF) & SMMUV3_STE_SHCFG_MASK)
+#define SMMUV3_STE_SHCFG_INNSH ((0b11UL << SMMUV3_STE_SHCFG_OFF) & SMMUV3_STE_SHCFG_MASK)
+// NS attribute ignored since we do not use a Secure ST
+#define SMMUV3_STE_PRIVCFG_OFF (112 - 96) // Privilege config override
+#define SMMUV3_STE_PRIVCFG_LEN (2)
+#define SMMUV3_STE_PRIVCFG_MASK BIT64_MASK(SMMUV3_STE_PRIVCFG_OFF, SMMUV3_STE_PRIVCFG_LEN)
+#define SMMUV3_STE_INSTCFG_OFF (114 - 96) // Inst/Data override
+#define SMMUV3_STE_INSTCFG_LEN (2)
+#define SMMUV3_STE_INSTCFG_MASK BIT64_MASK(SMMUV3_STE_INSTCFG_OFF, SMMUV3_STE_INSTCFG_LEN)
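A minimal illustration of how the DW1 CONFIG encodings combine: a bypass STE only needs VALID plus CONFIG in the first doubleword, with attributes then taken from the incoming transaction:

```c
// Sketch: first doubleword of a bypass STE (all other fields may stay zero).
uint64_t ste_dw0 = SMMUV3_STE_VALID_BIT | SMMUV3_STE_CONFIG_BYPASS;
```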
+
+/* DW 3 */
+#define SMMUV3_STE_S2VMID_OFF (128 - 128) // Stage 2 VMID
+#define SMMUV3_STE_S2VMID_LEN (16)
+#define SMMUV3_STE_S2VMID_MASK BIT64_MASK(SMMUV3_STE_S2VMID_OFF, SMMUV3_STE_S2VMID_LEN)
+
+#define SMMUV3_STE_S2T0SZ_OFF (160 - 160) // Size of the IPA input region
+#define SMMUV3_STE_S2T0SZ_LEN (6)
+#define SMMUV3_STE_S2T0SZ_MASK BIT64_MASK(SMMUV3_STE_S2T0SZ_OFF, SMMUV3_STE_S2T0SZ_LEN)
+#define SMMUV3_STE_S2SL0_OFF (166 - 160) // Starting level of stage 2 translation table walk
+#define SMMUV3_STE_S2SL0_LEN (2)
+#define SMMUV3_STE_S2SL0_MASK BIT64_MASK(SMMUV3_STE_S2SL0_OFF, SMMUV3_STE_S2SL0_LEN)
+#define SMMUV3_STE_S2SL0_LVL0 ((0b10UL << SMMUV3_STE_S2SL0_OFF) & SMMUV3_STE_S2SL0_MASK)
+#define SMMUV3_STE_S2SL0_LVL1 ((0b01UL << SMMUV3_STE_S2SL0_OFF) & SMMUV3_STE_S2SL0_MASK)
+#define SMMUV3_STE_S2SL0_LVL2 ((0b00UL << SMMUV3_STE_S2SL0_OFF) & SMMUV3_STE_S2SL0_MASK)
+#define SMMUV3_STE_S2IR0_OFF (168 - 160) // Inner region cacheability for stage 2 table walks
+#define SMMUV3_STE_S2IR0_LEN (2)
+#define SMMUV3_STE_S2IR0_MASK BIT64_MASK(SMMUV3_STE_S2IR0_OFF, SMMUV3_STE_S2IR0_LEN)
+#define SMMUV3_STE_S2IR0_NC ((0b00UL << SMMUV3_STE_S2IR0_OFF) & SMMUV3_STE_S2IR0_MASK)
+#define SMMUV3_STE_S2IR0_WB_RA_WA ((0b01UL << SMMUV3_STE_S2IR0_OFF) & SMMUV3_STE_S2IR0_MASK)
+#define SMMUV3_STE_S2IR0_WT_RA ((0b10UL << SMMUV3_STE_S2IR0_OFF) & SMMUV3_STE_S2IR0_MASK)
+#define SMMUV3_STE_S2IR0_WB_RA_NWA ((0b11UL << SMMUV3_STE_S2IR0_OFF) & SMMUV3_STE_S2IR0_MASK)
+#define SMMUV3_STE_S2OR0_OFF (170 - 160) // Outer region cacheability for stage 2 table walks
+#define SMMUV3_STE_S2OR0_LEN (2)
+#define SMMUV3_STE_S2OR0_MASK BIT64_MASK(SMMUV3_STE_S2OR0_OFF, SMMUV3_STE_S2OR0_LEN)
+#define SMMUV3_STE_S2OR0_NC ((0b00UL << SMMUV3_STE_S2OR0_OFF) & SMMUV3_STE_S2OR0_MASK)
+#define SMMUV3_STE_S2OR0_WB_RA_WA ((0b01UL << SMMUV3_STE_S2OR0_OFF) & SMMUV3_STE_S2OR0_MASK)
+#define SMMUV3_STE_S2OR0_WT_RA ((0b10UL << SMMUV3_STE_S2OR0_OFF) & SMMUV3_STE_S2OR0_MASK)
+#define SMMUV3_STE_S2OR0_WB_RA_NWA ((0b11UL << SMMUV3_STE_S2OR0_OFF) & SMMUV3_STE_S2OR0_MASK)
+#define SMMUV3_STE_S2SH0_OFF (172 - 160) // Shareability for stage 2 table walks
+#define SMMUV3_STE_S2SH0_LEN (2)
+#define SMMUV3_STE_S2SH0_MASK BIT64_MASK(SMMUV3_STE_S2SH0_OFF, SMMUV3_STE_S2SH0_LEN)
+#define SMMUV3_STE_S2SH0_NONSH ((0b00UL << SMMUV3_STE_S2SH0_OFF) & SMMUV3_STE_S2SH0_MASK)
+#define SMMUV3_STE_S2SH0_OUTSH ((0b10UL << SMMUV3_STE_S2SH0_OFF) & SMMUV3_STE_S2SH0_MASK)
+#define SMMUV3_STE_S2SH0_INNSH ((0b11UL << SMMUV3_STE_S2SH0_OFF) & SMMUV3_STE_S2SH0_MASK)
+#define SMMUV3_STE_S2TG_OFF (174 - 160) // Stage 2 translation granule size
+#define SMMUV3_STE_S2TG_LEN (2)
+#define SMMUV3_STE_S2TG_MASK BIT64_MASK(SMMUV3_STE_S2TG_OFF, SMMUV3_STE_S2TG_LEN)
+#define SMMUV3_STE_S2PS_OFF (176 - 160) // Physical address size (for fault checking)
+#define SMMUV3_STE_S2PS_LEN (3) // 3-bit field, same encoding as PARange
+#define SMMUV3_STE_S2PS_MASK BIT64_MASK(SMMUV3_STE_S2PS_OFF, SMMUV3_STE_S2PS_LEN)
+#define SMMUV3_STE_S2AA64_BIT (1ULL << (179 - 160)) // Stage 2 translation table format (AArch64/AArch32)
+#define SMMUV3_STE_S2ENDI_BIT (1ULL << (180 - 160)) // Stage 2 translation table endianness
+#define SMMUV3_STE_S2AFFD_BIT (1ULL << (181 - 160)) // Stage 2 AF fault disable
+// Protected Table Walks not used
+#define SMMUV3_STE_S2HD_BIT (1ULL << (183 - 160)) // Stage 2 dirty-flag HW update
+#define SMMUV3_STE_S2HA_BIT (1ULL << (184 - 160)) // Stage 2 AF HW update
+#define SMMUV3_STE_S2S_BIT (1ULL << (185 - 160)) // Stage 2 fault behavior - Stall
+#define SMMUV3_STE_S2R_BIT (1ULL << (186 - 160)) // Stage 2 fault behavior - Record
+
+/* DW 4 */
+#define SMMUV3_STE_S2TTB_OFF (196 - 192) // Address of Translation Table Base (PA bits [51:4])
+#define SMMUV3_STE_S2TTB_LEN (48)
+#define SMMUV3_STE_S2TTB_MASK BIT64_MASK(SMMUV3_STE_S2TTB_OFF, SMMUV3_STE_S2TTB_LEN)
+
+/* DW 5 */
+// MPAM PARTID ignored
+// Secure fields ignored
+
+/* DW 6 */
+// MPAM PMG and NS ignored
+// VM Structure ignored (we're using stage 2 only)
+
+/* DW 7 */
+// Secure STE stuff ignored
+
+struct smmuv3_ste {
+    uint64_t s1_ctx;    // DW 1
+    uint32_t tc;        // DW 2 (translation control)
+    uint32_t attr_ovrd; // DW 2 (attribute overrides)
+    uint32_t s2_vmid;   // DW 3
+    uint32_t s2_cfg;    // DW 3 (stage-2 config)
+    uint64_t s2_ttb;    // DW 4
+    uint32_t partid;    // DW 5
+    uint32_t sec_cfg;   // DW 5
+    uint64_t vms_ptr;   // DW 6
+    uint64_t sec_s2ttb; // DW 7
+    uint64_t __res0;
+} __attribute__((__packed__));
+
+// #-------------
+// # Event Queue
+// #-------------
+
+#define SMMUV3_EVENTQ_NUMBER_OFF (0)
+#define SMMUV3_EVENTQ_NUMBER_LEN (8)
+#define SMMUV3_EVENTQ_NUMBER_MASK BIT32_MASK(SMMUV3_EVENTQ_NUMBER_OFF, SMMUV3_EVENTQ_NUMBER_LEN)
+
+struct smmuv3_eventq_entry {
+    uint32_t record[8];
+} __attribute__((__packed__));
+
+void smmuv3_init(void);
+bool smmuv3_alloc_ste(streamid_t sid);
+void smmuv3_write_ste(streamid_t sid, struct vm* vm, paddr_t root_pt);
+
+#endif /* __ARCH_SMMUV3_H__ */
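STEs are 64 bytes and event records 32 bytes by architecture, so a pair of suggested compile-time checks (not in the original patch) pins the packed structs down:

```c
_Static_assert(sizeof(struct smmuv3_ste) == 64, "an STE must be 8 doublewords");
_Static_assert(sizeof(struct smmuv3_eventq_entry) == 32, "an event record must be 8 words");
```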
diff --git a/src/arch/armv8/armv8-a/iommu.c b/src/arch/armv8/armv8-a/iommu.c
index 5c89fdc0..cebf983c 100644
--- a/src/arch/armv8/armv8-a/iommu.c
+++ b/src/arch/armv8/armv8-a/iommu.c
@@ -3,86 +3,81 @@
  * Copyright (c) Bao Project and Contributors. All rights reserved.
  */
 
-#include
-#include
-#include
-#include
+#include <arch/smmuv3.h>
+#include <cpu.h>
 #include
 
+/**
+ * IOMMU HW Initialization.
+ *
+ * @returns true on success, false on error.
+ */
+// TODO: We need a mechanism to select between both SMMU versions
 bool iommu_arch_init(void)
 {
-    if (cpu_is_master() && platform.arch.smmu.base) {
-        smmu_init();
+    // By checking platform.arch.smmu.base we verify if an IOMMU is present in the platform
+    if (cpu()->id == CPU_MASTER && platform.arch.smmu.base) {
+        smmuv3_init();
         return true;
     }
 
     return false;
 }
 
-static ssize_t iommu_vm_arch_init_ctx(struct vm* vm)
-{
-    ssize_t ctx_id = (ssize_t)vm->io.prot.mmu.ctx_id;
-    if (ctx_id < 0) {
-        /* Set up ctx bank to vm address space in an available ctx. */
-        ctx_id = smmu_alloc_ctxbnk();
-        if (ctx_id >= 0) {
+/**
+ * Initialize the STE indexed by the StreamID for the given VM.
+ * Configure the corresponding STE with root PT base addr, VMID and device config.
+ *
+ * @vm:  VM struct to which the device will be assigned.
+ * @sid: StreamID of the device to be added.
+ *
+ * @returns true on success, false on error.
+ */
+static bool iommu_vm_arch_add(struct vm* vm, streamid_t sid)
+{
+    if (sid > 0) {
+        // Check if the device was already added to a VM
+        if (smmuv3_alloc_ste(sid)) {
             paddr_t rootpt;
+            // Translate root PT base address
             mem_translate(&cpu()->as, (vaddr_t)vm->as.pt.root, &rootpt);
-            smmu_write_ctxbnk((size_t)ctx_id, rootpt, vm->id);
-            vm->io.prot.mmu.ctx_id = ctx_id;
+            // Set the STE with root PT base address, VMID and configuration
+            smmuv3_write_ste(sid, vm, rootpt);
         } else {
-            INFO("iommu: smmuv2 could not allocate ctx for vm: %d", vm->id);
-        }
-    }
-
-    /* Ctx is valid when we get here. */
-    return ctx_id;
-}
-
-static bool iommu_vm_arch_add(struct vm* vm, streamid_t mask, streamid_t id)
-{
-    ssize_t vm_ctx = iommu_vm_arch_init_ctx(vm);
-    streamid_t glbl_mask = vm->io.prot.mmu.global_mask;
-    streamid_t prep_mask = (mask & SMMU_ID_MSK) | glbl_mask;
-    streamid_t prep_id = (id & SMMU_ID_MSK);
-    bool group = (bool)mask;
-
-    if (vm_ctx < 0) {
-        return false;
-    }
-
-    if (!smmu_compatible_sme_exists(prep_mask, prep_id, (size_t)vm_ctx, group)) {
-        ssize_t sme = smmu_alloc_sme();
-        if (sme < 0) {
-            INFO("iommu: smmuv2 no more free sme available.");
+            INFO("SMMUv3: Cannot add the same StreamID (%d) twice", sid);
             return false;
         }
-        smmu_write_sme((size_t)sme, prep_mask, prep_id, group);
-        smmu_write_s2c((size_t)sme, (size_t)vm_ctx);
+    } else {
+        INFO("SMMUv3: Invalid StreamID: %d", sid);
+        return false;
     }
 
     return true;
 }
 
-inline bool iommu_arch_vm_add_device(struct vm* vm, streamid_t id)
+/**
+ * Add device to the VM specified.
+ *
+ * @vm:  VM struct to which the device will be assigned.
+ * @sid: StreamID of the device to be added.
+ *
+ * @returns true on success, false on error.
+ */
+inline bool iommu_arch_vm_add_device(struct vm* vm, streamid_t sid)
 {
-    return iommu_vm_arch_add(vm, 0, id);
+    return iommu_vm_arch_add(vm, sid);
 }
 
-bool iommu_arch_vm_init(struct vm* vm, const struct vm_config* vm_config)
+/**
+ * Initialize VM-specific, arch-specific IOMMU data.
+ *
+ * @vm:     VM under consideration.
+ * @config: VM config.
+ *
+ * @returns true on success, false on error.
+ */
+bool iommu_arch_vm_init(struct vm* vm, const struct vm_config* config)
 {
-    vm->io.prot.mmu.global_mask =
-        vm_config->platform.arch.smmu.global_mask | platform.arch.smmu.global_mask;
-    vm->io.prot.mmu.ctx_id = -1;
-
-    /* This section relates only to arm's iommu so we parse it here. */
-    for (size_t i = 0; i < vm_config->platform.arch.smmu.group_num; i++) {
-        /* Register each group. */
-        const struct smmu_group* group = &vm_config->platform.arch.smmu.groups[i];
-        if (!iommu_vm_arch_add(vm, group->mask, group->id)) {
-            return false;
-        }
-    }
-
+    // For now there is no data to initialize
     return true;
 }
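A hypothetical call site, to show the intended flow through the functions above: VM setup code hands each device's StreamID to `iommu_arch_vm_add_device()`, which reserves and programs the STE. The StreamID value here is made up for illustration:

```c
// Hypothetical: assigning a DMA device with StreamID 5 during VM creation.
if (!iommu_arch_vm_add_device(vm, 5)) {
    ERROR("SMMUv3: failed to assign device (StreamID 5) to vm %d", vm->id);
}
```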
diff --git a/src/arch/armv8/armv8-a/smmuv3.c b/src/arch/armv8/armv8-a/smmuv3.c
new file mode 100644
index 00000000..2110abcb
--- /dev/null
+++ b/src/arch/armv8/armv8-a/smmuv3.c
@@ -0,0 +1,389 @@
+/**
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) Bao Project and Contributors. All rights reserved.
+ */
+
+#include <arch/smmuv3.h>
+#include <bao.h>
+#include <bitmap.h>
+#include <cpu.h>
+#include <mem.h>
+#include <platform.h>
+#include <string.h>
+
+// Use a 4 KiB page of memory for the ST.
+// If a (non-embedded) SMMU implementation supports less than 64 STEs,
+// the max number of STEs supported is selected
+#define ST_N_ENTRIES (64)
+
+#define EVENTQ_N_ENTRIES (32)
+
+// Delay to ack register writes
+#define ACK_DELAY (10000)
+
+struct smmuv3_hw {
+    volatile struct smmuv3_regmap_p0_hw* regmap_p0;
+    volatile struct smmuv3_regmap_p1_hw* regmap_p1;
+    volatile struct smmuv3_ste* st;
+    volatile struct smmuv3_eventq_entry* eventq;
+};
+
+struct smmuv3_priv {
+    struct smmuv3_hw hw;
+
+    /* For easier book keeping */
+    spinlock_t st_lock;
+    size_t st_num;
+    BITMAP_ALLOC(st_bitmap, ST_N_ENTRIES);
+};
+
+static struct smmuv3_priv smmuv3;
+
+// floor(log2(n)) for n > 0; returns 0 for n == 0
+static uint16_t log2_32(uint32_t n)
+{
+    uint16_t log_val = 0;
+
+    while (n >>= 1) {
+        log_val++;
+    }
+
+    return log_val;
+}
+
+// Check capabilities
+static void smmuv3_check_capabilities(void)
+{
+    unsigned major_version = 0;
+    unsigned tt_format = 0;
+    unsigned sid_max_width = 0;
+    size_t pa_size = 0;
+
+    // Check major version (AIDR.ArchMajorRev == 0 identifies SMMUv3.x)
+    major_version =
+        bit32_extract(smmuv3.hw.regmap_p0->AIDR, SMMUV3_AIDR_MAJOR_OFF, SMMUV3_AIDR_MAJOR_LEN);
+    if (major_version != 0) {
+        ERROR("SMMU unsupported version: %d", major_version);
+    }
+
+    // Stage-2 translation support
+    if (!(smmuv3.hw.regmap_p0->IDR0 & SMMUV3_IDR0_S2P_BIT)) {
+        ERROR("This SMMU implementation does not support Stage-2 translation");
+    }
+
+    // Translation Table format (AArch)
+    // TODO: Confirm format of target platform
+    tt_format = bit32_extract(smmuv3.hw.regmap_p0->IDR0, SMMUV3_IDR0_TTF_OFF, SMMUV3_IDR0_TTF_LEN);
+    if (!(tt_format & (unsigned)0x2)) {
+        ERROR("AArch64 not supported by this SMMU implementation");
+    }
+
+    // Check coherent access support
+    if (!(smmuv3.hw.regmap_p0->IDR0 & SMMUV3_IDR0_COHACC_BIT)) {
+        ERROR("This SMMU implementation does not support coherent memory accesses");
+    }
+
+    // Check broadcast TLB maintenance support
+    if (!(smmuv3.hw.regmap_p0->IDR0 & SMMUV3_IDR0_BTM_BIT)) {
+        ERROR("This SMMU implementation does not support broadcast TLB maintenance");
+    }
+
+    // VMID matching support (for TLB invalidations)
+    if (!(smmuv3.hw.regmap_p0->IDR0 & SMMUV3_IDR0_VMW_BIT)) {
+        ERROR("This SMMU implementation does not support VMID matching");
+    }
+
+    // 16-bit VMID support
+    if (!(smmuv3.hw.regmap_p0->IDR0 & SMMUV3_IDR0_VMID16_BIT)) {
+        WARNING("This SMMU implementation only supports VMIDs up to 8 bits");
+    }
+
+    // Check max width of the StreamID
+    // At minimum we should have as many STE entries as DMAs attached to the platform
+    sid_max_width =
+        bit32_extract(smmuv3.hw.regmap_p0->IDR1, SMMUV3_IDR1_SIDSIZE_OFF, SMMUV3_IDR1_SIDSIZE_LEN);
+    if ((1UL << sid_max_width) < config->platform.dev_num) {
+        ERROR("Insufficient STEs for this platform: %d", (1 << sid_max_width));
+    }
+
+    // Output (physical) address size
+    pa_size = bit32_extract(smmuv3.hw.regmap_p0->IDR5, SMMUV3_IDR5_OAS_OFF, SMMUV3_IDR5_OAS_LEN);
+    if (pa_size < parange) {
+        ERROR("This SMMU implementation does not support the full available PA range");
+    }
+
+    // Check translation granularity
+    if (!(smmuv3.hw.regmap_p0->IDR5 & SMMUV3_IDR5_GRAN4K_BIT)) {
+        ERROR("This SMMU implementation does not support 4 KiB translation granularity");
+    }
+}
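IDR5.OAS uses the same 3-bit encoding as the Armv8 PARange field, which is also what `parange`/`parange_table` index elsewhere in this patch. A lookup sketch (encodings per the SMMUv3 spec; 0b110 only exists with 52-bit support, 0b111 is reserved):

```c
// Sketch: translating the IDR5.OAS encoding into an output address size in bits.
static const size_t smmuv3_oas_bits[] = { 32, 36, 40, 42, 44, 48, 52 };

static size_t smmuv3_oas(volatile struct smmuv3_regmap_p0_hw* r)
{
    // caller must reject the reserved encoding 0b111 before indexing
    return smmuv3_oas_bits[bit32_extract(r->IDR5, SMMUV3_IDR5_OAS_OFF, SMMUV3_IDR5_OAS_LEN)];
}
```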
+
+/**
+ * Init and enable ARM SMMUv3.
+ */
+void smmuv3_init(void)
+{
+    unsigned evtq_max_sz = 0;
+    unsigned evtq_sz = 0;
+    uint8_t ack = 0;
+
+    // # Map register IF
+    // Page 0 of the register map. The struct is 64 KiB sized/aligned, so this maps the
+    // whole page-0 region, although we only use the registers in its first 4 KiB
+    vaddr_t smmuv3_regmap_p0 = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA,
+        platform.arch.smmu.base, NUM_PAGES(sizeof(struct smmuv3_regmap_p0_hw)));
+
+    smmuv3.hw.regmap_p0 = (struct smmuv3_regmap_p0_hw*)smmuv3_regmap_p0;
+
+    // Page 1 lives at a fixed 64 KiB offset from the base of the register IF
+    vaddr_t smmuv3_regmap_p1 = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA,
+        platform.arch.smmu.base + 0x10000, NUM_PAGES(sizeof(struct smmuv3_regmap_p1_hw)));
+
+    smmuv3.hw.regmap_p1 = (struct smmuv3_regmap_p1_hw*)smmuv3_regmap_p1;
+
+    // # Check capabilities
+    smmuv3_check_capabilities();
+
+    // # Clear Errors/Interrupts
+    // Global errors are acknowledged by toggling the corresponding GERROR_N bits
+    smmuv3.hw.regmap_p0->GERROR_N = smmuv3.hw.regmap_p0->GERROR;
+    // TODO: Wired-signaled interrupt mechanism is implementation-defined. Check for WSI support
+    // and clear IP bits if present.
+
+    // # Configure Fault/Event Queue
+    // Check whether queues are stored internally within the SMMU
+    if (smmuv3.hw.regmap_p0->IDR1 & SMMUV3_IDR1_QUEUES_PRESET_BIT) {
+        // EVENTQ is stored internally within the SMMU. Base register is fixed
+    } else {
+        // EVENTQ is stored in main memory. We need to allocate and configure the base register
+
+        // Check max number of EVTQ entries (IDR1.EVENTQS holds log2(N))
+        evtq_max_sz = bit32_extract(smmuv3.hw.regmap_p0->IDR1, SMMUV3_IDR1_EVENTQS_OFF,
+            SMMUV3_IDR1_EVENTQS_LEN);
+        evtq_sz = ((1U << evtq_max_sz) < EVENTQ_N_ENTRIES) ? (1U << evtq_max_sz) :
+                                                             (EVENTQ_N_ENTRIES);
+
+        // Allocate memory for the EVENTQ
+        vaddr_t eventq_vaddr =
+            (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct smmuv3_eventq_entry) * evtq_sz),
+                SEC_HYP_GLOBAL, true);
+        memset((void*)eventq_vaddr, 0, sizeof(struct smmuv3_eventq_entry) * evtq_sz);
+        smmuv3.hw.eventq = (struct smmuv3_eventq_entry*)eventq_vaddr;
+
+        // Set EVENTQ size (EVENTQ_BASE is a 64-bit register)
+        uint64_t eventq_base = 0;
+        eventq_base |= log2_32(evtq_sz) & SMMUV3_EVENTQ_BASE_LOG2SZ_MASK;
+
+        // Initialize base pointer. The ADDR field holds PA bits [51:5] in place,
+        // so the (suitably aligned) physical address is masked in, not shifted
+        paddr_t eventq_paddr;
+        mem_translate(&cpu()->as, eventq_vaddr, &eventq_paddr);
+        eventq_base |= (eventq_paddr & SMMUV3_EVENTQ_BASE_ADDR_MASK);
+        smmuv3.hw.regmap_p0->EVENTQ_BASE = eventq_base;
+    }
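The driver only sets the queue up; a consumer would pair the PROD/CONS index fields roughly as below (hypothetical helper, not in the patch). Each index field carries a wrap bit just above the index proper, so comparing the raw fields distinguishes empty from full:

```c
// Sketch: draining pending event records from the queue configured above.
static void smmuv3_eventq_drain(void)
{
    while (bit32_extract(smmuv3.hw.regmap_p1->EVENTQ_CONS, SMMUV3_EVENTQ_CONS_RD_OFF,
               SMMUV3_EVENTQ_CONS_RD_LEN) !=
        bit32_extract(smmuv3.hw.regmap_p1->EVENTQ_PROD, SMMUV3_EVENTQ_PROD_WR_OFF,
            SMMUV3_EVENTQ_PROD_WR_LEN)) {
        // ... decode smmuv3.hw.eventq[RD % EVENTQ_N_ENTRIES] here ...
        smmuv3.hw.regmap_p1->EVENTQ_CONS++; // consume one record (index + wrap bit)
    }
}
```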
+
+    // Initialize index registers
+    smmuv3.hw.regmap_p1->EVENTQ_PROD = 0;
+    smmuv3.hw.regmap_p1->EVENTQ_CONS = 0;
+
+    // Enable EVENTQ and global interrupts
+    smmuv3.hw.regmap_p0->IRQ_CTRL = (SMMUV3_IRQCTRL_EVENTQ_BIT | SMMUV3_IRQCTRL_GERROR_BIT);
+
+    // Ensure that interrupts were enabled
+    ack = 0;
+    for (size_t i = 0; i < ACK_DELAY; i++) {
+        if (smmuv3.hw.regmap_p0->IRQ_CTRLACK == smmuv3.hw.regmap_p0->IRQ_CTRL) {
+            ack = 1;
+            break;
+        }
+    }
+    if (!ack) {
+        WARNING("Could not enable Global and EVENTQ interrupts");
+    }
+
+    // Enable fault queue
+    smmuv3.hw.regmap_p0->CR0 |= SMMUV3_CR0_EVENTQEN_BIT;
+
+    // Confirm that the EVENTQ was enabled
+    ack = 0;
+    for (size_t i = 0; i < ACK_DELAY; i++) {
+        if (smmuv3.hw.regmap_p0->CR0_ACK == smmuv3.hw.regmap_p0->CR0) {
+            ack = 1;
+            break;
+        }
+    }
+    if (!ack) {
+        WARNING("Could not enable the EVENTQ");
+    }
+
+    // # Issue commands to invalidate all cached configuration and TLB entries
+    /*
+        Since we do not perform invalidations in Bao, it may not be necessary to perform this
+        step. However, invalidation of all non-secure TLB information can be achieved using the
+        CMD_TLBI_EL2_ALL and CMD_TLBI_NSNH_ALL commands
+    */
+
+    // # Configure Stream Table registers
+    // Check whether the Stream Table is stored internally within the SMMU
+    if (smmuv3.hw.regmap_p0->IDR1 & SMMUV3_IDR1_TABLES_PRESET_BIT) {
+        // Stream Table is embedded within the SMMU. We only need to know the size
+        smmuv3.st_num = (1UL << bit32_extract(smmuv3.hw.regmap_p0->STRTAB_BASE_CFG,
+                            SMMUV3_STRTAB_BASE_CFG_LOG2SZ_OFF, SMMUV3_STRTAB_BASE_CFG_LOG2SZ_LEN));
+    } else {
+        // ST is stored in main memory
+
+        // Check max number of STEs supported
+        size_t max_num_ste = (1UL << bit32_extract(smmuv3.hw.regmap_p0->IDR1,
+                                 SMMUV3_IDR1_SIDSIZE_OFF, SMMUV3_IDR1_SIDSIZE_LEN));
+        smmuv3.st_num = (max_num_ste < ST_N_ENTRIES) ? (max_num_ste) : (ST_N_ENTRIES);
+
+        // Allocate memory for the (linear) Stream Table
+        vaddr_t st_vaddr =
+            (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct smmuv3_ste) * smmuv3.st_num),
+                SEC_HYP_GLOBAL, true);
+        // Clear entries
+        memset((void*)st_vaddr, 0, sizeof(struct smmuv3_ste) * smmuv3.st_num);
+        smmuv3.hw.st = (struct smmuv3_ste*)st_vaddr;
+
+        // Config STRTAB_BASE with the physical base address of the ST in memory.
+        // As with EVENTQ_BASE, the ADDR field holds PA bits [51:6] in place
+        paddr_t st_paddr;
+        mem_translate(&cpu()->as, st_vaddr, &st_paddr);
+        smmuv3.hw.regmap_p0->STRTAB_BASE = (st_paddr & SMMUV3_STRTAB_BASE_ADDR_MASK);
+
+        // Config Stream Table format and size
+        uint32_t st_config = 0;
+        st_config |= log2_32(smmuv3.st_num) & SMMUV3_STRTAB_BASE_CFG_LOG2SZ_MASK;
+        st_config |= SMMUV3_STRTAB_BASE_CFG_FMT_LINEAR & SMMUV3_STRTAB_BASE_CFG_FMT_MASK;
+        smmuv3.hw.regmap_p0->STRTAB_BASE_CFG = st_config;
+    }
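The interrupt-enable, EVENTQ-enable and (below) SMMU-enable paths all repeat the same bounded ACK poll; a helper along these lines (a hypothetical refactor, not part of the patch) would remove the triplication:

```c
// Sketch: wait (bounded) until an ACK register mirrors its control register.
static bool smmuv3_poll_ack(volatile uint32_t* ctrl, volatile uint32_t* ack)
{
    for (size_t i = 0; i < ACK_DELAY; i++) {
        if (*ack == *ctrl) {
            return true;
        }
    }
    return false;
}

// e.g.: if (!smmuv3_poll_ack(&smmuv3.hw.regmap_p0->CR0, &smmuv3.hw.regmap_p0->CR0_ACK)) { ... }
```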
+
+    // Init ST bitmap
+    smmuv3.st_lock = SPINLOCK_INITVAL;
+    bitmap_clear_consecutive(smmuv3.st_bitmap, 0, smmuv3.st_num);
+
+    // # Configure other relevant registers
+    // We have to determine the reset value of SMMU_GBPA.ABORT: if it comes up clear,
+    // transactions may bypass the SMMU until SMMUEN is set
+    if (!(smmuv3.hw.regmap_p0->GBPA & SMMUV3_GBPA_ABORT_BIT)) {
+        WARNING("SMMU_GBPA.ABORT is clear by default. This allows incoming transactions to "
+                "bypass the SMMU before it has been initialized");
+    }
+
+    // Enable recording of C_BAD_STREAMID in the EVENTQ
+    smmuv3.hw.regmap_p0->CR2 |= SMMUV3_CR2_RECINVSID_BIT;
+
+    // # Enable the SMMU
+    smmuv3.hw.regmap_p0->CR0 |= SMMUV3_CR0_SMMUEN_BIT;
+
+    // Confirm that the SMMU was enabled
+    ack = 0;
+    for (size_t i = 0; i < ACK_DELAY; i++) {
+        if (smmuv3.hw.regmap_p0->CR0_ACK == smmuv3.hw.regmap_p0->CR0) {
+            ack = 1;
+            break;
+        }
+    }
+    if (!ack) {
+        ERROR("Could not enable the SMMU");
+    }
+}
+
+/**
+ * Allocate a Stream Table Entry in the ST bitmap.
+ *
+ * @sid: StreamID index to select the STE
+ *
+ * @returns true on success, false on error
+ */
+bool smmuv3_alloc_ste(streamid_t sid)
+{
+    bool allocated;
+    spin_lock(&smmuv3.st_lock);
+
+    // Check whether the STE is already allocated
+    if (!bitmap_get(smmuv3.st_bitmap, sid)) {
+        bitmap_set(smmuv3.st_bitmap, sid);
+        allocated = true;
+    } else {
+        allocated = false; // StreamID is already allocated
+    }
+
+    spin_unlock(&smmuv3.st_lock);
+    return allocated;
+}
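`smmuv3_alloc_ste()` only reserves the slot; the expected pairing with `smmuv3_write_ste()` mirrors `iommu_vm_arch_add()` in iommu.c:

```c
// Sketch of the intended two-step flow for a device with StreamID sid.
if (smmuv3_alloc_ste(sid)) {
    smmuv3_write_ste(sid, vm, root_pt); // program and enable the STE
} else {
    // StreamID already owned by some VM
}
```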
+
+/**
+ * Configure an STE with the base address of the root PT, VMID and translation configuration.
+ * Enable the STE.
+ *
+ * @sid:     StreamID to index the ST
+ * @vm:      VM to which the device is being assigned
+ * @root_pt: Base physical address of the root second-stage PT
+ */
+void smmuv3_write_ste(streamid_t sid, struct vm* vm, paddr_t root_pt)
+{
+    spin_lock(&smmuv3.st_lock);
+    if (!bitmap_get(smmuv3.st_bitmap, sid)) {
+        ERROR("Trying to configure STE %d, which is not allocated", sid);
+    } else {
+        // Configure STE
+        // Stage-1 context fields: mark the STE valid, stage-2-only translation
+        uint64_t s1_ctx = 0;
+        s1_ctx |= SMMUV3_STE_VALID_BIT;
+        s1_ctx |= SMMUV3_STE_CONFIG_S2ONLY;
+        smmuv3.hw.st[sid].s1_ctx = s1_ctx;
+
+        // Translation control
+        uint32_t tc = 0;
+        smmuv3.hw.st[sid].tc = tc;
+
+        // Memory attribute overrides
+        uint32_t attr_ovrd = 0;
+        // attr_ovrd |= SMMUV3_STE_MTCFG_BIT;        // replace incoming cacheability
+        // attr_ovrd |= SMMUV3_STE_MEMATTR_IWB_OWB;  // Inner and outer WB cacheable
+        // attr_ovrd |= SMMUV3_STE_ALLOCCFG_RA_WA;   // Write/Read Allocate, non-transient
+        // attr_ovrd |= SMMUV3_STE_SHCFG_INNSH;      // Inner shareable
+        attr_ovrd |= SMMUV3_STE_SHCFG_USEIN; // Use incoming shareability
+        smmuv3.hw.st[sid].attr_ovrd = attr_ovrd;
+
+        // VMID
+        uint32_t s2vmid = 0;
+        s2vmid |= ((vm->id << SMMUV3_STE_S2VMID_OFF) & SMMUV3_STE_S2VMID_MASK);
+        smmuv3.hw.st[sid].s2_vmid = s2vmid;
+
+        // Stage-2 config
+        uint32_t s2_cfg = 0;
+        size_t t0sz = 64 - parange_table[parange]; // {32, 28, 24, 22, 20, 16}
+        s2_cfg |= ((t0sz << SMMUV3_STE_S2T0SZ_OFF) & SMMUV3_STE_S2T0SZ_MASK);
+        s2_cfg |= ((parange_table[parange] < 44) ? SMMUV3_STE_S2SL0_LVL1 : SMMUV3_STE_S2SL0_LVL0);
+        s2_cfg |= SMMUV3_STE_S2IR0_WB_RA_WA; // Inner region cacheability for stage-2 walks
+        s2_cfg |= SMMUV3_STE_S2OR0_WB_RA_WA; // Outer region cacheability for stage-2 walks
+        s2_cfg |= SMMUV3_STE_S2SH0_INNSH;    // Shareability for stage-2 walks
+        s2_cfg |= ((parange << SMMUV3_STE_S2PS_OFF) & SMMUV3_STE_S2PS_MASK); // PA size
+        // S2TG is left as 0b00 (4 KiB translation granule)
+        // TODO: Check target platform format
+        s2_cfg |= SMMUV3_STE_S2AA64_BIT; // Stage-2 translation table is AArch64
+        s2_cfg |= SMMUV3_STE_S2R_BIT;    // Stage-2 record fault model
+        smmuv3.hw.st[sid].s2_cfg = s2_cfg;
+
+        // Stage-2 Translation Table (PA bits [51:4]; the root PT is 16-byte aligned)
+        uint64_t s2_ttb = 0;
+        s2_ttb |= (((root_pt >> 4) << SMMUV3_STE_S2TTB_OFF) & SMMUV3_STE_S2TTB_MASK);
+        smmuv3.hw.st[sid].s2_ttb = s2_ttb;
+
+        // Remaining fields are ignored
+        smmuv3.hw.st[sid].partid = 0;
+        smmuv3.hw.st[sid].sec_cfg = 0;
+        smmuv3.hw.st[sid].vms_ptr = 0;
+        smmuv3.hw.st[sid].sec_s2ttb = 0;
+        smmuv3.hw.st[sid].__res0 = 0;
+
+        // TODO:
+        //  Stall fault model ?
+        //  Configure first-stage translation. Second-stage only by now
+    }
+    spin_unlock(&smmuv3.st_lock);
+}
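A worked example of the stage-2 geometry computed above, assuming a platform with 40-bit physical addresses (Armv8 PARange encoding 0b010):

```c
size_t pa_bits = 40;        // parange_table[0b010]
size_t t0sz = 64 - pa_bits; // 24: the IPA space spans 2^40 bytes
// Since 40 < 44, the table walk starts at level 1 (S2SL0 = 0b01) using
// concatenated level-1 tables, instead of a full level-0 walk (S2SL0 = 0b10).
```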
diff --git a/src/arch/armv8/armv8-r/mpu.c b/src/arch/armv8/armv8-r/mpu.c
index 8acf13fe..cd715a2e 100644
--- a/src/arch/armv8/armv8-r/mpu.c
+++ b/src/arch/armv8/armv8-r/mpu.c
@@ -116,7 +116,7 @@ static inline void mpu_entry_set_perms(struct mp_region* mpr, struct mpu_perms m
     bool el1_priv = mpu_perms.el1 != PERM_NONE;
     perms_t perms = mpu_perms.el1 | mpu_perms.el2;
 
-    mpr->mem_flags.prbar &= (uint16_t) ~(PRBAR_PERMS_FLAGS_MSK);
+    mpr->mem_flags.prbar &= (uint16_t)~(PRBAR_PERMS_FLAGS_MSK);
     if (perms & PERM_W) {
         mpr->mem_flags.prbar |= PRBAR_AP_RW_EL2;
     } else {
diff --git a/src/core/inc/config.h b/src/core/inc/config.h
index 7154e7e2..7cdefc48 100644
--- a/src/core/inc/config.h
+++ b/src/core/inc/config.h
@@ -30,23 +30,27 @@
 // clang-format on
 
 #define VM_IMAGE_OFFSET(img_name) ((paddr_t) & _##img_name##_vm_beg)
-#define VM_IMAGE_SIZE(img_name) ((size_t) & _##img_name##_vm_size)
+#define VM_IMAGE_SIZE(img_name) ((size_t)&_##img_name##_vm_size)
 #else
 #define VM_IMAGE(img_name, img_path)
 #define VM_IMAGE_OFFSET(img_name) ((paddr_t)0)
 #define VM_IMAGE_SIZE(img_name) ((size_t)0)
 #endif
 
-#define VM_IMAGE_BUILTIN(img_name, image_base_addr) \
-    { \
-        .base_addr = image_base_addr, .load_addr = VM_IMAGE_OFFSET(img_name), \
-        .size = VM_IMAGE_SIZE(img_name), .separately_loaded = false, \
+#define VM_IMAGE_BUILTIN(img_name, image_base_addr) \
+    { \
+        .base_addr = image_base_addr, \
+        .load_addr = VM_IMAGE_OFFSET(img_name), \
+        .size = VM_IMAGE_SIZE(img_name), \
+        .separately_loaded = false, \
     }
 
-#define VM_IMAGE_LOADED(image_base_addr, image_load_addr, image_size) \
-    { \
-        .base_addr = image_base_addr, .load_addr = image_load_addr, .size = image_size, \
-        .separately_loaded = true, \
+#define VM_IMAGE_LOADED(image_base_addr, image_load_addr, image_size) \
+    { \
+        .base_addr = image_base_addr, \
+        .load_addr = image_load_addr, \
+        .size = image_size, \
+        .separately_loaded = true, \
     }
 
 /* CONFIG_HEADER is just defined for compatibility with older configs */
diff --git a/src/core/inc/cpu.h b/src/core/inc/cpu.h
index d14fac38..70c91533 100644
--- a/src/core/inc/cpu.h
+++ b/src/core/inc/cpu.h
@@ -48,9 +48,9 @@ void cpu_send_msg(cpuid_t cpu, struct cpu_msg* msg);
 
 typedef void (*cpu_msg_handler_t)(uint32_t event, uint64_t data);
 
-#define CPU_MSG_HANDLER(handler, handler_id) \
-    __attribute__((section(".ipi_cpumsg_handlers"), used)) \
-    cpu_msg_handler_t __cpumsg_handler_##handler = handler; \
+#define CPU_MSG_HANDLER(handler, handler_id) \
+    __attribute__((section(".ipi_cpumsg_handlers"), \
+        used)) cpu_msg_handler_t __cpumsg_handler_##handler = handler; \
     __attribute__((section(".ipi_cpumsg_handlers_id"), used)) volatile const size_t handler_id;
 
 struct cpu_synctoken {
diff --git a/src/core/inc/types.h b/src/core/inc/types.h
index 3b7ca4fe..0bd35ef8 100644
--- a/src/core/inc/types.h
+++ b/src/core/inc/types.h
@@ -25,23 +25,23 @@ typedef signed long ssize_t;
 typedef unsigned long asid_t;
 typedef unsigned long vmid_t;
-#define INVALID_VMID ((vmid_t)-1)
+#define INVALID_VMID ((vmid_t) - 1)
 
 typedef uintptr_t paddr_t;
 typedef uintptr_t regaddr_t;
 typedef uintptr_t vaddr_t;
-#define MAX_VA ((vaddr_t)-1)
+#define MAX_VA ((vaddr_t) - 1)
 #define INVALID_VA MAX_VA
 
 typedef size_t mpid_t;
-#define INVALID_MPID ((mpid_t)-1)
+#define INVALID_MPID ((mpid_t) - 1)
 
 typedef unsigned long colormap_t;
 typedef unsigned long cpuid_t;
 typedef unsigned long vcpuid_t;
 typedef unsigned long cpumap_t;
-#define INVALID_CPUID ((cpuid_t)-1)
+#define INVALID_CPUID ((cpuid_t) - 1)
 
 typedef unsigned irqid_t;
diff --git a/src/lib/inc/bit.h b/src/lib/inc/bit.h
index 168b6061..8512fdad 100644
--- a/src/lib/inc/bit.h
+++ b/src/lib/inc/bit.h
@@ -14,9 +14,9 @@
  * word length masks with the cost of an extra shift instruction. For static masks, there should
  * be no extra costs.
  */
-#define BIT32_MASK(OFF, LEN) ((((UINT32_C(1) << ((LEN)-1)) << 1) - 1) << (OFF))
-#define BIT64_MASK(OFF, LEN) ((((UINT64_C(1) << ((LEN)-1)) << 1) - 1) << (OFF))
-#define BIT_MASK(OFF, LEN) (((((1UL) << ((LEN)-1)) << 1) - 1) << (OFF))
+#define BIT32_MASK(OFF, LEN) ((((UINT32_C(1) << ((LEN) - 1)) << 1) - 1) << (OFF))
+#define BIT64_MASK(OFF, LEN) ((((UINT64_C(1) << ((LEN) - 1)) << 1) - 1) << (OFF))
+#define BIT_MASK(OFF, LEN) (((((1UL) << ((LEN) - 1)) << 1) - 1) << (OFF))
 
 #ifndef __ASSEMBLER__
 
@@ -52,7 +52,7 @@
             mask <<= 1U; \
             pos++; \
         } \
-        return (mask != 0U) ? pos : (ssize_t)-1; \
+        return (mask != 0U) ? pos : (ssize_t) - 1; \
     } \
     static inline size_t PRE##_count(TYPE word) \
     { \
diff --git a/src/lib/inc/util.h b/src/lib/inc/util.h
index dd9066d3..2cbc6de6 100644
--- a/src/lib/inc/util.h
+++ b/src/lib/inc/util.h
@@ -9,20 +9,20 @@
 /* UTILITY MACROS */
 
 /* align VAL to TO which must be a power of two */
-#define ALIGN(VAL, TO) ((((VAL) + (TO)-1) / (TO)) * TO)
+#define ALIGN(VAL, TO) ((((VAL) + (TO) - 1) / (TO)) * TO)
 #define IS_ALIGNED(VAL, TO) (!((VAL) % (TO)))
-#define ALIGN_FLOOR(VAL, TO) ((VAL) & ~((TO)-1))
+#define ALIGN_FLOOR(VAL, TO) ((VAL) & ~((TO) - 1))
 #define NUM_PAGES(SZ) (ALIGN(SZ, PAGE_SIZE) / PAGE_SIZE)
-#define PAGE_OFFSET_MASK ((PAGE_SIZE)-1)
+#define PAGE_OFFSET_MASK ((PAGE_SIZE) - 1)
 #define PAGE_FRAME_MASK (~(PAGE_OFFSET_MASK))
 
 #define SR_OR(VAL, SHIFT) (((VAL) >> (SHIFT)) | VAL)
 /* Next Power Of Two */
-#define NPOT(VAL) \
-    ((SR_OR(((VAL)-1), 1) | SR_OR(SR_OR(((VAL)-1), 1), 2) | \
-      SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4) | \
-      SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8) | \
-      SR_OR(SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8), 16)) + \
+#define NPOT(VAL) \
+    ((SR_OR(((VAL) - 1), 1) | SR_OR(SR_OR(((VAL) - 1), 1), 2) | \
+         SR_OR(SR_OR(SR_OR(((VAL) - 1), 1), 2), 4) | \
+         SR_OR(SR_OR(SR_OR(SR_OR(((VAL) - 1), 1), 2), 4), 8) | \
+         SR_OR(SR_OR(SR_OR(SR_OR(SR_OR(((VAL) - 1), 1), 2), 4), 8), 16)) + \
      1)
 
 /* Previous Power Of Two */
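Quick compile-time spot checks of the reflowed macros (suggested, not part of the patch; the values follow directly from the definitions above):

```c
_Static_assert(BIT32_MASK(4, 4) == 0xF0U, "BIT32_MASK builds an in-place field mask");
_Static_assert(ALIGN(0x1001, 0x1000) == 0x2000, "ALIGN rounds up to the next multiple");
_Static_assert(NPOT(33) == 64, "NPOT rounds up to the next power of two");
```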