Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support U-Boot #69

Merged
merged 16 commits into from
Jan 9, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified guest.dtb
Binary file not shown.
2 changes: 0 additions & 2 deletions guest_image/guest.dts
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,6 @@

chosen {
bootargs = "root=/dev/vda rw console=ttyS0";
linux,initrd-end = <0x00 0x9832b230>;
linux,initrd-start = <0x00 0x88200000>;
stdout-path = "/soc/serial@10000000";
rng-seed = <0x9775b34d 0xe39158f2 0x3e9d10b1 0x73692dfd 0x13c15814 0xa4ad203f 0x8b0d62f7 0x6bf8a79d>;
};
Expand Down
33 changes: 18 additions & 15 deletions memory.x
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
MEMORY
{
FLASH (rx) : ORIGIN = 0x80200000, LENGTH = 2M
BOOT_RAM (rw) : ORIGIN = 0x80400000, LENGTH = 6M
FLASH (rx) : ORIGIN = 0x84000000, LENGTH = 2M
BOOT_RAM (rw) : ORIGIN = 0x84200000, LENGTH = 6M
RAM_HEAP (rwx) : ORIGIN = 0xc1000000, LENGTH = 528M
RAM (rwx) : ORIGIN = 0xe2000000, LENGTH = 32M
L2_LIM (rw) : ORIGIN = 0xe4000000, LENGTH = 8M
}

/*
* FLASH (TEXT), 0x8020_0000..0x8040_0000
* BOOT_RAM , 0x8040_0000..0x80a0_0000
* FLASH (TEXT), 0x8400_0000..0x8420_0000
* BOOT_RAM , 0x8420_0000..0x8480_0000
* RAM_HEAP (HEAP), 0xc100_0000..0xe200_0000
* RAM (DATA, BSS), 0xe200_0000..0xe400_0000
* L2_LIM (STACK), 0xe400_0000..0xe480_0000
Expand Down Expand Up @@ -41,34 +41,37 @@ SECTIONS
_top_b_stack = .;
} > BOOT_RAM

.hv_heap (NOLOAD) : ALIGN(1024K)
{
.hv_heap (NOLOAD) : ALIGN(1024K) {
_start_heap = .;
. += _hv_heap_size;
_end_heap = .;
} > REGION_HEAP

.host_dtb : ALIGN(4K)
{
.host_dtb : ALIGN(4K) {
*(.host_dtb);
. = ALIGN(4K);
} > REGION_DATA

.guest_kernel : ALIGN(4K)
{
*(.guest_dtb);
.guest_kernel : ALIGN(4K) {
*(.guest_kernel);
. = ALIGN(4K);
} > REGION_DATA

.guest_dtb : ALIGN(4K)
{
.guest_dtb : ALIGN(4K) {
*(.guest_dtb);
. = ALIGN(4K);
} > REGION_DATA

.root_page_table : ALIGN(16K)
{
.root_page_table : ALIGN(16K) {
*(.root_page_table);
. = ALIGN(4K);
} > REGION_DATA

.bss : ALIGN(4K) {
_start_bss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
. = ALIGN(4K);
_end_bss = .;
} > REGION_BSS
}
78 changes: 56 additions & 22 deletions src/guest.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,8 @@ use elf::{endian::AnyEndian, ElfBytes};
/// Guest Information
#[derive(Debug)]
pub struct Guest {
/// Guest ID
#[allow(clippy::struct_field_names)]
guest_id: usize,
/// HART ID
hart_id: usize,
/// Page table that is passed to guest address
page_table_addr: HostPhysicalAddress,
/// Device tree address
Expand All @@ -42,17 +41,24 @@ impl Guest {
hart_id: usize,
root_page_table: &'static [PageTableEntry; FIRST_LV_PAGE_TABLE_LEN],
guest_dtb: &'static [u8; include_bytes!("../guest.dtb").len()],
memory_region: Range<GuestPhysicalAddress>,
) -> Self {
// calculate guest memory region
let guest_memory_begin: GuestPhysicalAddress =
guest_memory::DRAM_BASE + (hart_id + 1) * guest_memory::DRAM_SIZE_PER_GUEST;
let memory_region =
guest_memory_begin..guest_memory_begin + guest_memory::DRAM_SIZE_PER_GUEST;

let stack_top_addr = HostPhysicalAddress(core::ptr::addr_of!(crate::_stack_start) as usize);
let page_table_addr = HostPhysicalAddress(root_page_table.as_ptr() as usize);

// init page table
page_table::sv39x4::initialize_page_table(page_table_addr);

// load guest dtb to memory
let dtb_addr = Self::map_guest_dtb(hart_id, page_table_addr, guest_dtb);

Guest {
guest_id: hart_id,
hart_id,
page_table_addr: HostPhysicalAddress(root_page_table.as_ptr() as usize),
dtb_addr,
stack_top_addr,
Expand All @@ -61,22 +67,25 @@ impl Guest {
}
}

/// Map guest device tree region
/// Load guest device tree and create corresponding page table
///
/// Guest device tree will be placed at the start of the guest memory region.
fn map_guest_dtb(
hart_id: usize,
page_table_addr: HostPhysicalAddress,
guest_dtb: &'static [u8; include_bytes!("../guest.dtb").len()],
) -> GuestPhysicalAddress {
use PteFlag::{Accessed, Dirty, Read, User, Valid, Write};

assert!(guest_dtb.len() < guest_memory::GUEST_DTB_SIZE_PER_HART);
assert!(guest_dtb.len() < guest_memory::GUEST_DTB_REGION_SIZE);

let guest_dtb_gpa =
guest_memory::DRAM_BASE + hart_id * guest_memory::GUEST_DTB_SIZE_PER_HART;
// guest device tree is loaded at the end of the guest memory region.
let guest_dtb_addr =
guest_memory::DRAM_BASE + hart_id * guest_memory::GUEST_DTB_REGION_SIZE;
let aligned_dtb_size = guest_dtb.len().div_ceil(PAGE_SIZE) * PAGE_SIZE;

for offset in (0..aligned_dtb_size).step_by(PAGE_SIZE) {
let guest_physical_addr = guest_dtb_gpa + offset;
let guest_physical_addr = guest_dtb_addr + offset;

// allocate memory from heap
let aligned_page_size_block_addr = PageBlock::alloc();
Expand All @@ -102,12 +111,12 @@ impl Guest {
);
}

guest_dtb_gpa
guest_dtb_addr
}

/// Return HART(HARdware Thread) id.
pub fn hart_id(&self) -> usize {
self.guest_id
self.hart_id
}

/// Return Stack top (end of memory region)
Expand All @@ -120,6 +129,11 @@ impl Guest {
self.dtb_addr
}

/// Return guest dram space start
pub fn memory_region(&self) -> &Range<GuestPhysicalAddress> {
&self.memory_region
}

/// Return guest dram space start
fn dram_base(&self) -> GuestPhysicalAddress {
self.memory_region.start
Expand Down Expand Up @@ -158,9 +172,12 @@ impl Guest {
.expect("failed to get segments from elf")
.iter()
{
if prog_header.p_type == PT_LOAD && prog_header.p_filesz > 0 {
if prog_header.p_type == PT_LOAD {
assert!(prog_header.p_align >= PAGE_SIZE as u64);
let aligned_segment_size = align_size(prog_header.p_filesz, prog_header.p_align);

let aligned_segment_size = align_size(prog_header.p_memsz, prog_header.p_align);
let segment_file_offset = usize::try_from(prog_header.p_offset).unwrap();
let segment_file_size = usize::try_from(prog_header.p_filesz).unwrap();

for offset in (0..aligned_segment_size).step_by(PAGE_SIZE) {
let guest_physical_addr =
Expand All @@ -170,15 +187,32 @@ impl Guest {
// allocate memory from heap
let aligned_page_size_block_addr = PageBlock::alloc();

// copy elf segment to new heap block
// Determine the range of data to copy
let copy_start = segment_file_offset + offset;
let copy_size = if offset + PAGE_SIZE <= segment_file_size {
PAGE_SIZE
} else {
segment_file_size.saturating_sub(offset)
};

unsafe {
core::ptr::copy(
elf_addr.wrapping_add(
usize::try_from(prog_header.p_offset).unwrap() + offset,
),
aligned_page_size_block_addr.raw() as *mut u8,
PAGE_SIZE,
);
if copy_size > 0 {
// Copy ELF segment data from file
core::ptr::copy(
elf_addr.wrapping_add(copy_start),
aligned_page_size_block_addr.raw() as *mut u8,
copy_size,
);
}

if copy_size < PAGE_SIZE {
// Zero-initialize the remaining part of the page
core::ptr::write_bytes(
(aligned_page_size_block_addr.raw() as *mut u8).add(copy_size),
0,
PAGE_SIZE - copy_size,
);
}
}

// create memory mapping
Expand Down
31 changes: 17 additions & 14 deletions src/hypervisor_init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,7 @@
};
use crate::h_extension::instruction::hfence_gvma_all;
use crate::memmap::{
constant::guest_memory, page_table::sv39x4::ROOT_PAGE_TABLE, GuestPhysicalAddress,
HostPhysicalAddress,
page_table::sv39x4::ROOT_PAGE_TABLE, GuestPhysicalAddress, HostPhysicalAddress,
};
use crate::trap::hstrap_vector;
use crate::ALLOCATOR;
Expand All @@ -31,6 +30,18 @@
// dtb_addr test and hint for register usage.
assert_ne!(dtb_addr, 0);

// clear bss section
unsafe {
use crate::{_end_bss, _start_bss};
use core::ptr::addr_of;

core::slice::from_raw_parts_mut(
addr_of!(_start_bss).cast_mut(),
addr_of!(_end_bss) as usize - addr_of!(_start_bss) as usize,
)
.fill(0);
}

unsafe {
// Initialize global allocator
ALLOCATOR.lock().init(
Expand Down Expand Up @@ -101,15 +112,8 @@
/// * Setup page table
fn vsmode_setup(hart_id: usize, dtb_addr: HostPhysicalAddress) -> ! {
// create new guest data
let guest_id = hart_id + 1;
let guest_memory_begin = guest_memory::DRAM_BASE + guest_id * guest_memory::DRAM_SIZE_PER_GUEST;
let new_guest = Guest::new(hart_id, &ROOT_PAGE_TABLE, &GUEST_DTB);
let root_page_table_addr = HostPhysicalAddress(ROOT_PAGE_TABLE.as_ptr() as usize);
let new_guest = Guest::new(
hart_id,
&ROOT_PAGE_TABLE,
&GUEST_DTB,
guest_memory_begin..guest_memory_begin + guest_memory::DRAM_SIZE_PER_GUEST,
);

// parse device tree
let device_tree = unsafe {
Expand All @@ -120,7 +124,7 @@
};

// initialize hypervisor data
let mut hypervisor_data = unsafe { HYPERVISOR_DATA.lock() };

Check warning on line 127 in src/hypervisor_init.rs

View workflow job for this annotation

GitHub Actions / lint

[clippy] reported by reviewdog 🐶 creating a shared reference to mutable static is discouraged Raw Output: src/hypervisor_init.rs:127:40: warning: creating a shared reference to mutable static is discouraged (static_mut_refs)
hypervisor_data.get_or_init(|| HypervisorData::new(device_tree));

// load guest elf `from GUEST_KERNEL`
Expand All @@ -136,10 +140,9 @@
let (guest_entry_point, elf_end_addr) =
new_guest.load_guest_elf(&guest_elf, GUEST_KERNEL.as_ptr());

// allocate all remain memory region
new_guest.allocate_memory_region(
elf_end_addr..guest_memory_begin + guest_memory::DRAM_SIZE_PER_GUEST,
);
// allocate page tables to all remain guest memory region
let guest_memory_end = new_guest.memory_region().end;
new_guest.allocate_memory_region(elf_end_addr..guest_memory_end);

// set device memory map
hypervisor_data
Expand Down Expand Up @@ -204,7 +207,7 @@
#[inline(never)]
fn hart_entry(hart_id: usize, dtb_addr: GuestPhysicalAddress) -> ! {
// aquire hypervisor data
let hypervisor_data = unsafe { HYPERVISOR_DATA.lock() };

Check warning on line 210 in src/hypervisor_init.rs

View workflow job for this annotation

GitHub Actions / lint

[clippy] reported by reviewdog 🐶 creating a shared reference to mutable static is discouraged Raw Output: src/hypervisor_init.rs:210:36: warning: creating a shared reference to mutable static is discouraged (static_mut_refs)
let stack_top = hypervisor_data.get().unwrap().guest().stack_top();
// release HYPERVISOR_DATA lock
drop(hypervisor_data);
Expand Down
6 changes: 5 additions & 1 deletion src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ static mut HYPERVISOR_DATA: Mutex<OnceCell<HypervisorData>> = Mutex::new(OnceCel
#[link_section = ".host_dtb"]
static HOST_DTB: [u8; include_bytes!("../host.dtb").len()] = *include_bytes!("../host.dtb");

/// Device tree blob that is passed to guest
/// Guest kernel image
#[link_section = ".guest_kernel"]
static GUEST_KERNEL: [u8; include_bytes!("../vmlinux").len()] = *include_bytes!("../vmlinux");

Expand All @@ -62,6 +62,10 @@ extern "C" {
static _hv_heap_size: u8;
/// boot stack top (defined in `memory.x`)
static _top_b_stack: u8;
/// start of bss and sbss section.
static _start_bss: u8;
/// end of bss and sbss section.
static _end_bss: u8;
}

/// Panic handler
Expand Down
7 changes: 7 additions & 0 deletions src/memmap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,13 @@ impl core::ops::Add<usize> for GuestPhysicalAddress {
}
}

impl core::ops::Sub<usize> for GuestPhysicalAddress {
type Output = GuestPhysicalAddress;
fn sub(self, other: usize) -> Self::Output {
GuestPhysicalAddress(self.0 - other)
}
}

impl core::ops::Rem<usize> for GuestPhysicalAddress {
type Output = usize;
fn rem(self, other: usize) -> Self::Output {
Expand Down
25 changes: 11 additions & 14 deletions src/memmap/constant.rs
Original file line number Diff line number Diff line change
@@ -1,18 +1,13 @@
//! Constant for memory map.
//!
//! # Host physical address
//! See `memory.x`.
//! | start | end | region |
//! |---------------|---------------|---------------------|
//! | `0x8000_0000` | `0x8000_XXXX` | text data of hikami |
//!
//! # Guest physical address
//! | start | end | region |
//! |---------------|---------------|------------------------|
//! | `0xXXXX_XXXX` | `0xXXXX_XXXX` | device identity map |
//! | | | |
//! | `0x8000_0000` | `0x8000_2000` | device tree of guest 1 |
//! | `0x9000_0000` | `0xa000_0000` | text data of guest 1 |
//! | start | end | region |
//! |---------------|---------------|--------------------------|
//! | `0xXXXX_XXXX` | `0xXXXX_XXXX` | device identity map |
//! | | | |
//! | `0x8000_0000` | `0x8000_2000` | device tree of guest 1 |
//! | `0x9000_0000` | `0xa000_0000` | Memory region of guest 1 |
//! | `0x9fff_d000` | `0xa000_0000` | Device tree of guest 1 |

/// Max number of HART
pub const MAX_HART_NUM: usize = 8;
Expand All @@ -26,10 +21,12 @@ pub mod guest_memory {

use crate::memmap::GuestPhysicalAddress;

/// Dram base address
/// Dram base address in guest memory
///
/// It starts from as high as `DRAM_SIZE_PER_GUEST` to distinguish from HPA.
pub const DRAM_BASE: GuestPhysicalAddress = GuestPhysicalAddress(super::DRAM_BASE);
/// Dram memory space per HART.
pub const DRAM_SIZE_PER_GUEST: usize = 256 * 1024 * 1024; // 256 MB = 0x1000_0000
/// Guest DTB space size
pub const GUEST_DTB_SIZE_PER_HART: usize = 0x2000;
pub const GUEST_DTB_REGION_SIZE: usize = 0x2000;
}
Loading