Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 1 addition & 16 deletions src/arch/x86_64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@ mod gdt;
#[cfg(target_os = "none")]
mod page_tables;
#[cfg(target_os = "none")]
mod paging;
#[cfg(target_os = "none")]
mod physicalmem;
mod platform;
#[cfg(target_os = "none")]
Expand All @@ -20,19 +18,6 @@ pub use self::platform::{boot_kernel, find_kernel};
const KERNEL_STACK_SIZE: u64 = 32_768;
pub const SERIAL_IO_PORT: u16 = 0x3F8;

#[cfg(target_os = "none")]
unsafe fn map_memory(address: usize, memory_size: usize) -> usize {
use align_address::Align;
use x86_64::structures::paging::{PageSize, PageTableFlags, Size2MiB};

let address = address.align_up(Size2MiB::SIZE as usize);
let page_count = memory_size.align_up(Size2MiB::SIZE as usize) / Size2MiB::SIZE as usize;

paging::map::<Size2MiB>(address, address, page_count, PageTableFlags::WRITABLE);

address
}

#[cfg(target_os = "none")]
pub unsafe fn get_memory(memory_size: u64) -> u64 {
use align_address::Align;
Expand All @@ -41,7 +26,7 @@ pub unsafe fn get_memory(memory_size: u64) -> u64 {
use self::physicalmem::PhysAlloc;

let address = PhysAlloc::allocate((memory_size as usize).align_up(Size2MiB::SIZE as usize));
unsafe { map_memory(address, memory_size as usize) as u64 }
address as u64
}

pub unsafe fn enter_kernel(
Expand Down
115 changes: 113 additions & 2 deletions src/arch/x86_64/page_tables.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,17 @@
//!
//! [rust-lang/rust#51910 (comment)]: https://github.com/rust-lang/rust/issues/51910#issuecomment-1013271838

use core::ptr;
use core::ops::Range;
use core::{fmt, ptr};

use x86_64::structures::paging::{PageSize, PageTableFlags, Size2MiB};
use log::{debug, info, warn};
use x86_64::structures::paging::{
Mapper, OffsetPageTable, PageSize, PageTableFlags, PhysFrame, Size1GiB, Size2MiB,
};
use x86_64::{PhysAddr, VirtAddr};

use self::cpuid::ExtendedProcessorAndProcessorFeatureIdentifiers;
use crate::arch::x86_64::physicalmem::PhysAlloc;

const TABLE_FLAGS: PageTableFlags = PageTableFlags::PRESENT.union(PageTableFlags::WRITABLE);
const PAGE_FLAGS: PageTableFlags = TABLE_FLAGS.union(PageTableFlags::HUGE_PAGE);
Expand Down Expand Up @@ -65,6 +73,109 @@ static mut LEVEL_2_TABLE: PageTable = {
PageTable(page_table)
};

/// Initializes the page tables.
///
/// Identity-maps all physical memory up to `max_phys_addr`, using 1-GiB pages
/// when the CPU supports them and 2-MiB pages otherwise.
///
/// # Safety
///
/// This function may only be called once before modifying the page tables.
pub unsafe fn init(max_phys_addr: usize) {
    debug!("max_phys_addr = {max_phys_addr:#x}");

    // Probe CPUID leaf 0x8000_0001 for 1-GiB (huge) page support.
    let idents = ExtendedProcessorAndProcessorFeatureIdentifiers::new();
    let has_page_1_gb = idents.has_page_1_gb();

    if has_page_1_gb {
        info!("CPU supports 1-GiB pages.");
    } else {
        warn!("CPU does not support 1-GiB pages.");
    }

    if has_page_1_gb {
        // If supported, we replace the existing mapping of 512 2-MiB pages with 1 1-GiB page.
        //
        // Since the mappings themselves do not change, we don't need to flush the TLB.
        // For details, see Section 5.10.2.3 "Details of TLB Use" in the Intel® 64 and IA-32
        // Architectures Software Developer's Manual Volume 3A: System Programming Guide, Part 1.

        info!("Replacing the 2-MiB pages with a 1-GiB page.");

        // Build the raw entry for a huge page at physical address 0: the entry
        // value is the frame address (0) combined with the flag bits. The value
        // is created via a pointer so that provenance is preserved (see the
        // module documentation).
        let flags: usize = PAGE_FLAGS.bits() as usize;
        let addr = 0;
        // SAFETY: Per this function's contract, we are called once before any
        // other modification of the page tables, so we have exclusive access.
        unsafe {
            LEVEL_3_TABLE.0[0] = ptr::with_exposed_provenance_mut(addr + flags);
        }
    }

    // The first GiB is already covered by the prebuilt tables (512 2-MiB pages,
    // or the 1-GiB entry installed above), so mapping starts at 1 GiB.
    let addrs = Size1GiB::SIZE as usize..max_phys_addr;

    if has_page_1_gb {
        identity_map::<Size1GiB>(addrs);
    } else {
        identity_map::<Size2MiB>(addrs);
    }
}

/// Identity-maps the physical byte range `phys_addrs` using pages of size `S`.
///
/// Any page-table frames needed along the way are allocated from `PhysAlloc`.
/// An empty range is a no-op.
fn identity_map<S: PageSize + fmt::Debug>(phys_addrs: Range<usize>)
where
    for<'a> OffsetPageTable<'a>: Mapper<S>,
{
    // Nothing to map for an empty (or inverted) range.
    if phys_addrs.is_empty() {
        return;
    }

    // Turn the half-open byte range into an inclusive frame range. The range is
    // non-empty here, so `end - 1` cannot underflow.
    let start = PhysFrame::<S>::from_start_address(PhysAddr::new(phys_addrs.start as u64)).unwrap();
    let last = PhysFrame::<S>::containing_address(PhysAddr::new((phys_addrs.end - 1) as u64));

    info!("Identity-mapping {start:?}..={last:?}");

    // View the level-4 table through an `OffsetPageTable` with offset 0, i.e.
    // physical addresses are interpreted as virtual addresses unchanged.
    let level_4_table = unsafe { &mut *(&raw mut LEVEL_4_TABLE).cast() };
    let mut page_table = unsafe { OffsetPageTable::new(level_4_table, VirtAddr::new(0)) };

    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;

    for frame in PhysFrame::range_inclusive(start, last) {
        // SAFETY: We are mapping unused pages to unused frames.
        let flush = unsafe { page_table.identity_map(frame, flags, &mut PhysAlloc) };

        // This page was not mapped previously.
        // Thus, we don't need to flush the TLB.
        flush.unwrap().ignore();
    }
}

/// A 4-KiB-aligned page table of 512 raw entries.
///
/// Entries are stored as `*mut ()` rather than `u64` so that entries built
/// from addresses keep their pointer provenance (see the module documentation).
#[repr(align(0x1000))]
#[repr(C)]
pub struct PageTable([*mut (); 512]);

mod cpuid {
    use core::arch::x86_64::{CpuidResult, __cpuid};

    /// Extended Processor and Processor Feature Identifiers
    ///
    /// We could also use the `raw-cpuid` crate instead, but it is slower, bigger, and less ergonomic.
    #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
    pub struct ExtendedProcessorAndProcessorFeatureIdentifiers(CpuidResult);

    impl ExtendedProcessorAndProcessorFeatureIdentifiers {
        /// The CPUID leaf that reports these identifiers.
        const FUNCTION: u32 = 0x8000_0001;

        /// Queries the CPU for this leaf.
        pub fn new() -> Self {
            Self(__cpuid(Self::FUNCTION))
        }

        /// 1-GB large page support.
        #[doc(alias = "Page1GB")]
        pub fn has_page_1_gb(&self) -> bool {
            // EDX bit 26 of leaf 0x8000_0001.
            (self.0.edx >> 26) & 1 != 0
        }
    }
}
112 changes: 0 additions & 112 deletions src/arch/x86_64/paging.rs

This file was deleted.

59 changes: 22 additions & 37 deletions src/arch/x86_64/platform/linux/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,11 @@ use hermit_entry::boot_info::{
use hermit_entry::elf::LoadedKernel;
use linux_boot_params::{BootE820Entry, BootParams};
use log::{error, info};
use x86_64::structures::paging::{PageSize, PageTableFlags, Size2MiB, Size4KiB};
use x86_64::structures::paging::{PageSize, Size2MiB, Size4KiB};

use crate::BootInfoExt;
use crate::arch::x86_64::physicalmem::PhysAlloc;
use crate::arch::x86_64::{KERNEL_STACK_SIZE, SERIAL_IO_PORT, paging};
use crate::arch::x86_64::{KERNEL_STACK_SIZE, SERIAL_IO_PORT, page_tables};
use crate::fdt::Fdt;

unsafe extern "C" {
Expand All @@ -40,28 +40,39 @@ static BOOT_PARAMS: AtomicPtr<BootParams> = AtomicPtr::new(ptr::null_mut());
unsafe extern "C" fn rust_start(boot_params: *mut BootParams) -> ! {
crate::log::init();
BOOT_PARAMS.store(boot_params, Ordering::Relaxed);

let free_addr = ptr::addr_of!(loader_end)
.addr()
.align_up(Size2MiB::SIZE as usize);
// Memory after the highest end address is unused and available for the physical memory manager.
info!("Intializing PhysAlloc with {free_addr:#x}");
PhysAlloc::init(free_addr);

let boot_params_ref = unsafe { BootParams::get() };
let e820_entries = boot_params_ref.e820_entries();
let max_phys_addr = e820_entries
.iter()
.copied()
.map(|entry| entry.addr + entry.size)
.max()
.unwrap();
unsafe {
page_tables::init(max_phys_addr.try_into().unwrap());
}

unsafe {
crate::os::loader_main();
}
}

pub fn find_kernel() -> &'static [u8] {
paging::clean_up();

unsafe {
BootParams::map();
}
let boot_params_ref = unsafe { BootParams::get() };

assert!(boot_params_ref.supported());

let free_addr = ptr::addr_of!(loader_end)
.addr()
.align_up(Size2MiB::SIZE as usize);
// Memory after the highest end address is unused and available for the physical memory manager.
info!("Intializing PhysAlloc with {free_addr:#x}");
PhysAlloc::init(free_addr);

boot_params_ref.map_ramdisk().unwrap()
}

Expand All @@ -76,12 +87,6 @@ pub unsafe fn boot_kernel(kernel_info: LoadedKernel) -> ! {
// determine boot stack address
let stack = (ptr::addr_of!(loader_end).addr() + Size4KiB::SIZE as usize)
.align_up(Size4KiB::SIZE as usize);
paging::map::<Size4KiB>(
stack,
stack,
KERNEL_STACK_SIZE as usize / Size4KiB::SIZE as usize,
PageTableFlags::WRITABLE,
);
let stack = ptr::addr_of_mut!(loader_end).with_addr(stack);
// clear stack
unsafe {
Expand Down Expand Up @@ -165,9 +170,6 @@ impl BootParamsExt for BootParams {
let addr = ptr.expose_provenance();
assert!(addr.is_aligned_to(Size4KiB::SIZE as usize));
assert_ne!(addr, 0);

// Identity-map the boot parameters.
paging::map::<Size4KiB>(addr, addr, 1, PageTableFlags::empty());
}

unsafe fn get() -> &'static Self {
Expand Down Expand Up @@ -207,21 +209,6 @@ impl BootParamsExt for BootParams {
}
assert!(ramdisk_image.is_aligned_to(Size4KiB::SIZE as usize));

// Map the start of the image in 4KiB steps.
let count = (ramdisk_image.align_up(Size2MiB::SIZE as usize) - ramdisk_image)
/ Size4KiB::SIZE as usize;
if count > 0 {
paging::map::<Size4KiB>(ramdisk_image, ramdisk_image, count, PageTableFlags::empty());
}

// Map the rest of the image in 2MiB steps.
let addr = ramdisk_image.align_up(Size2MiB::SIZE as usize);
let count = ((ramdisk_image + ramdisk_size).align_up(Size2MiB::SIZE as usize) - addr)
/ Size2MiB::SIZE as usize;
if count > 0 {
paging::map::<Size2MiB>(addr, addr, count, PageTableFlags::empty());
}

let ramdisk_ptr = ptr::with_exposed_provenance(ramdisk_image);
let ramdisk = unsafe { slice::from_raw_parts(ramdisk_ptr, ramdisk_size) };
Some(ramdisk)
Expand All @@ -235,8 +222,6 @@ impl BootParamsExt for BootParams {
assert_ne!(cmd_line_ptr, 0, "boot protocol is older than 2.02");
assert!(cmd_line_ptr.is_aligned_to(Size4KiB::SIZE as usize));

paging::map::<Size4KiB>(cmd_line_ptr, cmd_line_ptr, 1, PageTableFlags::empty());

let ptr = ptr::with_exposed_provenance(cmd_line_ptr);
let bytes = unsafe { core::slice::from_raw_parts(ptr, cmdline_size) };
CStr::from_bytes_until_nul(bytes).unwrap()
Expand Down
Loading