Skip to content

Commit 1dff3cb

Browse files
authored
Merge pull request #1609 from hermit-os/identity-mem
feat: use identity-mapped page tables instead of recursive ones
2 parents f0e229d + 8073a55 commit 1dff3cb

File tree

9 files changed

+85
-282
lines changed

9 files changed

+85
-282
lines changed

Cargo.lock

-10
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

-1
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,6 @@ features = [
160160

161161
[target.'cfg(target_arch = "x86_64")'.dependencies]
162162
free-list = { version = "0.3", features = ["x86_64"] }
163-
multiboot = "0.8"
164163
raw-cpuid = "11"
165164
uart_16550 = "0.3"
166165
x86_64 = "0.15"

src/arch/x86_64/kernel/mod.rs

-15
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
#[cfg(feature = "common-os")]
22
use core::arch::asm;
3-
use core::num::NonZeroU64;
43
use core::ptr;
54
use core::sync::atomic::{AtomicPtr, AtomicU32, Ordering};
65
use core::task::Waker;
@@ -101,20 +100,6 @@ pub fn get_image_size() -> usize {
101100
(range.end - range.start) as usize
102101
}
103102

104-
pub fn get_limit() -> usize {
105-
env::boot_info().hardware_info.phys_addr_range.end as usize
106-
}
107-
108-
pub fn get_mbinfo() -> Option<NonZeroU64> {
109-
match env::boot_info().platform_info {
110-
PlatformInfo::Multiboot {
111-
multiboot_info_addr,
112-
..
113-
} => Some(multiboot_info_addr),
114-
_ => None,
115-
}
116-
}
117-
118103
#[cfg(feature = "smp")]
119104
pub fn get_possible_cpus() -> u32 {
120105
use core::cmp;

src/arch/x86_64/kernel/scheduler.rs

+21-24
Original file line numberDiff line numberDiff line change
@@ -115,32 +115,29 @@ impl TaskStacks {
115115
let mut flags = PageTableEntryFlags::empty();
116116
flags.normal().writable().execute_disable();
117117

118-
// For UEFI systems, the entire memory is already mapped, just clear the stack for safety
119-
if !env::is_uefi() {
120-
// map IST1 into the address space
121-
crate::arch::mm::paging::map::<BasePageSize>(
122-
virt_addr + BasePageSize::SIZE,
123-
phys_addr,
124-
IST_SIZE / BasePageSize::SIZE as usize,
125-
flags,
126-
);
118+
// map IST1 into the address space
119+
crate::arch::mm::paging::map::<BasePageSize>(
120+
virt_addr + BasePageSize::SIZE,
121+
phys_addr,
122+
IST_SIZE / BasePageSize::SIZE as usize,
123+
flags,
124+
);
127125

128-
// map kernel stack into the address space
129-
crate::arch::mm::paging::map::<BasePageSize>(
130-
virt_addr + IST_SIZE + 2 * BasePageSize::SIZE,
131-
phys_addr + IST_SIZE,
132-
DEFAULT_STACK_SIZE / BasePageSize::SIZE as usize,
133-
flags,
134-
);
126+
// map kernel stack into the address space
127+
crate::arch::mm::paging::map::<BasePageSize>(
128+
virt_addr + IST_SIZE + 2 * BasePageSize::SIZE,
129+
phys_addr + IST_SIZE,
130+
DEFAULT_STACK_SIZE / BasePageSize::SIZE as usize,
131+
flags,
132+
);
135133

136-
// map user stack into the address space
137-
crate::arch::mm::paging::map::<BasePageSize>(
138-
virt_addr + IST_SIZE + DEFAULT_STACK_SIZE + 3 * BasePageSize::SIZE,
139-
phys_addr + IST_SIZE + DEFAULT_STACK_SIZE,
140-
user_stack_size / BasePageSize::SIZE as usize,
141-
flags,
142-
);
143-
}
134+
// map user stack into the address space
135+
crate::arch::mm::paging::map::<BasePageSize>(
136+
virt_addr + IST_SIZE + DEFAULT_STACK_SIZE + 3 * BasePageSize::SIZE,
137+
phys_addr + IST_SIZE + DEFAULT_STACK_SIZE,
138+
user_stack_size / BasePageSize::SIZE as usize,
139+
flags,
140+
);
144141

145142
// clear user stack
146143
unsafe {

src/arch/x86_64/mm/mod.rs

-28
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@ pub(crate) mod paging;
22
pub(crate) mod physicalmem;
33
pub(crate) mod virtualmem;
44

5-
use core::slice;
6-
75
use memory_addresses::arch::x86_64::{PhysAddr, VirtAddr};
86
#[cfg(feature = "common-os")]
97
use x86_64::structures::paging::{PageSize, Size4KiB as BasePageSize};
@@ -12,32 +10,6 @@ pub use self::paging::init_page_tables;
1210
#[cfg(feature = "common-os")]
1311
use crate::arch::mm::paging::{PageTableEntryFlags, PageTableEntryFlagsExt};
1412

15-
/// Memory translation, allocation and deallocation for MultibootInformation
16-
struct MultibootMemory;
17-
18-
impl multiboot::information::MemoryManagement for MultibootMemory {
19-
unsafe fn paddr_to_slice(
20-
&self,
21-
p: multiboot::information::PAddr,
22-
size: usize,
23-
) -> Option<&'static [u8]> {
24-
unsafe { Some(slice::from_raw_parts(p as _, size)) }
25-
}
26-
27-
unsafe fn allocate(
28-
&mut self,
29-
_length: usize,
30-
) -> Option<(multiboot::information::PAddr, &mut [u8])> {
31-
None
32-
}
33-
34-
unsafe fn deallocate(&mut self, addr: multiboot::information::PAddr) {
35-
if addr != 0 {
36-
unimplemented!()
37-
}
38-
}
39-
}
40-
4113
#[cfg(feature = "common-os")]
4214
pub fn create_new_root_page_table() -> usize {
4315
use x86_64::registers::control::Cr3;

src/arch/x86_64/mm/paging.rs

+13-75
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
use core::fmt::Debug;
22
use core::ptr;
33

4-
use x86_64::instructions::tlb;
54
use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
65
#[cfg(feature = "common-os")]
76
use x86_64::registers::segmentation::SegmentSelector;
@@ -13,12 +12,12 @@ use x86_64::structures::paging::mapper::{MappedFrame, TranslateResult, UnmapErro
1312
use x86_64::structures::paging::page::PageRange;
1413
use x86_64::structures::paging::{
1514
Mapper, OffsetPageTable, Page, PageTable, PageTableIndex, PhysFrame, RecursivePageTable,
16-
Size2MiB, Size4KiB, Translate,
15+
Size4KiB, Translate,
1716
};
1817

1918
use crate::arch::x86_64::kernel::processor;
2019
use crate::arch::x86_64::mm::{PhysAddr, VirtAddr, physicalmem};
21-
use crate::{env, mm, scheduler};
20+
use crate::{env, scheduler};
2221

2322
pub trait PageTableEntryFlagsExt {
2423
fn device(&mut self) -> &mut Self;
@@ -93,16 +92,6 @@ pub use x86_64::structures::paging::{
9392
PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
9493
};
9594

96-
/// Returns a recursive page table mapping, its last entry is mapped to the table itself
97-
unsafe fn recursive_page_table() -> RecursivePageTable<'static> {
98-
let level_4_table_addr = 0xffff_ffff_ffff_f000;
99-
let level_4_table_ptr = ptr::with_exposed_provenance_mut(level_4_table_addr);
100-
unsafe {
101-
let level_4_table = &mut *(level_4_table_ptr);
102-
RecursivePageTable::new(level_4_table).unwrap()
103-
}
104-
}
105-
10695
/// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
10796
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
10897
let level_4_table_addr = Cr3::read().0.start_address().as_u64();
@@ -118,11 +107,7 @@ pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
118107
pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
119108
let addr = x86_64::VirtAddr::from(virtual_address);
120109

121-
let translate_result = if env::is_uefi() {
122-
unsafe { identity_mapped_page_table() }.translate(addr)
123-
} else {
124-
unsafe { recursive_page_table() }.translate(addr)
125-
};
110+
let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
126111

127112
match translate_result {
128113
TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
@@ -202,11 +187,7 @@ pub fn map<S>(
202187
unmapped
203188
}
204189

205-
let unmapped = if env::is_uefi() {
206-
unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) }
207-
} else {
208-
unsafe { map_pages(&mut recursive_page_table(), pages, frames, flags) }
209-
};
190+
let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };
210191

211192
if unmapped {
212193
#[cfg(feature = "smp")]
@@ -247,18 +228,15 @@ where
247228
for<'a> OffsetPageTable<'a>: Mapper<S>,
248229
{
249230
assert!(
250-
frame.start_address().as_u64() < mm::kernel_start_address().as_u64(),
231+
frame.start_address().as_u64() < crate::mm::kernel_start_address().as_u64(),
251232
"Address {:p} to be identity-mapped is not below Kernel start address",
252233
frame.start_address()
253234
);
254235

255236
let flags = PageTableEntryFlags::PRESENT | PageTableEntryFlags::NO_EXECUTE;
256237
let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
257-
let mapper_result = if env::is_uefi() {
258-
unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) }
259-
} else {
260-
unsafe { recursive_page_table().identity_map(frame, flags, &mut *frame_allocator) }
261-
};
238+
let mapper_result =
239+
unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };
262240
mapper_result.unwrap().flush();
263241
}
264242

@@ -278,11 +256,7 @@ where
278256
let range = Page::range(first_page, last_page);
279257

280258
for page in range {
281-
let unmap_result = if env::is_uefi() {
282-
unsafe { identity_mapped_page_table() }.unmap(page)
283-
} else {
284-
unsafe { recursive_page_table() }.unmap(page)
285-
};
259+
let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
286260
match unmap_result {
287261
Ok((_frame, flush)) => flush.flush(),
288262
// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
@@ -378,33 +352,7 @@ fn make_p4_writable() {
378352
unsafe { without_protect(make_writable) }
379353
}
380354

381-
pub fn init_page_tables() {
382-
if env::is_uhyve() {
383-
// Uhyve identity-maps the first Gibibyte of memory (512 page table entries * 2MiB pages)
384-
// We now unmap all memory after the kernel image, so that we can remap it ourselves later for the heap.
385-
// Ideally, uhyve would only map as much memory as necessary, but this requires a hermit-entry ABI jump.
386-
// See https://github.com/hermit-os/uhyve/issues/426
387-
let kernel_end_addr = x86_64::VirtAddr::new(mm::kernel_end_address().as_u64());
388-
let start_page = Page::<Size2MiB>::from_start_address(kernel_end_addr).unwrap();
389-
let end_page = Page::from_page_table_indices_2mib(
390-
start_page.p4_index(),
391-
start_page.p3_index(),
392-
PageTableIndex::new(511),
393-
);
394-
let page_range = Page::range_inclusive(start_page, end_page);
395-
396-
let mut page_table = unsafe { recursive_page_table() };
397-
for page in page_range {
398-
match page_table.unmap(page) {
399-
Ok((_frame, flush)) => flush.ignore(),
400-
Err(UnmapError::PageNotMapped) => {} // If it wasn't mapped, that's not an issue
401-
Err(e) => panic!("Couldn't unmap page {page:?}: {e:?}"),
402-
}
403-
}
404-
405-
tlb::flush_all();
406-
}
407-
}
355+
pub fn init_page_tables() {}
408356

409357
#[allow(dead_code)]
410358
unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
@@ -448,13 +396,8 @@ unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
448396
unsafe fn print_page_table_entries(page_table_indices: &[PageTableIndex]) {
449397
assert!(page_table_indices.len() <= 4);
450398

451-
// Recursive
452-
let recursive_page_table = unsafe { recursive_page_table() };
453-
let mut pt = recursive_page_table.level_4_table();
454-
455-
// Identity mapped
456-
// let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
457-
// let pt = identity_mapped_page_table.level_4_table();
399+
let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
400+
let mut pt = identity_mapped_page_table.level_4_table();
458401

459402
for (i, page_table_index) in page_table_indices.iter().copied().enumerate() {
460403
let level = 4 - i;
@@ -500,13 +443,8 @@ pub(crate) unsafe fn print_page_tables(levels: usize) {
500443
}
501444
}
502445

503-
// Recursive
504-
let recursive_page_table = unsafe { recursive_page_table() };
505-
let pt = recursive_page_table.level_4_table();
506-
507-
// Identity mapped
508-
// let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
509-
// let pt = identity_mapped_page_table.level_4_table();
446+
let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
447+
let pt = identity_mapped_page_table.level_4_table();
510448

511449
print(pt, 4, 5 - levels);
512450
}

0 commit comments

Comments (0)