@@ -1,7 +1,6 @@
 use core::fmt::Debug;
 use core::ptr;
 
-use x86_64::instructions::tlb;
 use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
 #[cfg(feature = "common-os")]
 use x86_64::registers::segmentation::SegmentSelector;
@@ -13,12 +12,12 @@ use x86_64::structures::paging::mapper::{MappedFrame, TranslateResult, UnmapErro
 use x86_64::structures::paging::page::PageRange;
 use x86_64::structures::paging::{
 	Mapper, OffsetPageTable, Page, PageTable, PageTableIndex, PhysFrame, RecursivePageTable,
-	Size2MiB, Size4KiB, Translate,
+	Size4KiB, Translate,
 };
 
 use crate::arch::x86_64::kernel::processor;
 use crate::arch::x86_64::mm::{PhysAddr, VirtAddr, physicalmem};
-use crate::{env, mm, scheduler};
+use crate::{env, scheduler};
 
 pub trait PageTableEntryFlagsExt {
 	fn device(&mut self) -> &mut Self;
@@ -93,16 +92,6 @@ pub use x86_64::structures::paging::{
 	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
 };
 
-/// Returns a recursive page table mapping, its last entry is mapped to the table itself
-unsafe fn recursive_page_table() -> RecursivePageTable<'static> {
-	let level_4_table_addr = 0xffff_ffff_ffff_f000;
-	let level_4_table_ptr = ptr::with_exposed_provenance_mut(level_4_table_addr);
-	unsafe {
-		let level_4_table = &mut *(level_4_table_ptr);
-		RecursivePageTable::new(level_4_table).unwrap()
-	}
-}
-
 /// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
 pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
 	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
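
Note: the deleted recursive_page_table() used the classic recursive-mapping trick: the last entry (index 511) of the level-4 table points back at the table itself, which makes the table addressable at the fixed virtual address 0xffff_ffff_ffff_f000. After this change everything goes through identity_mapped_page_table(), an OffsetPageTable with offset zero, which is valid because the kernel identity-maps physical memory. A minimal sketch (illustrative, not kernel API) of where the removed constant comes from:

    // Walking indices 511/511/511/511 lands on the L4 table itself.
    // The per-level index shifts on x86_64 are 39/30/21/12 bits.
    fn recursive_l4_address() -> u64 {
        let i = 511u64;
        let low = (i << 39) | (i << 30) | (i << 21) | (i << 12); // 0xffff_ffff_f000
        low | 0xffff_0000_0000_0000 // canonical sign extension of bit 47
    }
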
@@ -118,11 +107,7 @@ pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
 pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
 	let addr = x86_64::VirtAddr::from(virtual_address);
 
-	let translate_result = if env::is_uefi() {
-		unsafe { identity_mapped_page_table() }.translate(addr)
-	} else {
-		unsafe { recursive_page_table() }.translate(addr)
-	};
+	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
 
 	match translate_result {
 		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
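
Note: with the UEFI branch gone, translation always goes through the x86_64 crate's Translate trait on the zero-offset table. A hedged sketch of that API as used here (phys_of is a hypothetical name; the real virtual_to_physical also handles huge-page frames via MappedFrame):

    fn phys_of(addr: x86_64::VirtAddr) -> Option<x86_64::PhysAddr> {
        use x86_64::structures::paging::Translate;
        use x86_64::structures::paging::mapper::TranslateResult;

        let pt = unsafe { identity_mapped_page_table() };
        match pt.translate(addr) {
            // frame.start_address() is the mapped frame; offset is the
            // position of addr within that frame.
            TranslateResult::Mapped { frame, offset, .. } => Some(frame.start_address() + offset),
            TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => None,
        }
    }
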
@@ -202,11 +187,7 @@ pub fn map<S>(
 		unmapped
 	}
 
-	let unmapped = if env::is_uefi() {
-		unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) }
-	} else {
-		unsafe { map_pages(&mut recursive_page_table(), pages, frames, flags) }
-	};
+	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };
 
 	if unmapped {
 		#[cfg(feature = "smp")]
@@ -247,18 +228,15 @@ where
 	for<'a> OffsetPageTable<'a>: Mapper<S>,
 {
 	assert!(
-		frame.start_address().as_u64() < mm::kernel_start_address().as_u64(),
+		frame.start_address().as_u64() < crate::mm::kernel_start_address().as_u64(),
 		"Address {:p} to be identity-mapped is not below Kernel start address",
 		frame.start_address()
 	);
 
 	let flags = PageTableEntryFlags::PRESENT | PageTableEntryFlags::NO_EXECUTE;
 	let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
-	let mapper_result = if env::is_uefi() {
-		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) }
-	} else {
-		unsafe { recursive_page_table().identity_map(frame, flags, &mut *frame_allocator) }
-	};
+	let mapper_result =
+		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };
 	mapper_result.unwrap().flush();
 }
 
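Note: Mapper::identity_map is a convenience that maps the page at a frame's own address to that frame; the locked frame allocator is only needed when intermediate page tables have to be created. Conceptually it behaves like this sketch (a simplification, assuming 4 KiB pages and PRESENT-only flags):

    use x86_64::structures::paging::{
        FrameAllocator, Mapper, Page, PageTableFlags, PhysFrame, Size4KiB,
    };

    unsafe fn identity_map_4k<M, A>(mapper: &mut M, frame: PhysFrame<Size4KiB>, alloc: &mut A)
    where
        M: Mapper<Size4KiB>,
        A: FrameAllocator<Size4KiB>,
    {
        // The target page sits at the same address as the frame.
        let page = Page::containing_address(x86_64::VirtAddr::new(frame.start_address().as_u64()));
        unsafe { mapper.map_to(page, frame, PageTableFlags::PRESENT, alloc) }
            .unwrap()
            .flush(); // invalidate the stale TLB entry for this page
    }
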
@@ -278,11 +256,7 @@ where
 	let range = Page::range(first_page, last_page);
 
 	for page in range {
-		let unmap_result = if env::is_uefi() {
-			unsafe { identity_mapped_page_table() }.unmap(page)
-		} else {
-			unsafe { recursive_page_table() }.unmap(page)
-		};
+		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
 		match unmap_result {
 			Ok((_frame, flush)) => flush.flush(),
 			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
@@ -378,33 +352,7 @@ fn make_p4_writable() {
 	unsafe { without_protect(make_writable) }
 }
 
-pub fn init_page_tables() {
-	if env::is_uhyve() {
-		// Uhyve identity-maps the first Gibibyte of memory (512 page table entries * 2MiB pages)
-		// We now unmap all memory after the kernel image, so that we can remap it ourselves later for the heap.
-		// Ideally, uhyve would only map as much memory as necessary, but this requires a hermit-entry ABI jump.
-		// See https://github.com/hermit-os/uhyve/issues/426
-		let kernel_end_addr = x86_64::VirtAddr::new(mm::kernel_end_address().as_u64());
-		let start_page = Page::<Size2MiB>::from_start_address(kernel_end_addr).unwrap();
-		let end_page = Page::from_page_table_indices_2mib(
-			start_page.p4_index(),
-			start_page.p3_index(),
-			PageTableIndex::new(511),
-		);
-		let page_range = Page::range_inclusive(start_page, end_page);
-
-		let mut page_table = unsafe { recursive_page_table() };
-		for page in page_range {
-			match page_table.unmap(page) {
-				Ok((_frame, flush)) => flush.ignore(),
-				Err(UnmapError::PageNotMapped) => {} // If it wasn't mapped, that's not an issue
-				Err(e) => panic!("Couldn't unmap page {page:?}: {e:?}"),
-			}
-		}
-
-		tlb::flush_all();
-	}
-}
+pub fn init_page_tables() {}
 
 #[allow(dead_code)]
 unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
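
Note: this removal also explains the dropped use x86_64::instructions::tlb; at the top of the file: the bulk flush_all() after the uhyve unmap loop was its last user. Per-page invalidation via MapperFlush::flush() remains on every map/unmap. For reference, the two granularities the x86_64 crate offers:

    use x86_64::VirtAddr;
    use x86_64::instructions::tlb;

    fn invalidate_one(addr: VirtAddr) {
        tlb::flush(addr); // invlpg: drops the translation for one page
    }

    fn invalidate_all() {
        tlb::flush_all(); // reloads CR3: drops all non-global translations
    }
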
@@ -448,13 +396,8 @@ unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
 unsafe fn print_page_table_entries(page_table_indices: &[PageTableIndex]) {
 	assert!(page_table_indices.len() <= 4);
 
-	// Recursive
-	let recursive_page_table = unsafe { recursive_page_table() };
-	let mut pt = recursive_page_table.level_4_table();
-
-	// Identity mapped
-	// let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
-	// let pt = identity_mapped_page_table.level_4_table();
+	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
+	let mut pt = identity_mapped_page_table.level_4_table();
 
 	for (i, page_table_index) in page_table_indices.iter().copied().enumerate() {
 		let level = 4 - i;
@@ -500,13 +443,8 @@ pub(crate) unsafe fn print_page_tables(levels: usize) {
 		}
 	}
 
-	// Recursive
-	let recursive_page_table = unsafe { recursive_page_table() };
-	let pt = recursive_page_table.level_4_table();
-
-	// Identity mapped
-	// let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
-	// let pt = identity_mapped_page_table.level_4_table();
+	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
+	let pt = identity_mapped_page_table.level_4_table();
 
 	print(pt, 4, 5 - levels);
 }