Next Generation WASM Microkernel Operating System

refactor(kmem): cleanup continued (#633)

* refactor: overhaul address space bootstrapping code

* refactor(kmem): do not tie `PhysMap` to `HardwareAddressSpace`.

Previously every `HardwareAddressSpace` had an associated `PhysMap` that was allocated and mapped during bootstrapping. This, however, turns out to be inflexible
and not quite correct:
- We don't actually want every address space to have its own physmap. In practice there would only be one physmap anyway (which might be globally mapped), but it's a bit awkward
to rely on this unspoken and uncodified assumption.
- Doing this we would potentially be unable to modify userspace address spaces from the root kernel one.

authored by

Jonas Kruckenberg and committed by
GitHub
af4ac5d5 e258a2a7

+306 -278
+219 -71
libs/kmem/src/address_space.rs
··· 1 1 use core::alloc::Layout; 2 2 use core::convert::Infallible; 3 + use core::marker::PhantomData; 3 4 use core::ops::Range; 4 5 5 6 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 6 - use crate::bootstrap::{Bootstrap, BootstrapAllocator}; 7 7 use crate::flush::Flush; 8 8 use crate::physmap::PhysMap; 9 9 use crate::table::{Table, marker}; 10 10 use crate::utils::{PageTableEntries, page_table_entries_for}; 11 11 use crate::{ 12 12 AddressRangeExt, AllocError, FrameAllocator, MemoryAttributes, PhysicalAddress, VirtualAddress, 13 + WriteOrExecute, 13 14 }; 14 15 15 - pub struct HardwareAddressSpace<A: Arch> { 16 + /// Marks an `HardwareAddressSpace` that is still under construction. 17 + pub struct Bootstrapping {} 18 + 19 + /// Marks an active `HardwareAddressSpace`. 20 + pub struct Active {} 21 + 22 + pub struct HardwareAddressSpace<A: Arch, Phase> { 16 23 arch: A, 17 24 root_page_table: Table<A, marker::Owned>, 18 - physmap: PhysMap, 25 + phase: PhantomData<Phase>, 19 26 } 20 27 21 - impl<A: Arch> HardwareAddressSpace<A> { 28 + impl<A: Arch> HardwareAddressSpace<A, Bootstrapping> { 22 29 /// Constructs a new `AddressSpace` with a freshly allocated root page table 23 30 /// that may be used during address space bringup in the `loader`. 24 31 /// ··· 27 34 /// Returns `Err(AllocError)` when allocating the root page table fails. 
28 35 pub fn new( 29 36 arch: A, 30 - physmap: PhysMap, 37 + physmap: &PhysMap, 31 38 frame_allocator: impl FrameAllocator, 32 - flush: &mut Flush, 33 39 ) -> Result<Self, AllocError> { 34 - let root_page_table = Table::allocate(frame_allocator, &physmap, &arch)?; 35 - 36 - flush.invalidate_all(); 40 + let root_page_table = Table::allocate(frame_allocator, physmap, &arch)?; 37 41 38 42 Ok(Self { 39 - physmap, 43 + arch, 40 44 root_page_table, 41 - arch, 45 + phase: PhantomData, 42 46 }) 43 47 } 44 48 45 - /// Constructs a new *bootstrapping* `AddressSpace` with a freshly allocated root page table 46 - /// that may be used during address space bringup in the `loader`. 49 + /// Identity-maps the physical address range with the specified memory attributes. 50 + /// 51 + /// If this returns `Ok`, the mapping is added to the address space. 52 + /// 53 + /// Note that this method **does not** establish any ordering between address space modification 54 + /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the 55 + /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 56 + /// 57 + /// After the modifications have been synchronized with current execution, all accesses to the virtual 58 + /// address range will translate to accesses of the physical address range and adhere to the 59 + /// access rules established by the `MemoryAttributes`. 60 + /// 61 + /// # Safety 62 + /// 63 + /// 1. The entire virtual address range corresponding to `phys` must be unmapped. 64 + /// 2. `phys` must be aligned to `at least the smallest architecture block size. 47 65 /// 48 66 /// # Errors 49 67 /// 50 - /// Returns `Err(AllocError)` when allocating the root page table fails. 
51 - pub fn new_bootstrap<R: lock_api::RawMutex, const MAX_REGIONS: usize>( 52 - arch: A, 53 - future_physmap: PhysMap, 54 - frame_allocator: &BootstrapAllocator<R, MAX_REGIONS>, 55 - flush: &mut Flush, 56 - ) -> Result<Bootstrap<Self>, AllocError> { 57 - let address_space = Self::new(arch, PhysMap::new_bootstrap(), frame_allocator, flush)?; 68 + /// Returning `Err` indicates the mapping cannot be established and the address space remains 69 + /// unaltered. 70 + pub unsafe fn map_identity( 71 + &mut self, 72 + phys: Range<PhysicalAddress>, 73 + attributes: MemoryAttributes, 74 + frame_allocator: impl FrameAllocator, 75 + physmap: &PhysMap, 76 + ) -> Result<(), AllocError> { 77 + debug_assert!( 78 + self.arch.active_table().is_none(), 79 + "During bootstrapping the machine must have no active page table." 80 + ); 81 + 82 + let virt = Range { 83 + start: VirtualAddress::new(phys.start.get()), 84 + end: VirtualAddress::new(phys.end.get()), 85 + }; 86 + 87 + let mut flush = Flush::new(); 88 + 89 + // Safety: ensured by caller. 90 + unsafe { 91 + self.map_contiguous( 92 + virt, 93 + phys.start, 94 + attributes, 95 + frame_allocator, 96 + physmap, 97 + &mut flush, 98 + )?; 99 + } 58 100 59 - Ok(Bootstrap { 60 - address_space, 61 - future_physmap, 62 - }) 101 + // Safety: we're going to invalidate the entire address space after bootstrapping. No need 102 + // to flush in between. 103 + unsafe { flush.ignore() }; 104 + 105 + Ok(()) 63 106 } 64 107 108 + /// Maps the physical memory region managed by the bootstrap allocator into the physmap region 109 + /// described by this architectures memory mode. 110 + /// 111 + /// If this returns `Ok`, the mapping is added to the address space. 112 + /// 113 + /// Note that this method **does not** establish any ordering between address space modification 114 + /// and accesses through the mapping, nor does it imply a page table cache flush. 
To ensure the 115 + /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 116 + /// 117 + /// After the modifications have been synchronized with current execution, all accesses to the virtual 118 + /// address range will translate to accesses of the physical address range and adhere to the 119 + /// access rules established by the `MemoryAttributes`. 120 + /// 121 + /// # Errors 122 + /// 123 + /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain 124 + /// partially altered. The caller should call *unmap* on the virtual address range upon failure. 125 + pub fn map_physical_memory( 126 + &mut self, 127 + regions: impl Iterator<Item = Range<PhysicalAddress>>, 128 + active_physmap: &PhysMap, 129 + chosen_physmap: &PhysMap, 130 + frame_allocator: impl FrameAllocator, 131 + ) -> Result<(), AllocError> { 132 + debug_assert!( 133 + self.arch.active_table().is_none(), 134 + "During bootstrapping the machine must have no active page table." 135 + ); 136 + 137 + let attrs = MemoryAttributes::new() 138 + .with(MemoryAttributes::READ, true) 139 + .with(MemoryAttributes::WRITE_OR_EXECUTE, WriteOrExecute::Write); 140 + 141 + for region_phys in regions { 142 + // NB: use the desired physmap (ie the one used after bootstrapping) 143 + let region_virt = chosen_physmap.phys_to_virt_range(region_phys.clone()); 144 + 145 + let mut flush = Flush::new(); 146 + 147 + // Safety: we just created the address space and `BootstrapAllocator` checks its regions to 148 + // not be overlapping (1.). It will also align regions to at least page size (2., 3.). 149 + unsafe { 150 + self.map_contiguous( 151 + region_virt, 152 + region_phys.start, 153 + attrs, 154 + frame_allocator.by_ref(), 155 + active_physmap, 156 + &mut flush, 157 + )?; 158 + } 159 + 160 + // Safety: we're going to invalidate the entire address space after bootstrapping. No need 161 + // to flush in between. 
162 + unsafe { flush.ignore() }; 163 + } 164 + 165 + Ok(()) 166 + } 167 + 168 + /// Finish the address space bootstrapping phase and activate the address space on this CPU (set 169 + /// this CPUs page table). 170 + /// 171 + /// # Safety 172 + /// 173 + /// After this method returns, all pointers become dangling and as such any access through 174 + /// pre-existing pointers is Undefined Behavior. This includes implicit references by the CPU 175 + /// such as the instruction pointer. 176 + /// 177 + /// This might seem impossible to uphold, except for identity-mappings which we consider valid 178 + /// even after activating the address space. 179 + pub unsafe fn finish_bootstrap_and_activate(self) -> HardwareAddressSpace<A, Active> { 180 + debug_assert!( 181 + self.arch.active_table().is_none(), 182 + "During bootstrapping the machine must have no active page table." 183 + ); 184 + 185 + let Self { 186 + arch, 187 + root_page_table, 188 + .. 189 + } = self; 190 + 191 + // Safety: ensured by caller 192 + unsafe { arch.set_active_table(root_page_table.address()) }; 193 + 194 + // NB: this is load-bearing. We need to ensure to flush the entire address space with all 195 + // CPUs so that it correctly takes effect (especially so if the address space ID was reused). 196 + arch.fence_all(); 197 + 198 + HardwareAddressSpace { 199 + arch, 200 + root_page_table, 201 + phase: PhantomData, 202 + } 203 + } 204 + } 205 + 206 + impl<A: Arch> HardwareAddressSpace<A, Active> { 65 207 /// Constructs a new `AddressSpace` from its raw components: architecture-specific data and the root table. 66 - pub fn from_parts(arch: A, root_page_table: Table<A, marker::Owned>, physmap: PhysMap) -> Self { 208 + /// 209 + /// # Safety 210 + /// 211 + /// The caller must ensure the address space defined by `arch`, `root_page_table`, and `physmap` 212 + /// indeed represents a properly initialized address space according to [`Active`]. 
213 + pub unsafe fn from_parts(arch: A, root_page_table: Table<A, marker::Owned>) -> Self { 67 214 Self { 68 - physmap, 69 215 root_page_table, 70 216 arch, 217 + phase: PhantomData, 71 218 } 72 219 } 73 220 74 221 /// Decomposes an `AddressSpace` into its raw components: architecture-specific data and the root table. 75 - pub fn into_parts(self) -> (A, Table<A, marker::Owned>, PhysMap) { 76 - (self.arch, self.root_page_table, self.physmap) 222 + pub fn into_parts(self) -> (A, Table<A, marker::Owned>) { 223 + (self.arch, self.root_page_table) 77 224 } 225 + } 78 226 227 + impl<A: Arch, Phase> HardwareAddressSpace<A, Phase> { 79 228 pub fn arch(&self) -> &A { 80 229 &self.arch 81 230 } 82 231 83 - pub fn physmap(&self) -> &PhysMap { 84 - &self.physmap 85 - } 232 + // /// Activate the address space on this CPU (set this CPUs page table). 233 + // /// 234 + // /// # Safety 235 + // /// 236 + // /// After this method returns, all pointers become dangling and as such any access through 237 + // /// pre-existing pointers is Undefined Behaviour. This includes implicit references by the CPU 238 + // /// such as the instruction pointer. 239 + // pub unsafe fn activate(&self) { 240 + // todo!() 241 + // // unsafe { (self.vtable.activate)(self.raw, self.root_page_table) } 242 + // } 86 243 87 244 pub const fn granule_size(&self) -> usize { 88 245 A::GRANULE_SIZE ··· 92 249 A::GRANULE_LAYOUT 93 250 } 94 251 95 - /// Activate the address space on this CPU (set this CPUs page table). 96 - /// 97 - /// # Safety 98 - /// 99 - /// After this method returns, all pointers become dangling and as such any access through 100 - /// pre-existing pointers is Undefined Behaviour. This includes implicit references by the CPU 101 - /// such as the instruction pointer. 
102 - pub unsafe fn activate(&self) { 103 - todo!() 104 - // unsafe { (self.vtable.activate)(self.raw, self.root_page_table) } 105 - } 106 - 107 252 /// Return the corresponding [`PhysicalAddress`] and [`MemoryAttributes`] for the given 108 253 /// [`VirtualAddress`] if mapped. The returned [`PageTableLevel`] described the page table level 109 254 /// at which the mapping was found. 110 255 pub fn lookup( 111 256 &self, 112 257 virt: VirtualAddress, 258 + physmap: &PhysMap, 113 259 ) -> Option<(PhysicalAddress, MemoryAttributes, &'static PageTableLevel)> { 114 260 let mut table = self.root_page_table.borrow(); 115 261 116 262 for level in A::LEVELS { 117 263 let entry_index = level.pte_index_of(virt); 118 264 // Safety: `pte_index_of` only returns in-bounds indices. 119 - let entry = unsafe { table.get(entry_index, &self.physmap, &self.arch) }; 265 + let entry = unsafe { table.get(entry_index, physmap, &self.arch) }; 120 266 121 267 if entry.is_table() { 122 268 // Safety: We checked the entry is a table above (1.) know the depth is correct (2.). 
··· 162 308 phys: impl ExactSizeIterator<Item = Range<PhysicalAddress>>, 163 309 attributes: MemoryAttributes, 164 310 frame_allocator: impl FrameAllocator, 311 + physmap: &PhysMap, 165 312 flush: &mut Flush, 166 313 ) -> Result<(), AllocError> { 167 314 for block_phys in phys { ··· 174 321 block_phys.start, 175 322 attributes, 176 323 frame_allocator.by_ref(), 324 + physmap, 177 325 flush, 178 326 )?; 179 327 } ··· 214 362 mut phys: PhysicalAddress, 215 363 attributes: MemoryAttributes, 216 364 frame_allocator: impl FrameAllocator, 365 + physmap: &PhysMap, 217 366 flush: &mut Flush, 218 367 ) -> Result<(), AllocError> { 219 368 debug_assert!( ··· 250 399 } else { 251 400 let frame = frame_allocator.allocate_contiguous_zeroed( 252 401 A::GRANULE_LAYOUT, 253 - &self.physmap, 402 + physmap, 254 403 &self.arch, 255 404 )?; 256 405 ··· 263 412 Ok(()) 264 413 }; 265 414 266 - self.root_page_table.borrow_mut().visit_mut( 267 - virt, 268 - &self.physmap, 269 - &self.arch, 270 - map_contiguous, 271 - )?; 415 + self.root_page_table 416 + .borrow_mut() 417 + .visit_mut(virt, physmap, &self.arch, map_contiguous)?; 272 418 273 419 Ok(()) 274 420 } ··· 298 444 &mut self, 299 445 mut virt: Range<VirtualAddress>, 300 446 phys: impl ExactSizeIterator<Item = Range<PhysicalAddress>>, 447 + physmap: &PhysMap, 301 448 flush: &mut Flush, 302 449 ) -> Result<(), AllocError> { 303 450 for block_phys in phys { ··· 308 455 self.remap_contiguous( 309 456 Range::from_start_len(virt.start, block_phys.len()), 310 457 block_phys.start, 458 + physmap, 311 459 flush, 312 460 ); 313 461 } ··· 338 486 &mut self, 339 487 virt: Range<VirtualAddress>, 340 488 mut phys: PhysicalAddress, 489 + physmap: &PhysMap, 341 490 flush: &mut Flush, 342 491 ) { 343 492 debug_assert!( ··· 378 527 unsafe { 379 528 self.root_page_table 380 529 .borrow_mut() 381 - .visit_mut(virt, &self.physmap, &self.arch, remap_contiguous) 530 + .visit_mut(virt, physmap, &self.arch, remap_contiguous) 382 531 .unwrap_unchecked(); 383 
532 } 384 533 } ··· 401 550 &mut self, 402 551 virt: Range<VirtualAddress>, 403 552 attributes: MemoryAttributes, 553 + physmap: &PhysMap, 404 554 flush: &mut Flush, 405 555 ) { 406 556 debug_assert!( ··· 436 586 unsafe { 437 587 self.root_page_table 438 588 .borrow_mut() 439 - .visit_mut(virt, &self.physmap, &self.arch, set_attributes) 589 + .visit_mut(virt, physmap, &self.arch, set_attributes) 440 590 .unwrap_unchecked(); 441 591 } 442 592 } ··· 459 609 &mut self, 460 610 virt: Range<VirtualAddress>, 461 611 frame_allocator: impl FrameAllocator, 612 + physmap: &PhysMap, 462 613 flush: &mut Flush, 463 614 ) { 464 615 debug_assert!( ··· 474 625 475 626 let table = self.root_page_table.borrow_mut(); 476 627 477 - Self::unmap_inner( 478 - table, 479 - virt, 480 - &self.physmap, 481 - &self.arch, 482 - frame_allocator, 483 - flush, 484 - ); 628 + Self::unmap_inner(table, virt, physmap, &self.arch, frame_allocator, flush); 485 629 } 486 630 487 631 fn unmap_inner( ··· 563 707 ]) 564 708 .finish(); 565 709 566 - let (mut address_space, frame_allocator) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 710 + let (mut address_space, frame_allocator, physmap) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 567 711 568 712 let frame = frame_allocator 569 713 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 579 723 frame, 580 724 MemoryAttributes::new().with(MemoryAttributes::READ, true), 581 725 frame_allocator.by_ref(), 726 + &physmap, 582 727 &mut flush, 583 728 ) 584 729 .unwrap(); 585 730 } 586 731 flush.flush(address_space.arch()); 587 732 588 - let (phys, attrs, lvl) = address_space.lookup(page.start).unwrap(); 733 + let (phys, attrs, lvl) = address_space.lookup(page.start, &physmap).unwrap(); 589 734 590 735 assert_eq!(phys, frame); 591 736 assert_eq!(attrs.allows_read(), true); ··· 600 745 .with_memory_regions([Layout::from_size_align(0xB000, A::GRANULE_SIZE).unwrap()]) 601 746 .finish(); 602 747 603 - let (mut address_space, frame_allocator) = 
machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 748 + let (mut address_space, frame_allocator, physmap) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 604 749 605 750 let frame = frame_allocator 606 751 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 616 761 frame, 617 762 MemoryAttributes::new().with(MemoryAttributes::READ, true), 618 763 frame_allocator.by_ref(), 764 + &physmap, 619 765 &mut flush, 620 766 ) 621 767 .unwrap(); 622 768 } 623 769 flush.flush(address_space.arch()); 624 770 625 - let (phys, attrs, lvl) = address_space.lookup(page.start).unwrap(); 771 + let (phys, attrs, lvl) = address_space.lookup(page.start, &physmap).unwrap(); 626 772 627 773 assert_eq!(phys, frame); 628 774 assert_eq!(attrs.allows_read(), true); ··· 638 784 639 785 let mut flush = Flush::new(); 640 786 unsafe { 641 - address_space.remap_contiguous(page.clone(), new_frame, &mut flush); 787 + address_space.remap_contiguous(page.clone(), new_frame, &physmap, &mut flush); 642 788 } 643 789 flush.flush(address_space.arch()); 644 790 645 - let (phys, attrs, lvl) = address_space.lookup(page.start).unwrap(); 791 + let (phys, attrs, lvl) = address_space.lookup(page.start, &physmap).unwrap(); 646 792 647 793 assert_eq!(phys, new_frame); 648 794 assert_eq!(attrs.allows_read(), true); ··· 657 803 .with_memory_regions([Layout::from_size_align(0xB000, A::GRANULE_SIZE).unwrap()]) 658 804 .finish(); 659 805 660 - let (mut address_space, frame_allocator) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 806 + let (mut address_space, frame_allocator, physmap) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 661 807 662 808 let frame = frame_allocator 663 809 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 673 819 frame, 674 820 MemoryAttributes::new().with(MemoryAttributes::READ, true), 675 821 frame_allocator.by_ref(), 822 + &physmap, 676 823 &mut flush, 677 824 ) 678 825 .unwrap(); 679 826 } 680 827 flush.flush(address_space.arch()); 681 828 682 - let (phys, 
attrs, lvl) = address_space.lookup(page.start).unwrap(); 829 + let (phys, attrs, lvl) = address_space.lookup(page.start, &physmap).unwrap(); 683 830 684 831 assert_eq!(phys, frame); 685 832 assert_eq!(attrs.allows_read(), true); ··· 695 842 page.clone(), 696 843 MemoryAttributes::new() 697 844 .with(MemoryAttributes::WRITE_OR_EXECUTE, WriteOrExecute::Execute), 845 + &physmap, 698 846 &mut flush, 699 847 ); 700 848 } 701 849 flush.flush(address_space.arch()); 702 850 703 - let (phys, attrs, lvl) = address_space.lookup(page.start).unwrap(); 851 + let (phys, attrs, lvl) = address_space.lookup(page.start, &physmap).unwrap(); 704 852 705 853 assert_eq!(phys, frame); 706 854 assert_eq!(attrs.allows_read(), false);
-128
libs/kmem/src/bootstrap.rs
··· 1 - mod frame_allocator; 2 - 3 - use core::ops::Range; 4 - 5 - pub use frame_allocator::{BootstrapAllocator, DEFAULT_MAX_REGIONS}; 6 - 7 - use crate::arch::Arch; 8 - use crate::flush::Flush; 9 - use crate::{ 10 - AllocError, FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, 11 - VirtualAddress, WriteOrExecute, 12 - }; 13 - 14 - pub struct Bootstrap<S> { 15 - pub(crate) address_space: S, 16 - pub(crate) future_physmap: PhysMap, 17 - } 18 - 19 - impl<A: Arch> Bootstrap<HardwareAddressSpace<A>> { 20 - /// Maps the physical memory region managed by the bootstrap allocator into the physmap region 21 - /// described by this architectures memory mode. 22 - /// 23 - /// If this returns `Ok`, the mapping is added to the address space. 24 - /// 25 - /// Note that this method **does not** establish any ordering between address space modification 26 - /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the 27 - /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 28 - /// 29 - /// After the modifications have been synchronized with current execution, all accesses to the virtual 30 - /// address range will translate to accesses of the physical address range and adhere to the 31 - /// access rules established by the `MemoryAttributes`. 32 - /// 33 - /// # Errors 34 - /// 35 - /// Returning `Err` indicates the mapping cannot be established and the address space remains 36 - /// unaltered. 
37 - pub fn map_physical_memory<R: lock_api::RawMutex, const MAX_REGIONS: usize>( 38 - &mut self, 39 - frame_allocator: &BootstrapAllocator<R, MAX_REGIONS>, 40 - flush: &mut Flush, 41 - ) -> Result<(), AllocError> { 42 - let attrs = MemoryAttributes::new() 43 - .with(MemoryAttributes::READ, true) 44 - .with(MemoryAttributes::WRITE_OR_EXECUTE, WriteOrExecute::Write); 45 - 46 - for region_phys in frame_allocator.regions() { 47 - // NB: use the "future" physical memory mapping (ie after bootstrapping) 48 - let region_virt = self.future_physmap.phys_to_virt_range(region_phys.clone()); 49 - 50 - // Safety: we just created the address space and `BootstrapAllocator` checks its regions to 51 - // not be overlapping (1.). It will also align regions to at least page size (2., 3.). 52 - unsafe { 53 - self.address_space.map_contiguous( 54 - region_virt, 55 - region_phys.start, 56 - attrs, 57 - frame_allocator.by_ref(), 58 - flush, 59 - )?; 60 - } 61 - } 62 - 63 - Ok(()) 64 - } 65 - 66 - /// Identity-maps the physical address range with the specified memory attributes. 67 - /// 68 - /// If this returns `Ok`, the mapping is added to the address space. 69 - /// 70 - /// Note that this method **does not** establish any ordering between address space modification 71 - /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the 72 - /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 73 - /// 74 - /// After the modifications have been synchronized with current execution, all accesses to the virtual 75 - /// address range will translate to accesses of the physical address range and adhere to the 76 - /// access rules established by the `MemoryAttributes`. 77 - /// 78 - /// # Safety 79 - /// 80 - /// 1. The entire virtual address range corresponding to `phys` must be unmapped. 81 - /// 2. `phys` must be aligned to `at least the smallest architecture block size. 
82 - /// 83 - /// # Errors 84 - /// 85 - /// Returning `Err` indicates the mapping cannot be established and the address space remains 86 - /// unaltered. 87 - pub unsafe fn map_identity<F>( 88 - &mut self, 89 - phys: Range<PhysicalAddress>, 90 - attributes: MemoryAttributes, 91 - frame_allocator: F, 92 - flush: &mut Flush, 93 - ) -> Result<(), AllocError> 94 - where 95 - F: FrameAllocator, 96 - { 97 - let virt = Range { 98 - start: VirtualAddress::new(phys.start.get()), 99 - end: VirtualAddress::new(phys.end.get()), 100 - }; 101 - 102 - // Safety: ensured by caller. 103 - unsafe { 104 - self.address_space 105 - .map_contiguous(virt, phys.start, attributes, frame_allocator, flush) 106 - } 107 - } 108 - 109 - /// Finish the address space bootstrapping phase and activate the address space on this CPU (set 110 - /// this CPUs page table). 111 - /// 112 - /// # Safety 113 - /// 114 - /// After this method returns, all pointers become dangling and as such any access through 115 - /// pre-existing pointers is Undefined Behavior. This includes implicit references by the CPU 116 - /// such as the instruction pointer. 117 - /// 118 - /// This might seem impossible to uphold, except for identity-mappings which we consider valid 119 - /// even after activating the address space. 120 - pub unsafe fn finish_bootstrap_and_activate(self) -> HardwareAddressSpace<A> { 121 - let (arch, root_table, _) = self.address_space.into_parts(); 122 - 123 - // Safety: ensured by caller 124 - unsafe { arch.set_active_table(root_table.address()) }; 125 - 126 - HardwareAddressSpace::from_parts(arch, root_table, self.future_physmap) 127 - } 128 - }
+42 -43
libs/kmem/src/bootstrap/frame_allocator.rs libs/kmem/src/frame_allocator/bump.rs
··· 18 18 /// 19 19 /// This allocator supports discontiguous physical memory by default. By default, up to [`DEFAULT_MAX_REGIONS`] 20 20 /// but this limit can be adjusted by explicitly specifying the const-generic parameter. 21 - pub struct BootstrapAllocator<R, const MAX_REGIONS: usize = DEFAULT_MAX_REGIONS> 21 + pub struct BumpAllocator<R, const MAX_REGIONS: usize = DEFAULT_MAX_REGIONS> 22 22 where 23 23 R: lock_api::RawMutex, 24 24 { 25 - inner: Mutex<R, BootstrapAllocatorInner<MAX_REGIONS>>, 25 + inner: Mutex<R, BumpAllocatorInner<MAX_REGIONS>>, 26 26 min_align: NonZeroUsize, 27 27 } 28 28 29 29 #[derive(Debug)] 30 - struct BootstrapAllocatorInner<const MAX_REGIONS: usize> { 30 + struct BumpAllocatorInner<const MAX_REGIONS: usize> { 31 31 arenas: ArrayVec<Arena, MAX_REGIONS>, 32 32 current_arena_hint: usize, 33 33 } 34 34 35 - impl<R, const MAX_REGIONS: usize> fmt::Debug for BootstrapAllocator<R, MAX_REGIONS> 35 + impl<R, const MAX_REGIONS: usize> fmt::Debug for BumpAllocator<R, MAX_REGIONS> 36 36 where 37 37 R: lock_api::RawMutex, 38 38 { 39 39 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 40 - f.debug_struct("BootstrapAllocator") 40 + f.debug_struct("BumpAllocator") 41 41 .field("inner", &self.inner.lock()) 42 42 .field("min_align", &self.min_align) 43 43 .finish() 44 44 } 45 45 } 46 46 47 - impl<R, const MAX_REGIONS: usize> BootstrapAllocator<R, MAX_REGIONS> 47 + impl<R, const MAX_REGIONS: usize> BumpAllocator<R, MAX_REGIONS> 48 48 where 49 49 R: lock_api::RawMutex, 50 50 { 51 - /// Constructs a new bootstrap frame allocator from the given regions of physical memory. 51 + /// Constructs a new bump allocator from the given regions of physical memory. 
52 52 /// 53 53 /// # Panics 54 54 /// ··· 89 89 .collect(); 90 90 91 91 Self { 92 - inner: Mutex::new(BootstrapAllocatorInner { 92 + inner: Mutex::new(BumpAllocatorInner { 93 93 arenas, 94 94 current_arena_hint: largest_region_idx, 95 95 }), ··· 143 143 } 144 144 } 145 145 146 - // Safety: bootstrap allocator manages raw physical memory regions, they remain valid theoretically 146 + // Safety: bump allocator manages raw physical memory regions, they remain valid theoretically 147 147 // forever we merely hand out "land claims" to it. 148 - unsafe impl<R, const MAX_REGIONS: usize> FrameAllocator for BootstrapAllocator<R, MAX_REGIONS> 148 + unsafe impl<R, const MAX_REGIONS: usize> FrameAllocator for BumpAllocator<R, MAX_REGIONS> 149 149 where 150 150 R: lock_api::RawMutex, 151 151 { ··· 182 182 } 183 183 184 184 unsafe fn deallocate(&self, _block: PhysicalAddress, _layout: Layout) { 185 - unimplemented!("BootstrapAllocator does not support deallocation"); 185 + unimplemented!("BumpAllocator does not support deallocation"); 186 186 } 187 187 } 188 188 189 - impl<const MAX_REGIONS: usize> BootstrapAllocatorInner<MAX_REGIONS> { 189 + impl<const MAX_REGIONS: usize> BumpAllocatorInner<MAX_REGIONS> { 190 190 /// Fast-path for allocation from the "current" arena. Most modern machines have a single large 191 191 /// physical memory region. During creation, we determine the largest physical memory region 192 192 /// and designate it as the "current" arena. ··· 506 506 mod tests { 507 507 use core::alloc::Layout; 508 508 509 + use super::*; 509 510 use crate::address_range::AddressRangeExt; 510 511 use crate::arch::Arch; 511 - use crate::bootstrap::BootstrapAllocator; 512 512 use crate::frame_allocator::FrameAllocator; 513 513 use crate::test_utils::{EmulateArch, Machine, MachineBuilder}; 514 514 use crate::{GIB, PhysMap, PhysicalAddress, archtest}; ··· 520 520 } 521 521 522 522 archtest! 
{ 523 - // Assert that the BootstrapAllocator can allocate frames 523 + // Assert that the BumpAllocator can allocate frames 524 524 #[test_log::test] 525 525 fn allocate_contiguous_smoke<A: Arch>() { 526 526 let machine: Machine<A> = MachineBuilder::new() ··· 530 530 ]) 531 531 .finish(); 532 532 533 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 534 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 533 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 534 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 535 535 536 536 // Based on the memory of the machine we set up above, we expect the allocator to 537 537 // yield 3 pages. ··· 558 558 .unwrap_err(); 559 559 } 560 560 561 - // Assert that the BootstrapAllocator can allocate zeroed frames in 561 + // Assert that the BumpAllocator can allocate zeroed frames in 562 562 // bootstrap (bare, before paging is enabled) mode. 563 563 #[test_log::test] 564 564 fn allocate_contiguous_zeroed_smoke<A: Arch>() { ··· 569 569 ]) 570 570 .finish(); 571 571 572 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 573 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 572 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 573 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 574 574 575 575 let arch = EmulateArch::new(machine); 576 576 577 - let physmap = PhysMap::new_bootstrap(); 577 + let physmap = PhysMap::ABSENT; 578 578 579 579 // Based on the memory of the machine we set up above, we expect the allocator to 580 580 // yield 3 pages. 
··· 613 613 ]) 614 614 .finish(); 615 615 616 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 617 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 616 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 617 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 618 618 619 619 let blocks: Vec<_> = frame_allocator 620 620 .allocate(Layout::from_size_align(4 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap()) ··· 642 642 643 643 let arch = EmulateArch::new(machine.clone()); 644 644 645 - let physmap = PhysMap::new_bootstrap(); 645 + let physmap = PhysMap::ABSENT; 646 646 647 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 648 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 647 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 648 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 649 649 650 650 let blocks: Vec<_> = frame_allocator 651 651 .allocate_zeroed( ··· 677 677 ]) 678 678 .finish(); 679 679 680 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 681 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 680 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 681 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 682 682 683 683 let frame = frame_allocator 684 684 .allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()) ··· 697 697 ]) 698 698 .finish(); 699 699 700 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 701 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 700 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 701 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 702 702 703 703 let blocks = frame_allocator 704 704 .allocate(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()) ··· 719 719 ]) 720 720 .finish(); 721 721 722 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 723 - 
BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 722 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 723 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 724 724 725 725 let frame = frame_allocator 726 726 .allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1 * GIB).unwrap()) ··· 738 738 ]) 739 739 .finish(); 740 740 741 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 742 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 741 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 742 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 743 743 744 744 let blocks = frame_allocator 745 745 .allocate(Layout::from_size_align(A::GRANULE_SIZE, 1 * GIB).unwrap()) ··· 760 760 761 761 use crate::address_range::AddressRangeExt; 762 762 use crate::arch::Arch; 763 - use crate::bootstrap::{BootstrapAllocator, DEFAULT_MAX_REGIONS}; 764 - use crate::frame_allocator::FrameAllocator; 763 + use crate::frame_allocator::{BumpAllocator, DEFAULT_MAX_REGIONS, FrameAllocator}; 765 764 use crate::test_utils::proptest::region_layouts; 766 765 use crate::test_utils::{Machine, MachineBuilder}; 767 766 use crate::{GIB, KIB, for_every_arch}; ··· 774 773 .with_memory_regions(region_layouts.clone()) 775 774 .finish(); 776 775 777 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 778 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 776 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 777 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 779 778 780 779 let total_size = region_layouts.iter().map(|layout| layout.size()).sum(); 781 780 ··· 808 807 .with_memory_regions(region_layouts.clone()) 809 808 .finish(); 810 809 811 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 812 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 810 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 811 + 
BumpAllocator::new::<A>(machine.memory_regions().collect()); 813 812 814 813 let total_size = region_layouts.iter().map(|layout| layout.size()).sum(); 815 814 ··· 828 827 .with_memory_regions(region_layouts.clone()) 829 828 .finish(); 830 829 831 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 832 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 830 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 831 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 833 832 834 833 let alignment = 1usize << alignment_pot; 835 834 ··· 847 846 .with_memory_regions(region_layouts.clone()) 848 847 .finish(); 849 848 850 - let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 851 - BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 849 + let frame_allocator: BumpAllocator<parking_lot::RawMutex> = 850 + BumpAllocator::new::<A>(machine.memory_regions().collect()); 852 851 853 852 let alignment = 1usize << alignment_pot; 854 853
+4
libs/kmem/src/frame_allocator.rs
··· 1 + mod bump; 2 + 1 3 use core::alloc::Layout; 2 4 use core::fmt; 3 5 use core::ops::Range; 6 + 7 + pub use bump::{BumpAllocator, DEFAULT_MAX_REGIONS}; 4 8 5 9 use crate::arch::Arch; 6 10 use crate::physmap::PhysMap;
+1 -2
libs/kmem/src/lib.rs
··· 10 10 mod address_range; 11 11 mod address_space; 12 12 pub mod arch; 13 - pub mod bootstrap; 14 13 mod flush; 15 14 mod frame_allocator; 16 15 mod memory_attributes; ··· 25 24 pub use address_space::HardwareAddressSpace; 26 25 pub use arch::Arch; 27 26 pub use flush::Flush; 28 - pub use frame_allocator::{AllocError, FrameAllocator}; 27 + pub use frame_allocator::{AllocError, BumpAllocator, DEFAULT_MAX_REGIONS, FrameAllocator}; 29 28 pub use memory_attributes::{MemoryAttributes, WriteOrExecute}; 30 29 pub use physmap::PhysMap; 31 30
+18 -15
libs/kmem/src/physmap.rs
··· 1 1 use core::cmp; 2 + use core::num::NonZeroIsize; 2 3 use core::ops::Range; 3 4 4 5 use crate::{PhysicalAddress, VirtualAddress}; ··· 8 9 /// zeroing frames of memory in the frame allocator). 9 10 /// 10 11 /// This region must be mapped so it is only accessible by the kernel. 11 - #[derive(Debug, Clone)] 12 + #[derive(Debug)] 12 13 pub struct PhysMap { 13 - translation_offset: isize, 14 + translation_offset: Option<NonZeroIsize>, 14 15 #[cfg(debug_assertions)] 15 16 range: Option<Range<u128>>, 16 17 } 17 18 18 19 impl PhysMap { 20 + pub const ABSENT: Self = Self { 21 + translation_offset: None, 22 + #[cfg(debug_assertions)] 23 + range: None, 24 + }; 25 + 19 26 /// Construct a new `PhysMap` from a chosen base address and the machines physical memory regions. 20 27 /// The iterator over the memory regions must not be empty. 21 28 /// ··· 40 47 clippy::cast_possible_wrap, 41 48 reason = "this is expected to wrap when the physmap_start is lower than the lowest physical address (e.g. when it is in upper half of memory)" 42 49 )] 43 - let translation_offset = physmap_start.get().wrapping_sub(min_addr.get()) as isize; 50 + let translation_offset = 51 + NonZeroIsize::new(physmap_start.get().wrapping_sub(min_addr.get()) as isize) 52 + .expect("identity-mapped physmap is not allowed"); 44 53 45 54 #[cfg(debug_assertions)] 46 55 let range = { ··· 51 60 }; 52 61 53 62 Self { 54 - translation_offset, 63 + translation_offset: Some(translation_offset), 55 64 #[cfg(debug_assertions)] 56 65 range: Some(range), 57 66 } 58 67 } 59 68 60 - pub(crate) const fn new_bootstrap() -> Self { 61 - Self { 62 - translation_offset: 0, 63 - #[cfg(debug_assertions)] 64 - range: None, 65 - } 66 - } 67 - 68 69 /// Translates a `PhysicalAddress` to a `VirtualAddress` through this `PhysMap`. 
69 70 #[expect(clippy::missing_panics_doc, reason = "internal assert")] 70 71 #[inline] 71 72 pub fn phys_to_virt(&self, phys: PhysicalAddress) -> VirtualAddress { 72 - let virt = VirtualAddress::new(phys.wrapping_offset(self.translation_offset).get()); 73 + let translation_offset = self.translation_offset.map_or(0, |off| off.get()); 74 + 75 + let virt = VirtualAddress::new(phys.wrapping_offset(translation_offset).get()); 73 76 74 77 #[cfg(debug_assertions)] 75 78 if let Some(range) = &self.range { ··· 110 113 [Range::from_start_len(region_start, region_size)], 111 114 ); 112 115 113 - prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(region_start.get()) as isize); 116 + prop_assert_eq!(map.translation_offset.unwrap().get(), base.get().wrapping_sub(region_start.get()) as isize); 114 117 #[cfg(debug_assertions)] 115 118 prop_assert_eq!( 116 119 map.range, ··· 127 130 regions 128 131 ); 129 132 130 - prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(regions_start.get()) as isize); 133 + prop_assert_eq!(map.translation_offset.unwrap().get(), base.get().wrapping_sub(regions_start.get()) as isize); 131 134 } 132 135 133 136 #[test]
+22 -19
libs/kmem/src/test_utils/machine.rs
··· 6 6 use std::sync::Arc; 7 7 use std::{cmp, fmt}; 8 8 9 + use k23_arrayvec::ArrayVec; 9 10 use k23_cpu_local::collection::CpuLocal; 10 11 12 + use crate::address_space::Active; 11 13 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 12 - use crate::bootstrap::BootstrapAllocator; 13 - use crate::flush::Flush; 14 + use crate::frame_allocator::BumpAllocator; 14 15 use crate::test_utils::arch::EmulateArch; 15 16 use crate::test_utils::memory::Memory; 16 17 use crate::utils::page_table_entries_for; 17 - use crate::{HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, VirtualAddress}; 18 + use crate::{ 19 + FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, 20 + VirtualAddress, 21 + }; 18 22 19 23 /// A "virtual machine" that emulates a given architecture. It is intended to be used in tests 20 24 /// and supports modeling the following properties: ··· 54 58 &self, 55 59 physmap_start: VirtualAddress, 56 60 ) -> ( 57 - HardwareAddressSpace<EmulateArch<A>>, 58 - BootstrapAllocator<parking_lot::RawMutex>, 61 + HardwareAddressSpace<EmulateArch<A>, Active>, 62 + BumpAllocator<parking_lot::RawMutex>, 63 + PhysMap, 59 64 ) { 60 - let physmap = PhysMap::new(physmap_start, self.memory_regions()); 61 - 62 65 let arch = EmulateArch::new(self.clone()); 63 66 64 - let frame_allocator = 65 - BootstrapAllocator::new::<A>(arch.machine().memory_regions().collect()); 67 + let memory_regions: ArrayVec<_, _> = arch.machine().memory_regions().collect(); 66 68 67 - let mut flush = Flush::new(); 68 - let mut aspace = 69 - HardwareAddressSpace::new_bootstrap(arch, physmap, &frame_allocator, &mut flush) 70 - .expect("Machine does not have enough physical memory for root page table. 
Consider increasing configured physical memory sizes."); 69 + let active_physmap = PhysMap::ABSENT; 70 + let chosen_physmap = PhysMap::new(physmap_start, memory_regions.clone()); 71 71 72 - aspace 73 - .map_physical_memory(&frame_allocator, &mut flush) 72 + let frame_allocator = BumpAllocator::new::<A>(memory_regions.clone()); 73 + 74 + let mut address_space = HardwareAddressSpace::new(arch, &active_physmap, frame_allocator.by_ref()) 75 + .expect("Machine does not have enough physical memory for root page table. Consider increasing configured physical memory sizes."); 76 + 77 + address_space.map_physical_memory(memory_regions.into_iter(), &active_physmap, &chosen_physmap, frame_allocator.by_ref()) 74 78 .expect("Machine does not have enough physical memory for physmap. Consider increasing configured physical memory sizes."); 75 79 76 80 // Safety: we just created the address space, so don't have any pointers into it. In hosted tests 77 81 // the programs memory and CPU registers are outside the address space anyway. 78 - let address_space = unsafe { aspace.finish_bootstrap_and_activate() }; 82 + let address_space = unsafe { address_space.finish_bootstrap_and_activate() }; 79 83 80 - flush.flush(address_space.arch()); 81 - 82 - (address_space, frame_allocator) 84 + (address_space, frame_allocator, chosen_physmap) 83 85 } 84 86 85 87 /// Returns an iterator over the physical memory regions in this machine ··· 455 457 pub struct HasMemory; 456 458 457 459 pub struct MachineBuilder<A: Arch, Mem> { 460 + // under_construction: HardwareAddressSpace<A, Bootstrapping>, 458 461 memory: Option<Memory>, 459 462 _has: PhantomData<Mem>, 460 463 _m: PhantomData<A>,