Next Generation WASM Microkernel Operating System

clippy

+731 -427
+13
Cargo.lock
··· 1097 1097 version = "0.1.0" 1098 1098 dependencies = [ 1099 1099 "arrayvec", 1100 + "bitflags", 1100 1101 "cpu-local", 1101 1102 "fallible-iterator", 1102 1103 "kmem", ··· 1105 1106 "mycelium-bitfield", 1106 1107 "parking_lot", 1107 1108 "proptest", 1109 + "proptest-derive", 1108 1110 "riscv", 1109 1111 "spin", 1110 1112 ] ··· 1577 1579 "rusty-fork", 1578 1580 "tempfile", 1579 1581 "unarray", 1582 + ] 1583 + 1584 + [[package]] 1585 + name = "proptest-derive" 1586 + version = "0.7.0" 1587 + source = "registry+https://github.com/rust-lang/crates.io-index" 1588 + checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" 1589 + dependencies = [ 1590 + "proc-macro2", 1591 + "quote", 1592 + "syn", 1580 1593 ] 1581 1594 1582 1595 [[package]]
+1
Cargo.toml
··· 149 149 150 150 # dev dependencies 151 151 proptest = "1.8.0" 152 + proptest-derive = "0.7.0" 152 153 libm = "0.2.15" 153 154 154 155 # build dependencies
+5 -3
libs/kmem/Cargo.toml
··· 10 10 riscv.workspace = true 11 11 cpu-local = { workspace = true, optional = true } 12 12 spin = { workspace = true, optional = true } 13 + proptest = { workspace = true, optional = true } 14 + proptest-derive = { workspace = true, optional = true } 13 15 14 16 # 3rd-party dependencies 15 17 mycelium-bitfield.workspace = true ··· 18 20 lock_api.workspace = true 19 21 20 22 [dev-dependencies] 21 - kmem = { workspace = true, features = ["emulate"] } 22 - proptest.workspace = true 23 23 parking_lot = "0.12.5" 24 + kmem = { workspace = true, features = ["test_utils"] } 25 + bitflags.workspace = true 24 26 25 27 [features] 26 - emulate = ["cpu-local", "spin"] 28 + test_utils = ["cpu-local", "spin", "proptest", "proptest-derive"] 27 29 28 30 [lints] 29 31 workspace = true
+7
libs/kmem/proptest-regressions/arch/riscv64.txt
··· 1 + # Seeds for failure cases proptest has generated in the past. It is 2 + # automatically read and these particular cases re-run before any 3 + # novel cases are generated. 4 + # 5 + # It is recommended to check this file in to source control so that 6 + # everyone who runs the test benefits from these saved cases. 7 + cc 26bcb7f9640296e1ce43defa4a891640f04dbb0569c49ee2fd49fb0d71a1a909 # shrinks to address = PhysicalAddress(0x0000100000000000)
+7
libs/kmem/proptest-regressions/physmap.txt
··· 1 + # Seeds for failure cases proptest has generated in the past. It is 2 + # automatically read and these particular cases re-run before any 3 + # novel cases are generated. 4 + # 5 + # It is recommended to check this file in to source control so that 6 + # everyone who runs the test benefits from these saved cases. 7 + cc 6e6921502ef9b923b728203c38cbf0c9dd25f51a974f8d04fdd2f0f438478a73 # shrinks to base = VirtualAddress(0x0000000000000000), region_start = PhysicalAddress(0x0000000000000000), region_size = 1
+42 -28
libs/kmem/src/address.rs
··· 133 133 Self(self.0.saturating_add(offset)) 134 134 } 135 135 136 + /// Adds an unsigned offset to this address, wrapping around at the boundary of the type. 137 + #[must_use] 138 + #[inline] 139 + pub const fn checked_add(self, offset: usize) -> Option<Self> { 140 + if let Some(val) = self.0.checked_add(offset) { 141 + Some(Self(val)) 142 + } else { 143 + None 144 + } 145 + } 146 + 136 147 /// Calculates the distance between two addresses in bytes. 137 148 #[must_use] 138 149 #[inline] ··· 262 273 263 274 #[repr(transparent)] 264 275 #[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] 276 + #[cfg_attr(feature = "test_utils", derive(proptest_derive::Arbitrary))] 265 277 pub struct VirtualAddress(usize); 266 278 impl_address!(VirtualAddress); 267 279 ··· 300 312 301 313 #[repr(transparent)] 302 314 #[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] 315 + #[cfg_attr(feature = "test_utils", derive(proptest_derive::Arbitrary))] 303 316 pub struct PhysicalAddress(usize); 304 317 impl_address!(PhysicalAddress); 305 318 306 - // #[cfg(test)] 307 - // mod tests { 308 - // use proptest::{proptest, prop_assert, prop_assert_eq, prop_assert_ne}; 309 - // use super::*; 310 - // 311 - // proptest! 
{ 312 - // #[test] 313 - // fn lower_half_is_canonical(addr in 0x0usize..0x3fffffffff) { 314 - // let addr = VirtualAddress::new(addr); 315 - // prop_assert!(addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39)); 316 - // prop_assert_eq!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr); 317 - // } 318 - // 319 - // #[test] 320 - // fn upper_half_is_canonical(addr in 0xffffffc000000000usize..0xffffffffffffffff) { 321 - // let addr = VirtualAddress::new(addr); 322 - // prop_assert!(addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39)); 323 - // prop_assert_eq!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr); 324 - // } 325 - // 326 - // #[test] 327 - // fn non_canonical_hole(addr in 0x4000000000usize..0xffffffbfffffffff) { 328 - // let addr = VirtualAddress::new(addr); 329 - // prop_assert_ne!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr); 330 - // prop_assert!(!addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39)); 331 - // } 332 - // } 333 - // } 319 + #[cfg(test)] 320 + mod tests { 321 + use proptest::{prop_assert, prop_assert_eq, prop_assert_ne, proptest}; 322 + 323 + use super::*; 324 + 325 + proptest! 
{ 326 + #[test] 327 + fn lower_half_is_canonical(addr in 0x0usize..0x3fffffffff) { 328 + let addr = VirtualAddress::new(addr); 329 + prop_assert!(addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>()); 330 + prop_assert_eq!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr); 331 + } 332 + 333 + #[test] 334 + fn upper_half_is_canonical(addr in 0xffffffc000000000usize..0xffffffffffffffff) { 335 + let addr = VirtualAddress::new(addr); 336 + prop_assert!(addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>()); 337 + prop_assert_eq!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr); 338 + } 339 + 340 + #[test] 341 + fn non_canonical_hole(addr in 0x4000000000usize..0xffffffbfffffffff) { 342 + let addr = VirtualAddress::new(addr); 343 + prop_assert_ne!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr); 344 + prop_assert!(!addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>()); 345 + } 346 + } 347 + }
+28 -23
libs/kmem/src/address_space.rs
··· 4 4 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 5 5 use crate::bootstrap::{Bootstrap, BootstrapAllocator}; 6 6 use crate::flush::Flush; 7 - use crate::physmap::PhysicalMemoryMapping; 7 + use crate::physmap::PhysMap; 8 8 use crate::table::{Table, marker}; 9 9 use crate::utils::{PageTableEntries, page_table_entries_for}; 10 10 use crate::{ ··· 14 14 pub struct HardwareAddressSpace<A: Arch> { 15 15 arch: A, 16 16 root_page_table: Table<A, marker::Owned>, 17 - physmap: PhysicalMemoryMapping, 17 + physmap: PhysMap, 18 18 } 19 19 20 20 impl<A: Arch> HardwareAddressSpace<A> { 21 + /// Constructs a new `AddressSpace` with a freshly allocated root page table 22 + /// that may be used during address space bringup in the `loader`. 23 + /// 24 + /// # Errors 25 + /// 26 + /// Returns `Err(AllocError)` when allocating the root page table fails. 21 27 pub fn new( 22 28 arch: A, 23 - physmap: PhysicalMemoryMapping, 29 + physmap: PhysMap, 24 30 frame_allocator: impl FrameAllocator, 25 31 flush: &mut Flush, 26 32 ) -> Result<Self, AllocError> { ··· 35 41 }) 36 42 } 37 43 38 - /// Constructs **and bootstraps** a new AddressSpace with a freshly allocated root page table. 44 + /// Constructs a new *bootstrapping* `AddressSpace` with a freshly allocated root page table 45 + /// that may be used during address space bringup in the `loader`. 39 46 /// 40 47 /// # Errors 41 48 /// 42 - /// Returns Err(AllocError) when allocating the root page table fails. 49 + /// Returns `Err(AllocError)` when allocating the root page table fails. 
43 50 pub fn new_bootstrap<R: lock_api::RawMutex>( 44 51 arch: A, 45 - future_physmap: PhysicalMemoryMapping, 52 + future_physmap: PhysMap, 46 53 frame_allocator: &BootstrapAllocator<R>, 47 54 flush: &mut Flush, 48 55 ) -> Result<Bootstrap<Self>, AllocError> { 49 - let address_space = Self::new( 50 - arch, 51 - PhysicalMemoryMapping::new_bootstrap(), 52 - frame_allocator, 53 - flush, 54 - )?; 56 + let address_space = Self::new(arch, PhysMap::new_bootstrap(), frame_allocator, flush)?; 55 57 56 58 Ok(Bootstrap { 57 59 address_space, ··· 60 62 } 61 63 62 64 /// Constructs a new `AddressSpace` from its raw components: architecture-specific data and the root table. 63 - pub fn from_parts( 64 - arch: A, 65 - root_page_table: Table<A, marker::Owned>, 66 - physmap: PhysicalMemoryMapping, 67 - ) -> Self { 65 + pub fn from_parts(arch: A, root_page_table: Table<A, marker::Owned>, physmap: PhysMap) -> Self { 68 66 Self { 69 67 physmap, 70 68 root_page_table, ··· 73 71 } 74 72 75 73 /// Decomposes an `AddressSpace` into its raw components: architecture-specific data and the root table. 76 - pub fn into_parts(self) -> (A, Table<A, marker::Owned>, PhysicalMemoryMapping) { 74 + pub fn into_parts(self) -> (A, Table<A, marker::Owned>, PhysMap) { 77 75 (self.arch, self.root_page_table, self.physmap) 78 76 } 79 77 ··· 104 102 105 103 for level in A::LEVELS { 106 104 let entry_index = level.pte_index_of(virt); 105 + // Safety: `pte_index_of` only returns in-bounds indices. 
107 106 let entry = unsafe { table.get(entry_index, &self.physmap, &self.arch) }; 108 107 109 108 if entry.is_table() { ··· 183 182 // TODO we can omit the fence here and lazily change the mapping in the fault handler# 184 183 flush.invalidate(range); 185 184 } else { 186 - let frame = frame_allocator 187 - .allocate_contiguous_zeroed(A::GRANULE_LAYOUT, &self.physmap, &self.arch) 188 - .unwrap(); 185 + let frame = frame_allocator.allocate_contiguous_zeroed( 186 + A::GRANULE_LAYOUT, 187 + &self.physmap, 188 + &self.arch, 189 + )?; 189 190 190 191 *entry = <A as Arch>::PageTableEntry::new_table(frame); 191 192 ··· 261 262 Ok(()) 262 263 }; 263 264 265 + // Safety: `remap_contiguous` is infallible 264 266 unsafe { 265 267 self.root_page_table 266 268 .borrow_mut() ··· 318 320 Ok(()) 319 321 }; 320 322 323 + // Safety: `set_attributes` is infallible 321 324 unsafe { 322 325 self.root_page_table 323 326 .borrow_mut() ··· 372 375 fn unmap_inner( 373 376 mut table: Table<A, marker::Mut<'_>>, 374 377 range: Range<VirtualAddress>, 375 - physmap: &PhysicalMemoryMapping, 378 + physmap: &PhysMap, 376 379 arch: &A, 377 380 frame_allocator: impl FrameAllocator, 378 381 flush: &mut Flush, ··· 380 383 let entries: PageTableEntries<A> = page_table_entries_for(range.clone(), table.level()); 381 384 382 385 for (entry_index, range) in entries { 386 + // Safety: `page_table_entries_for` only returns in-bounds indices. 383 387 let mut entry = unsafe { table.get(entry_index, physmap, arch) }; 384 388 debug_assert!(!entry.is_vacant()); 385 389 ··· 418 422 } 419 423 } 420 424 425 + // Safety: `page_table_entries_for` only returns in-bounds indices. 
421 426 unsafe { 422 427 table.set(entry_index, entry, physmap, arch); 423 428 } ··· 431 436 432 437 use crate::address_range::AddressRangeExt; 433 438 use crate::arch::Arch; 434 - use crate::emulate::{BootstrapResult, MachineBuilder}; 435 439 use crate::flush::Flush; 436 440 use crate::frame_allocator::FrameAllocator; 441 + use crate::test_utils::{BootstrapResult, MachineBuilder}; 437 442 use crate::{MemoryAttributes, VirtualAddress, WriteOrExecute, archtest}; 438 443 439 444 archtest! {
+19 -2
libs/kmem/src/arch/mod.rs
··· 7 7 use crate::{MemoryAttributes, PhysicalAddress, VirtualAddress}; 8 8 9 9 pub trait Arch { 10 + /// The type representing a single page table entry on this architecture. Usually `usize` sized. 11 + /// 12 + /// # Safety 13 + /// 14 + /// The value `0` **must** be a valid pattern for this type and **must** correspond to a _vacant_ entry. 10 15 type PageTableEntry: PageTableEntry + fmt::Debug; 11 16 17 + /// The page table levels that this architecture supports. 12 18 const LEVELS: &'static [PageTableLevel]; 19 + 20 + /// The default base address of the [`PhysMap`][crate::PhysMap]. The loader may randomize this 21 + /// during ASLR but this should be the fallback address. On most architectures it is the first 22 + /// address of the upper-half of the address space. 13 23 const DEFAULT_PHYSMAP_BASE: VirtualAddress; 14 24 25 + /// The size of the "translation granule" i.e. the smallest page size supported by this architecture. 15 26 const GRANULE_SIZE: usize = { 16 27 if let Some(level) = Self::LEVELS.last() { 17 28 level.page_size() ··· 20 31 } 21 32 }; 22 33 34 + /// A `Layout` representing a "translation granule". 23 35 const GRANULE_LAYOUT: Layout = { 24 36 if let Ok(layout) = Layout::from_size_align(Self::GRANULE_SIZE, Self::GRANULE_SIZE) { 25 37 layout ··· 28 40 } 29 41 }; 30 42 43 + /// The number of usable bits in a `VirtualAddress`. This may be used for address canonicalization. 31 44 #[expect( 32 45 clippy::cast_possible_truncation, 33 46 reason = "we check the coercion does not truncate" ··· 174 187 fn attributes(&self) -> MemoryAttributes; 175 188 } 176 189 190 + /// Represents a level in a hierarchical page table. 
177 191 #[derive(Debug)] 178 192 pub struct PageTableLevel { 179 193 /// The number of entries in this page table level ··· 190 204 clippy::cast_possible_truncation, 191 205 reason = "we check the coercion does not truncate" 192 206 )] 193 - // #[expect(clippy::missing_panics_doc, reason = "internal assertion")] 194 - pub const fn new(page_size: usize, entries: u16, supports_leaf: bool) -> PageTableLevel { 207 + pub(crate) const fn new(page_size: usize, entries: u16, supports_leaf: bool) -> PageTableLevel { 195 208 let index_shift = page_size.ilog2(); 196 209 assert!(index_shift <= u8::MAX as u32); 197 210 ··· 233 246 } 234 247 235 248 /// Extracts the page table entry (PTE) for a table at this level from the given address. 249 + // TODO: tests 250 + // - ensure this only returns in-bound indices 236 251 pub(crate) fn pte_index_of(&self, address: VirtualAddress) -> u16 { 237 252 let idx = 238 253 u16::try_from(address.get() >> self.index_shift & (self.entries as usize - 1)).unwrap(); ··· 240 255 idx 241 256 } 242 257 258 + /// Whether we can create a leaf entry at this level given the combination of base `VirtualAddress`, 259 + /// base `PhysicalAddress`, and remaining chunk length. 243 260 pub(crate) fn can_map(&self, virt: VirtualAddress, phys: PhysicalAddress, len: usize) -> bool { 244 261 let page_size = self.page_size(); 245 262
+77 -3
libs/kmem/src/arch/riscv64.rs
··· 9 9 WriteOrExecute, 10 10 }; 11 11 12 + /// The number of usable bits in a `PhysicalAddress`. This may be used for address canonicalization. 13 + #[cfg_attr(not(test), expect(unused, reason = "only used by tests"))] 14 + const PHYSICAL_ADDRESS_BITS: usize = 56; 15 + 16 + const PAGE_OFFSET_BITS: usize = 12; 17 + 12 18 pub struct Riscv64Sv39 { 13 19 asid: u16, 14 20 } ··· 155 161 } 156 162 } 157 163 164 + const _: () = { 165 + assert!(PageTableEntry::VALID.least_significant_index() == 0); 166 + assert!(PageTableEntry::VALID.most_significant_index() - 1 == 0); 167 + 168 + assert!(PageTableEntry::READ.least_significant_index() - 1 == 0); 169 + assert!(PageTableEntry::READ.most_significant_index() - 2 == 0); 170 + 171 + assert!(PageTableEntry::WRITE.least_significant_index() - 2 == 0); 172 + assert!(PageTableEntry::WRITE.most_significant_index() - 3 == 0); 173 + 174 + assert!(PageTableEntry::EXECUTE.least_significant_index() - 3 == 0); 175 + assert!(PageTableEntry::EXECUTE.most_significant_index() - 4 == 0); 176 + 177 + assert!(PageTableEntry::USER.least_significant_index() - 4 == 0); 178 + assert!(PageTableEntry::USER.most_significant_index() - 5 == 0); 179 + 180 + assert!(PageTableEntry::GLOBAL.least_significant_index() - 5 == 0); 181 + assert!(PageTableEntry::GLOBAL.most_significant_index() - 6 == 0); 182 + 183 + assert!(PageTableEntry::ACCESSED.least_significant_index() - 6 == 0); 184 + assert!(PageTableEntry::ACCESSED.most_significant_index() - 7 == 0); 185 + 186 + assert!(PageTableEntry::DIRTY.least_significant_index() - 7 == 0); 187 + assert!(PageTableEntry::DIRTY.most_significant_index() - 8 == 0); 188 + 189 + assert!(PageTableEntry::SOFTWARE_USE.least_significant_index() - 8 == 0); 190 + assert!(PageTableEntry::SOFTWARE_USE.most_significant_index() - 10 == 0); 191 + 192 + assert!(PageTableEntry::ADDRESS.least_significant_index() - 10 == 0); 193 + assert!(PageTableEntry::ADDRESS.most_significant_index() - 54 == 0); 194 + 195 + 
assert!(PageTableEntry::_RESERVED.least_significant_index() - 54 == 0); 196 + assert!(PageTableEntry::_RESERVED.most_significant_index() - 61 == 0); 197 + 198 + assert!(PageTableEntry::PBMT.least_significant_index() - 61 == 0); 199 + assert!(PageTableEntry::PBMT.most_significant_index() - 63 == 0); 200 + 201 + assert!(PageTableEntry::NAPOT.least_significant_index() - 63 == 0); 202 + assert!(PageTableEntry::NAPOT.most_significant_index() - 64 == 0); 203 + }; 204 + 158 205 mycelium_bitfield::enum_from_bits! { 159 206 // TODO explain 160 207 #[derive(Debug)] ··· 170 217 171 218 impl super::PageTableEntry for PageTableEntry { 172 219 fn new_leaf(address: PhysicalAddress, attributes: MemoryAttributes) -> Self { 220 + let address_raw = address.get() >> PAGE_OFFSET_BITS; 221 + 222 + debug_assert!(address_raw <= Self::ADDRESS.max_value()); 223 + 173 224 Self::new() 174 225 .with(Self::VALID, true) 175 - .with(Self::ADDRESS, address.get()) 226 + .with(Self::ADDRESS, address_raw) 176 227 .with(Self::READ, attributes.allows_read()) 177 228 .with(Self::WRITE, attributes.allows_write()) 178 229 .with(Self::EXECUTE, attributes.allows_execution()) 179 230 } 180 231 181 232 fn new_table(address: PhysicalAddress) -> Self { 233 + let address_raw = address.get() >> PAGE_OFFSET_BITS; 234 + 235 + debug_assert!(address_raw <= Self::ADDRESS.max_value()); 236 + 182 237 Self::new() 183 238 .with(Self::VALID, true) 184 - .with(Self::ADDRESS, address.get()) 239 + .with(Self::ADDRESS, address_raw) 185 240 } 186 241 187 242 const VACANT: Self = Self::new(); ··· 203 258 } 204 259 205 260 fn address(&self) -> PhysicalAddress { 206 - PhysicalAddress::new(self.get(Self::ADDRESS)) 261 + PhysicalAddress::new(self.get(Self::ADDRESS) << PAGE_OFFSET_BITS) 207 262 } 208 263 209 264 fn attributes(&self) -> MemoryAttributes { ··· 258 313 fn fence_all() { 259 314 sfence_vma(0, usize::MAX, 0, usize::MAX).unwrap(); 260 315 } 316 + 317 + #[cfg(test)] 318 + mod tests { 319 + use proptest::{prop_assert_eq, 
proptest}; 320 + 321 + use super::*; 322 + use crate::arch::PageTableEntry; 323 + use crate::test_utils::proptest::{aligned_phys, phys}; 324 + use crate::{KIB, MemoryAttributes}; 325 + 326 + proptest! { 327 + #[test] 328 + fn pte_new_leaf(address in aligned_phys(phys(0..1 << PHYSICAL_ADDRESS_BITS), 4*KIB)) { 329 + let pte = <super::PageTableEntry as PageTableEntry>::new_leaf(address, MemoryAttributes::new()); 330 + 331 + prop_assert_eq!(pte.address(), address, "{}", pte); 332 + } 333 + } 334 + }
+12 -10
libs/kmem/src/bootstrap.rs
··· 1 1 mod frame_allocator; 2 2 3 - use core::mem; 4 3 use core::ops::Range; 5 4 6 5 pub use frame_allocator::{BootstrapAllocator, DEFAULT_MAX_REGIONS, FreeRegions, UsedRegions}; ··· 8 7 use crate::arch::Arch; 9 8 use crate::flush::Flush; 10 9 use crate::{ 11 - AllocError, FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysicalAddress, 12 - PhysicalMemoryMapping, VirtualAddress, WriteOrExecute, 10 + AllocError, FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, 11 + VirtualAddress, WriteOrExecute, 13 12 }; 14 13 15 14 pub struct Bootstrap<S> { 16 15 pub(crate) address_space: S, 17 - pub(crate) future_physmap: PhysicalMemoryMapping, 16 + pub(crate) future_physmap: PhysMap, 18 17 } 19 18 20 19 impl<A: Arch> Bootstrap<HardwareAddressSpace<A>> { ··· 95 94 where 96 95 F: FrameAllocator, 97 96 { 98 - let virt = unsafe { 99 - Range { 100 - start: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.start), 101 - end: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.end), 102 - } 97 + let virt = Range { 98 + start: VirtualAddress::new(phys.start.get()), 99 + end: VirtualAddress::new(phys.end.get()), 103 100 }; 104 101 102 + // Safety: ensured by caller. 105 103 unsafe { 106 104 self.address_space 107 105 .map_contiguous(virt, phys.start, attributes, frame_allocator, flush) ··· 114 112 /// # Safety 115 113 /// 116 114 /// After this method returns, all pointers become dangling and as such any access through 117 - /// pre-existing pointers is Undefined Behaviour. This includes implicit references by the CPU 115 + /// pre-existing pointers is Undefined Behavior. This includes implicit references by the CPU 118 116 /// such as the instruction pointer. 117 + /// 118 + /// This might seem impossible to uphold, except for identity-mappings which we consider valid 119 + /// even after activating the address space. 
119 120 pub unsafe fn finish_bootstrap_and_activate(self) -> HardwareAddressSpace<A> { 120 121 let (arch, root_table, _) = self.address_space.into_parts(); 121 122 123 + // Safety: ensured by caller 122 124 unsafe { arch.set_active_table(root_table.address()) }; 123 125 124 126 HardwareAddressSpace::from_parts(arch, root_table, self.future_physmap)
+26 -37
libs/kmem/src/bootstrap/frame_allocator.rs
··· 12 12 13 13 pub const DEFAULT_MAX_REGIONS: usize = 16; 14 14 15 + /// Simple bump allocator (cannot free) that can be used to allocate physical memory frames early during system 16 + /// bootstrap. 17 + /// 18 + /// This allocator supports discontiguous physical memory by default. By default, up to [`DEFAULT_MAX_REGIONS`] 19 + /// but this limit can be adjusted by explicitly specifying the const-generic parameter. 15 20 pub struct BootstrapAllocator<R, const MAX_REGIONS: usize = DEFAULT_MAX_REGIONS> 16 21 where 17 22 R: lock_api::RawMutex, 18 23 { 19 24 inner: Mutex<R, BootstrapAllocatorInner<MAX_REGIONS>>, 20 - page_size: usize, 25 + // we make a "snapshot" of the translation granule size during construction so that the allocator 26 + // itself doesn't need to be generic over `Arch`. 27 + frame_size: usize, 21 28 } 22 29 23 30 #[derive(Debug)] 24 31 struct BootstrapAllocatorInner<const MAX_REGIONS: usize> { 32 + /// The discontiguous regions of "regular" physical memory that we can use for allocation. 25 33 regions: ArrayVec<Range<PhysicalAddress>, MAX_REGIONS>, 26 - // offset from the top of memory regions 34 + /// offset from the top of memory regions 27 35 offset: usize, 28 36 } 29 37 ··· 34 42 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 35 43 f.debug_struct("BootstrapAllocator") 36 44 .field("regions", &self.inner.lock()) 37 - .field("page_size", &self.page_size) 45 + .field("frame_size", &self.frame_size) 38 46 .finish() 39 47 } 40 48 } ··· 68 76 69 77 Self { 70 78 inner: Mutex::new(BootstrapAllocatorInner { regions, offset: 0 }), 71 - page_size: A::GRANULE_SIZE, 79 + frame_size: A::GRANULE_SIZE, 72 80 } 73 81 } 74 82 75 - pub fn regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 76 - self.inner.lock().regions.clone().into_iter() 83 + /// Returns the array of "regular" physical memory regions managed by this allocator. 
84 + pub fn regions(&self) -> ArrayVec<Range<PhysicalAddress>, MAX_REGIONS> { 85 + self.inner.lock().regions.clone() 77 86 } 78 87 88 + /// Returns an iterator over the "free" (not allocated) portions of physical memory regions 89 + /// managed by this allocator. 79 90 pub fn free_regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 80 91 let inner = self.inner.lock(); 81 92 ··· 85 96 } 86 97 } 87 98 99 + /// Returns an iterator over the "used" (allocated) portions of physical memory regions 100 + /// managed by this allocator. 88 101 pub fn used_regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 89 102 let inner = self.inner.lock(); 90 103 ··· 94 107 } 95 108 } 96 109 110 + /// Returns the number of allocated bytes. 97 111 pub fn usage(&self) -> usize { 98 112 self.inner.lock().offset 99 113 } ··· 108 122 fn allocate_contiguous(&self, layout: Layout) -> Result<PhysicalAddress, AllocError> { 109 123 assert_eq!( 110 124 layout.align(), 111 - self.page_size, 125 + self.frame_size, 112 126 "BootstrapAllocator only supports page-aligned allocations" 113 127 ); 114 128 115 129 self.inner.lock().allocate(layout) 116 130 } 117 131 118 - // fn allocate_contiguous_zeroed( 119 - // &self, 120 - // layout: Layout, 121 - // arch: &impl Arch, 122 - // ) -> Result<PhysicalAddress, AllocError> { 123 - // assert_eq!( 124 - // layout.align(), 125 - // self.page_size, 126 - // "BootstrapAllocator only supports page-aligned allocations" 127 - // ); 128 - // 129 - // let frame = self.inner.lock().allocate(layout)?; 130 - // 131 - // self.physmap.with_mapped(frame, |page| { 132 - // // Safety: the address is properly aligned (at least page aligned) and is either valid to 133 - // // access through the physical memory map or because we're in bootstrapping still and phys==virt 134 - // unsafe { 135 - // arch.write_bytes(page, 0, layout.size()); 136 - // } 137 - // }); 138 - // 139 - // Ok(frame) 140 - // } 141 - 142 132 unsafe fn deallocate(&self, _block: 
PhysicalAddress, _layout: Layout) { 143 - unimplemented!() 133 + unimplemented!("BootstrapAllocator does not support deallocation") 144 134 } 145 135 146 136 fn size_hint(&self) -> (NonZeroUsize, Option<NonZeroUsize>) { 147 - (NonZeroUsize::new(self.page_size).unwrap(), None) 137 + (NonZeroUsize::new(self.frame_size).unwrap(), None) 148 138 } 149 139 } 150 140 ··· 239 229 mod tests { 240 230 use crate::arch::Arch; 241 231 use crate::bootstrap::BootstrapAllocator; 242 - use crate::emulate::MachineBuilder; 243 - use crate::emulate::arch::EmulateArch; 244 232 use crate::frame_allocator::FrameAllocator; 245 - use crate::{PhysicalMemoryMapping, archtest}; 233 + use crate::test_utils::{EmulateArch, MachineBuilder}; 234 + use crate::{PhysMap, archtest}; 246 235 247 236 archtest! { 248 237 // Assert that the BootstrapAllocator can allocate frames ··· 291 280 292 281 let arch = EmulateArch::new(machine); 293 282 294 - let physmap = PhysicalMemoryMapping::new_bootstrap(); 283 + let physmap = PhysMap::new_bootstrap(); 295 284 296 285 // Based on the memory of the machine we set up above, we expect the allocator to 297 286 // yield 3 pages.
+5 -4
libs/kmem/src/emulate.rs libs/kmem/src/test_utils.rs
··· 1 - #[cfg(feature = "emulate")] 2 - pub mod arch; 1 + mod arch; 3 2 mod machine; 4 3 mod memory; 4 + pub mod proptest; 5 5 6 - // pub use memory::Memory; 7 - pub use machine::{BootstrapResult, Cpu, Machine, MachineBuilder}; 6 + pub use arch::EmulateArch; 7 + pub use machine::{BootstrapResult, Cpu, HasMemory, Machine, MachineBuilder, MissingMemory}; 8 + pub use memory::Memory; 8 9 9 10 #[macro_export] 10 11 macro_rules! archtest {
-89
libs/kmem/src/emulate/arch.rs
··· 1 - use core::fmt; 2 - use core::ops::Range; 3 - use std::mem; 4 - 5 - use crate::arch::{Arch, PageTableLevel}; 6 - use crate::emulate::Machine; 7 - use crate::{PhysicalAddress, VirtualAddress}; 8 - 9 - pub struct EmulateArch<A: Arch, R: lock_api::RawMutex> { 10 - machine: Machine<A, R>, 11 - asid: u16, 12 - } 13 - 14 - impl<A: Arch, R: lock_api::RawMutex> fmt::Debug for EmulateArch<A, R> 15 - where 16 - A::PageTableEntry: fmt::Debug, 17 - { 18 - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 19 - f.debug_struct("EmulateArch") 20 - .field("machine", &self.machine) 21 - .finish() 22 - } 23 - } 24 - 25 - impl<A: Arch, R: lock_api::RawMutex> EmulateArch<A, R> { 26 - pub const fn new(machine: Machine<A, R>) -> Self { 27 - Self::with_asid(machine, 0) 28 - } 29 - 30 - pub const fn with_asid(machine: Machine<A, R>, asid: u16) -> Self { 31 - Self { machine, asid } 32 - } 33 - 34 - pub const fn machine(&self) -> &Machine<A, R> { 35 - &self.machine 36 - } 37 - } 38 - 39 - impl<A: Arch, R: lock_api::RawMutex> Arch for EmulateArch<A, R> { 40 - type PageTableEntry = A::PageTableEntry; 41 - 42 - const LEVELS: &'static [PageTableLevel] = A::LEVELS; 43 - const DEFAULT_PHYSMAP_BASE: VirtualAddress = A::DEFAULT_PHYSMAP_BASE; 44 - 45 - fn active_table(&self) -> Option<PhysicalAddress> { 46 - self.machine.active_table() 47 - } 48 - 49 - unsafe fn set_active_table(&self, address: PhysicalAddress) { 50 - unsafe { 51 - self.machine.set_active_table(address); 52 - } 53 - } 54 - 55 - fn fence(&self, address_range: Range<VirtualAddress>) { 56 - self.machine.invalidate(self.asid, address_range); 57 - } 58 - 59 - fn fence_all(&self) { 60 - self.machine.invalidate_all(self.asid); 61 - } 62 - 63 - unsafe fn read<T>(&self, address: VirtualAddress) -> T { 64 - if self.active_table().is_some() { 65 - unsafe { self.machine.read(self.asid, address) } 66 - } else { 67 - let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 68 - unsafe { 
self.machine.read_phys(address) } 69 - } 70 - } 71 - 72 - unsafe fn write<T>(&self, address: VirtualAddress, value: T) { 73 - if self.active_table().is_some() { 74 - unsafe { self.machine.write(self.asid, address, value) } 75 - } else { 76 - let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 77 - unsafe { self.machine.write_phys(address, value) } 78 - } 79 - } 80 - 81 - unsafe fn write_bytes(&self, address: VirtualAddress, value: u8, count: usize) { 82 - if self.active_table().is_some() { 83 - self.machine.write_bytes(self.asid, address, value, count) 84 - } else { 85 - let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 86 - self.machine.write_bytes_phys(address, value, count) 87 - } 88 - } 89 - }
+26 -28
libs/kmem/src/emulate/machine.rs libs/kmem/src/test_utils/machine.rs
··· 7 7 8 8 use arrayvec::ArrayVec; 9 9 use cpu_local::collection::CpuLocal; 10 - use lock_api::Mutex; 11 10 12 11 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 13 12 use crate::bootstrap::BootstrapAllocator; 14 - use crate::emulate::arch::EmulateArch; 15 - use crate::emulate::memory::Memory; 16 13 use crate::flush::Flush; 14 + use crate::test_utils::arch::EmulateArch; 15 + use crate::test_utils::memory::Memory; 17 16 use crate::utils::page_table_entries_for; 18 17 use crate::{ 19 - AddressRangeExt, AllocError, HardwareAddressSpace, MemoryAttributes, PhysicalAddress, 20 - PhysicalMemoryMapping, VirtualAddress, 18 + AllocError, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, VirtualAddress, 21 19 }; 22 20 23 - pub struct Machine<A: Arch, R: lock_api::RawMutex>(Arc<MachineInner<A, R>>); 21 + /// A "virtual machine" that emulates a given architecture. It is intended to be used in tests 22 + /// and supports modeling the following properties: 23 + /// 24 + /// - multiple, discontiguous physical memory regions 25 + /// - per-cpu virtual->physical address translation buffers 26 + /// - address translation buffer invalidation 27 + pub struct Machine<A: Arch>(Arc<MachineInner<A>>); 24 28 25 - struct MachineInner<A: Arch, R: lock_api::RawMutex> { 26 - memory: Mutex<R, Memory>, 29 + struct MachineInner<A: Arch> { 30 + memory: Memory, 27 31 cpus: CpuLocal<RefCell<Cpu<A>>>, 28 32 } 29 33 30 - impl<A: Arch, R: lock_api::RawMutex> Clone for Machine<A, R> { 34 + impl<A: Arch> Clone for Machine<A> { 31 35 fn clone(&self) -> Self { 32 36 Self(Arc::clone(&self.0)) 33 37 } 34 38 } 35 39 36 - impl<A: Arch, R: lock_api::RawMutex> fmt::Debug for Machine<A, R> 40 + impl<A: Arch> fmt::Debug for Machine<A> 37 41 where 38 42 A::PageTableEntry: fmt::Debug, 39 43 { ··· 45 49 } 46 50 } 47 51 48 - impl<A: Arch, R: lock_api::RawMutex> Machine<A, R> { 52 + impl<A: Arch> Machine<A> { 49 53 pub fn memory_regions<const MAX: usize>(&self) -> 
ArrayVec<Range<PhysicalAddress>, MAX> { 50 - self.0.memory.lock().regions().collect() 54 + self.0.memory.regions().collect() 51 55 } 52 56 53 57 pub unsafe fn read<T>(&self, asid: u16, addr: VirtualAddress) -> T { ··· 95 99 } 96 100 97 101 pub unsafe fn read_phys<T>(&self, address: PhysicalAddress) -> T { 98 - unsafe { self.0.memory.lock().read(address) } 102 + unsafe { self.0.memory.read(address) } 99 103 } 100 104 101 105 pub unsafe fn write_phys<T>(&self, address: PhysicalAddress, value: T) { 102 - unsafe { self.0.memory.lock().write(address, value) } 106 + unsafe { self.0.memory.write(address, value) } 103 107 } 104 108 105 109 pub fn write_bytes_phys(&self, address: PhysicalAddress, value: u8, count: usize) { 106 - self.0.memory.lock().write_bytes(address, value, count) 110 + self.0.memory.write_bytes(address, value, count) 107 111 } 108 112 109 113 pub fn active_table(&self) -> Option<PhysicalAddress> { ··· 116 120 117 121 pub fn invalidate(&self, asid: u16, address_range: Range<VirtualAddress>) { 118 122 let mut cpu = self.cpu_mut(); 119 - let memory = self.0.memory.lock(); 120 123 121 - cpu.invalidate(asid, address_range, &memory); 124 + cpu.invalidate(asid, address_range, &self.0.memory); 122 125 } 123 126 124 127 pub fn invalidate_all(&self, asid: u16) { 125 128 let mut cpu = self.cpu_mut(); 126 - let memory = self.0.memory.lock(); 127 129 128 - cpu.invalidate_all(asid, &memory); 130 + cpu.invalidate_all(asid, &self.0.memory); 129 131 } 130 132 131 133 fn cpu(&self) -> Ref<'_, Cpu<A>> { ··· 188 190 self.map 189 191 .retain(|(key_asid, key_range), _| !(*key_asid == asid && range.contains(key_range))); 190 192 191 - // if let Some(page_table) = self.page_table { 192 193 self.reload_map(asid, range, 0, self.page_table.unwrap(), memory); 193 - // } 194 194 } 195 195 196 196 pub fn invalidate_all(&mut self, asid: u16, memory: &Memory) { 197 197 self.map.clear(); 198 198 199 - // if let Some(page_table) = self.page_table { 200 199 self.reload_map( 201 200 asid, 
202 201 VirtualAddress::MIN..VirtualAddress::MAX.align_down(A::GRANULE_SIZE), ··· 204 203 self.page_table.unwrap(), 205 204 memory, 206 205 ); 207 - // } 208 206 } 209 207 210 208 fn reload_map( ··· 250 248 } 251 249 252 250 pub struct BootstrapResult<A: Arch, R: lock_api::RawMutex> { 253 - pub machine: Machine<A, R>, 254 - pub address_space: HardwareAddressSpace<EmulateArch<A, R>>, 251 + pub machine: Machine<A>, 252 + pub address_space: HardwareAddressSpace<EmulateArch<A>>, 255 253 pub frame_allocator: BootstrapAllocator<R>, 256 254 } 257 255 ··· 288 286 } 289 287 290 288 impl<A: Arch, R: lock_api::RawMutex> MachineBuilder<A, R, HasMemory> { 291 - pub fn finish(self) -> (Machine<A, R>, PhysicalMemoryMapping) { 289 + pub fn finish(self) -> (Machine<A>, PhysMap) { 292 290 let memory = self.memory.unwrap(); 293 291 294 - let physmap = PhysicalMemoryMapping::new(self.physmap_base, memory.regions()); 292 + let physmap = PhysMap::new(self.physmap_base, memory.regions()); 295 293 296 294 let inner = MachineInner { 297 - memory: Mutex::new(memory), 295 + memory, 298 296 cpus: CpuLocal::with_capacity(std::thread::available_parallelism().unwrap().get()), 299 297 }; 300 298
-116
libs/kmem/src/emulate/memory.rs
··· 1 - use std::alloc::{Allocator, Layout}; 2 - use std::collections::BTreeMap; 3 - use std::ops::Range; 4 - use std::ptr::NonNull; 5 - use std::{fmt, mem}; 6 - 7 - use crate::PhysicalAddress; 8 - use crate::arch::Arch; 9 - 10 - pub struct Memory { 11 - regions: BTreeMap<PhysicalAddress, (PhysicalAddress, NonNull<[u8]>, Layout)>, 12 - } 13 - 14 - impl Drop for Memory { 15 - fn drop(&mut self) { 16 - let regions = mem::take(&mut self.regions); 17 - 18 - for (_end, (_start, region, layout)) in regions { 19 - unsafe { std::alloc::System.deallocate(region.cast(), layout) } 20 - } 21 - } 22 - } 23 - 24 - impl Memory { 25 - pub fn new<A: Arch>(region_sizes: impl IntoIterator<Item = usize>) -> Self { 26 - let regions = region_sizes 27 - .into_iter() 28 - .map(|size| { 29 - let layout = Layout::from_size_align(size, A::GRANULE_SIZE).unwrap(); 30 - 31 - let region = std::alloc::System.allocate(layout).unwrap(); 32 - 33 - // Safety: we just allocated the ptr, we know it is valid 34 - let Range { start, end } = unsafe { region.as_ref() }.as_ptr_range(); 35 - 36 - ( 37 - PhysicalAddress::from_ptr(end), 38 - (PhysicalAddress::from_ptr(start), region, layout), 39 - ) 40 - }) 41 - .collect(); 42 - 43 - Self { regions } 44 - } 45 - 46 - pub fn regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 47 - self.regions.iter().map(|(end, (start, _, _))| *start..*end) 48 - } 49 - 50 - pub fn get_region_containing(&self, address: PhysicalAddress) -> Option<(&[u8], usize)> { 51 - let (_end, (start, region, _)) = self.regions.range(address..).next()?; 52 - let offset = address.offset_from_unsigned(*start); 53 - 54 - let region = unsafe { region.as_ref() }; 55 - 56 - Some((region, offset)) 57 - } 58 - 59 - pub fn get_region_containing_mut( 60 - &mut self, 61 - address: PhysicalAddress, 62 - ) -> Option<(&mut [u8], usize)> { 63 - let (_end, (start, region, _)) = self.regions.range_mut(address..).next()?; 64 - let offset = address.get().checked_sub(start.get())?; 65 - 66 - let 
region = unsafe { region.as_mut() }; 67 - 68 - Some((region, offset)) 69 - } 70 - 71 - pub unsafe fn read<T>(&self, address: PhysicalAddress) -> T { 72 - let size = size_of::<T>(); 73 - if let Some((region, offset)) = self.get_region_containing(address) 74 - && offset + size <= region.len() 75 - { 76 - unsafe { region.as_ptr().add(offset).cast::<T>().read() } 77 - } else { 78 - core::panic!("Memory::read: {address} size {size:#x} outside of memory ({self:?})"); 79 - } 80 - } 81 - 82 - pub unsafe fn write<T>(&mut self, address: PhysicalAddress, value: T) { 83 - let size = size_of::<T>(); 84 - if let Some((region, offset)) = self.get_region_containing_mut(address) 85 - && offset + size <= region.len() 86 - { 87 - unsafe { region.as_mut_ptr().add(offset).cast::<T>().write(value) }; 88 - } else { 89 - core::panic!("Memory::write: {address} size {size:#x} outside of memory ({self:?})"); 90 - } 91 - } 92 - 93 - pub fn write_bytes(&mut self, address: PhysicalAddress, value: u8, count: usize) { 94 - if let Some((region, offset)) = self.get_region_containing_mut(address) 95 - && offset + count <= region.len() 96 - { 97 - region[offset..offset + count].fill(value); 98 - } else { 99 - core::panic!( 100 - "Memory::write_bytes: {address} size {count:#x} outside of memory ({self:?})" 101 - ); 102 - } 103 - } 104 - } 105 - 106 - impl fmt::Debug for Memory { 107 - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 108 - f.debug_struct("Memory") 109 - .field_with("regions", |f| { 110 - f.debug_list() 111 - .entries(self.regions.iter().map(|(end, (start, _, _))| *start..*end)) 112 - .finish() 113 - }) 114 - .finish() 115 - } 116 - }
+5 -2
libs/kmem/src/frame_allocator.rs
··· 6 6 use fallible_iterator::FallibleIterator; 7 7 8 8 use crate::arch::Arch; 9 - use crate::physmap::PhysicalMemoryMapping; 9 + use crate::physmap::PhysMap; 10 10 use crate::{AddressRangeExt, PhysicalAddress}; 11 11 12 + /// The `AllocError` error indicates a frame allocation failure that may be due 13 + /// to resource exhaustion or to something wrong when combining the given input 14 + /// arguments with this allocator. 12 15 #[derive(Debug, Copy, Clone)] 13 16 pub struct AllocError; 14 17 ··· 92 95 fn allocate_contiguous_zeroed( 93 96 &self, 94 97 layout: Layout, 95 - physmap: &PhysicalMemoryMapping, 98 + physmap: &PhysMap, 96 99 arch: &impl Arch, 97 100 ) -> Result<PhysicalAddress, AllocError> { 98 101 let frame = self.allocate_contiguous(layout)?;
+5 -5
libs/kmem/src/lib.rs
··· 1 - #![cfg_attr(not(any(test, feature = "emulate")), no_std)] 1 + #![cfg_attr(not(any(test, feature = "test_utils")), no_std)] 2 2 // #![no_std] 3 3 #![feature(step_trait)] 4 4 #![feature(debug_closure_helpers)] ··· 14 14 mod memory_attributes; 15 15 mod physmap; 16 16 mod table; 17 + #[cfg(feature = "test_utils")] 18 + pub mod test_utils; 17 19 mod utils; 18 20 19 - #[cfg(feature = "emulate")] 20 - mod emulate; 21 - 22 21 pub use address::{PhysicalAddress, VirtualAddress}; 23 22 pub use address_range::AddressRangeExt; 24 23 pub use address_space::HardwareAddressSpace; 24 + pub use flush::Flush; 25 25 pub use frame_allocator::{AllocError, FrameAllocator, FrameIter}; 26 26 pub use memory_attributes::{MemoryAttributes, WriteOrExecute}; 27 - pub use physmap::PhysicalMemoryMapping; 27 + pub use physmap::PhysMap; 28 28 29 29 pub const KIB: usize = 1024; 30 30 pub const MIB: usize = KIB * 1024;
+121 -65
libs/kmem/src/physmap.rs
··· 1 1 use core::cmp; 2 2 use core::ops::Range; 3 3 4 - use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 4 + use crate::{PhysicalAddress, VirtualAddress}; 5 5 6 + /// Describes the region of virtual memory that maps all of physical memory. This region is used 7 + /// by the virtual memory subsystem to access memory where only the physical address is known (e.g. 8 + /// zeroing frames of memory in the frame allocator). 9 + /// 10 + /// This region must be mapped so it is only accessible by the kernel. 6 11 #[derive(Debug, Clone)] 7 12 pub struct PhysMap { 8 13 translation_offset: isize, 9 14 #[cfg(debug_assertions)] 10 15 range: Option<Range<u128>>, 11 16 } 12 17 13 18 impl PhysMap { 19 + /// Construct a new `PhysMap` from a chosen base address and the machine's physical memory regions. 20 + /// The iterator over the memory regions must not be empty. 21 + /// 22 + /// # Panics 23 + /// 24 + /// Panics if the iterator is empty. 14 25 pub fn new( 15 26 physmap_start: VirtualAddress, 16 27 regions: impl IntoIterator<Item = Range<PhysicalAddress>>, 17 28 ) -> Self { 18 29 let mut min_addr = PhysicalAddress::MAX; 19 30 let mut max_addr = PhysicalAddress::MIN; ··· 23 34 max_addr = cmp::max(max_addr, region.end); 24 35 } 25 36 26 - assert!(min_addr <= max_addr); 37 + assert!(min_addr <= max_addr, "regions must not be empty"); 27 38 28 - let translation_offset = physmap_start.get() - min_addr.get(); 39 + #[expect( 40 + clippy::cast_possible_wrap, 41 + reason = "this is expected to wrap when the physmap_start is lower than the lowest physical address (e.g. 
when it is in upper half of memory)" 42 + )] 43 + let translation_offset = physmap_start.get().wrapping_sub(min_addr.get()) as isize; 29 44 30 45 #[cfg(debug_assertions)] 31 - let range = Range::from_start_len(physmap_start, max_addr.offset_from_unsigned(min_addr)); 46 + let range = { 47 + let start = physmap_start.get() as u128; 48 + let end = start + max_addr.offset_from_unsigned(min_addr) as u128; 49 + 50 + start..end 51 + }; 32 52 33 53 Self { 34 54 translation_offset, ··· 40 60 pub(crate) const fn new_bootstrap() -> Self { 41 61 Self { 42 62 translation_offset: 0, 63 + #[cfg(debug_assertions)] 43 64 range: None, 44 65 } 45 66 } 46 67 68 + /// Translates a `PhysicalAddress` to a `VirtualAddress` through this `PhysMap`. 69 + #[expect(clippy::missing_panics_doc, reason = "internal assert")] 47 70 #[inline] 48 71 pub fn phys_to_virt(&self, phys: PhysicalAddress) -> VirtualAddress { 49 - let virt = VirtualAddress::new(phys.get() + self.translation_offset); 72 + let virt = VirtualAddress::new(phys.wrapping_offset(self.translation_offset).get()); 50 73 51 74 #[cfg(debug_assertions)] 52 75 if let Some(range) = &self.range { 53 76 assert!( 54 - range.start <= virt && virt <= range.end, 55 - "physical address is not mapped in physical memory mapping. this is a bug! physmap={self:?},phys={phys:?},virt={virt}" 77 + range.start <= virt.get() as u128 && virt.get() as u128 <= range.end, 78 + "physical address is not mapped in physical memory mapping. this is a bug! 
physmap={self:#x?},phys={phys:?},virt={virt}" 56 79 ); 57 80 } 58 81 ··· 66 89 67 90 start..end 68 91 } 69 - // 70 - // #[inline] 71 - // pub fn with_mapped<R>(&self, phys: PhysicalAddress, cb: impl FnOnce(VirtualAddress) -> R) -> R { 72 - // let virt = if let Some(physmap) = &self.range { 73 - // let virt = physmap.start.add(phys.get()); 74 - // 75 - // debug_assert!(physmap.contains(&virt)); 76 - // 77 - // virt 78 - // } else { 79 - // // Safety: during bootstrap no address translation takes place meaning physical addresses *are* 80 - // // virtual addresses. 81 - // unsafe { mem::transmute::<PhysicalAddress, VirtualAddress>(phys) } 82 - // }; 83 - // 84 - // cb(virt) 85 - // } 86 - // 87 - // #[inline] 88 - // pub fn with_mapped_range<R>( 89 - // &self, 90 - // phys: Range<PhysicalAddress>, 91 - // cb: impl FnOnce(Range<VirtualAddress>) -> R, 92 - // ) -> R { 93 - // let virt = if let Some(physmap) = &self.range { 94 - // let start = physmap.start.add(phys.start.get()); 95 - // let end = physmap.start.add(phys.end.get()); 96 - // 97 - // debug_assert!( 98 - // physmap.contains(&start), 99 - // "physical address is not mapped in physical memory mapping. this is a bug! physmap={physmap:?},phys={phys:?},virt={:?}", 100 - // start..end 101 - // ); 102 - // debug_assert!( 103 - // physmap.contains(&end), 104 - // "physical address is not mapped in physical memory mapping. this is a bug! physmap={physmap:?},phys={phys:?},virt={:?}", 105 - // start..end 106 - // ); 107 - // 108 - // start..end 109 - // } else { 110 - // // Safety: during bootstrap no address translation takes place meaning physical addresses *are* 111 - // // virtual addresses. 
112 - // unsafe { 113 - // Range { 114 - // start: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.start), 115 - // end: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.end), 116 - // } 117 - // } 118 - // }; 119 - // 120 - // cb(virt) 121 - // } 92 + } 93 + 94 + #[cfg(test)] 95 + mod tests { 96 + use proptest::prelude::*; 97 + 98 + use super::*; 99 + use crate::address_range::AddressRangeExt; 100 + use crate::test_utils::proptest::{ 101 + aligned_phys, aligned_virt, pick_address_in_regions, regions, 102 + }; 103 + use crate::{GIB, KIB}; 104 + 105 + proptest! { 106 + #[test] 107 + fn single_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), region_start in aligned_phys(any::<PhysicalAddress>(), 4*KIB), region_size in 0..256*GIB) { 108 + let map = PhysMap::new( 109 + base, 110 + [Range::from_start_len(region_start, region_size)], 111 + ); 112 + 113 + prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(region_start.get()) as isize); 114 + prop_assert_eq!( 115 + map.range, 116 + Some(base.get() as u128..base.add(region_size).get() as u128) 117 + ) 118 + } 119 + 120 + #[test] 121 + fn multi_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), regions in regions(1..10, 4*KIB, 256*GIB, 256*GIB)) { 122 + let regions_start = regions[0].start; 123 + 124 + let map = PhysMap::new( 125 + base, 126 + regions 127 + ); 128 + 129 + prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(regions_start.get()) as isize); 130 + } 131 + 132 + #[test] 133 + fn phys_to_virt(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), (regions, phys) in pick_address_in_regions(regions(1..10, 4*KIB, 256*GIB, 256*GIB)), ) { 134 + let regions_start = regions[0].start; 135 + 136 + let map = PhysMap::new( 137 + base, 138 + regions 139 + ); 140 + 141 + let virt = map.phys_to_virt(phys); 142 + 143 + prop_assert_eq!(virt.get(), base.get() + (phys.get() - regions_start.get())) 144 + } 145 + } 146 + 147 + #[test] 148 + #[should_panic] 149 + fn 
construct_no_regions() { 150 + let _map = PhysMap::new(VirtualAddress::new(0xffffffc000000000), []); 151 + } 152 + 153 + #[test] 154 + fn phys_to_virt_lower_half() { 155 + let map = PhysMap::new( 156 + VirtualAddress::new(0x0), 157 + [PhysicalAddress::new(0x00007f87024d9000)..PhysicalAddress::new(0x00007fc200e17000)], 158 + ); 159 + 160 + println!("{map:?}"); 161 + 162 + let virt = map.phys_to_virt(PhysicalAddress::new(0x00007f87024d9000)); 163 + assert_eq!(virt, VirtualAddress::new(0x0)); 164 + } 165 + 166 + #[test] 167 + fn phys_to_virt_upper_half() { 168 + let map = PhysMap::new( 169 + VirtualAddress::new(0xffffffc000000000), 170 + [PhysicalAddress::new(0x00007f87024d9000)..PhysicalAddress::new(0x00007fc200e17000)], 171 + ); 172 + 173 + println!("{map:?}"); 174 + 175 + let virt = map.phys_to_virt(PhysicalAddress::new(0x00007f87024d9000)); 176 + assert_eq!(virt, VirtualAddress::new(0xffffffc000000000)); 177 + } 122 178 }
+8 -11
libs/kmem/src/table.rs
··· 4 4 use arrayvec::ArrayVec; 5 5 6 6 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 7 - use crate::physmap::PhysicalMemoryMapping; 7 + use crate::physmap::PhysMap; 8 8 use crate::utils::{PageTableEntries, page_table_entries_for}; 9 9 use crate::{AllocError, FrameAllocator, PhysicalAddress, VirtualAddress}; 10 10 ··· 50 50 } 51 51 52 52 /// Returns `true` when _all_ page table entries in this table are _vacant_. 53 - pub fn is_empty(&self, physmap: &PhysicalMemoryMapping, arch: &A) -> bool { 53 + pub fn is_empty(&self, physmap: &PhysMap, arch: &A) -> bool { 54 54 let mut is_empty = true; 55 55 56 56 for entry_index in 0..self.level().entries() { ··· 69 69 /// # Safety 70 70 /// 71 71 /// The caller must ensure `index` is in-bounds (less than the number of entries at this level). 72 - pub unsafe fn get( 73 - &self, 74 - index: u16, 75 - physmap: &PhysicalMemoryMapping, 76 - arch: &A, 77 - ) -> A::PageTableEntry { 72 + pub unsafe fn get(&self, index: u16, physmap: &PhysMap, arch: &A) -> A::PageTableEntry { 78 73 let entry_phys = self 79 74 .base 80 75 .add(index as usize * size_of::<A::PageTableEntry>()); ··· 91 86 impl<A: Arch> Table<A, marker::Owned> { 92 87 pub fn allocate( 93 88 frame_allocator: impl FrameAllocator, 94 - physmap: &PhysicalMemoryMapping, 89 + physmap: &PhysMap, 95 90 arch: &A, 96 91 ) -> Result<Self, AllocError> { 97 92 let base = frame_allocator.allocate_contiguous_zeroed(A::GRANULE_LAYOUT, physmap, arch)?; ··· 146 141 &mut self, 147 142 index: u16, 148 143 entry: A::PageTableEntry, 149 - physmap: &PhysicalMemoryMapping, 144 + physmap: &PhysMap, 150 145 arch: &A, 151 146 ) { 152 147 debug_assert!(index < self.level().entries()); ··· 166 161 pub fn visit_mut<F, E>( 167 162 self, 168 163 range: Range<VirtualAddress>, 169 - physmap: &PhysicalMemoryMapping, 164 + physmap: &PhysMap, 170 165 arch: &A, 171 166 mut visit_entry: F, 172 167 ) -> Result<(), E> ··· 193 188 194 189 while let Some(mut frame) = stack.pop() { 195 190 for 
(entry_index, range) in frame.entries_iter { 191 + // Safety: `page_table_entries_for` yields only in-bound indices 196 192 let mut entry = unsafe { frame.table.get(entry_index, physmap, arch) }; 197 193 198 194 visit_entry(&mut entry, range.clone(), frame.table.level())?; 199 195 196 + // Safety: `page_table_entries_for` yields only in-bound indices 200 197 unsafe { 201 198 frame.table.set(entry_index, entry, physmap, arch); 202 199 }
+108
libs/kmem/src/test_utils/arch.rs
··· 1 + use core::fmt; 2 + use core::ops::Range; 3 + use std::mem; 4 + 5 + use crate::arch::{Arch, PageTableLevel}; 6 + use crate::test_utils::Machine; 7 + use crate::{PhysicalAddress, VirtualAddress}; 8 + 9 + /// [`Arch`] implementation that emulates a given "real" architecture. For testing purposes. 10 + pub struct EmulateArch<A: Arch> { 11 + machine: Machine<A>, 12 + asid: u16, 13 + } 14 + 15 + impl<A: Arch> fmt::Debug for EmulateArch<A> 16 + where 17 + A::PageTableEntry: fmt::Debug, 18 + { 19 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 20 + f.debug_struct("EmulateArch") 21 + .field("machine", &self.machine) 22 + .finish() 23 + } 24 + } 25 + 26 + impl<A: Arch> EmulateArch<A> { 27 + pub const fn new(machine: Machine<A>) -> Self { 28 + Self::with_asid(machine, 0) 29 + } 30 + 31 + pub const fn with_asid(machine: Machine<A>, asid: u16) -> Self { 32 + Self { machine, asid } 33 + } 34 + 35 + pub const fn machine(&self) -> &Machine<A> { 36 + &self.machine 37 + } 38 + } 39 + 40 + impl<A: Arch> Arch for EmulateArch<A> { 41 + // We want to inherit all const parameters from the proper architecture... 42 + 43 + type PageTableEntry = A::PageTableEntry; 44 + const LEVELS: &'static [PageTableLevel] = A::LEVELS; 45 + const DEFAULT_PHYSMAP_BASE: VirtualAddress = A::DEFAULT_PHYSMAP_BASE; 46 + 47 + // ...while we emulate all other methods. 48 + 49 + fn active_table(&self) -> Option<PhysicalAddress> { 50 + self.machine.active_table() 51 + } 52 + 53 + unsafe fn set_active_table(&self, address: PhysicalAddress) { 54 + unsafe { 55 + self.machine.set_active_table(address); 56 + } 57 + } 58 + 59 + fn fence(&self, address_range: Range<VirtualAddress>) { 60 + self.machine.invalidate(self.asid, address_range); 61 + } 62 + 63 + fn fence_all(&self) { 64 + self.machine.invalidate_all(self.asid); 65 + } 66 + 67 + unsafe fn read<T>(&self, address: VirtualAddress) -> T { 68 + // NB: if there is no active page table on this CPU, we are in "bare" translation mode. 
69 + // In which case we need to use `read_phys` instead of `read`, bypassing 70 + // translation checks. 71 + if self.active_table().is_some() { 72 + unsafe { self.machine.read(self.asid, address) } 73 + } else { 74 + // Safety: We checked for the absence of an active translation table, meaning we're in 75 + // "bare" mode and VirtualAddress==PhysicalAddress. 76 + let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 77 + unsafe { self.machine.read_phys(address) } 78 + } 79 + } 80 + 81 + unsafe fn write<T>(&self, address: VirtualAddress, value: T) { 82 + // NB: if there is no active page table on this CPU, we are in "bare" translation mode. 83 + // In which case we need to use `write_phys` instead of `write`, bypassing 84 + // translation checks. 85 + if self.active_table().is_some() { 86 + unsafe { self.machine.write(self.asid, address, value) } 87 + } else { 88 + // Safety: We checked for the absence of an active translation table, meaning we're in 89 + // "bare" mode and VirtualAddress==PhysicalAddress. 90 + let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 91 + unsafe { self.machine.write_phys(address, value) } 92 + } 93 + } 94 + 95 + unsafe fn write_bytes(&self, address: VirtualAddress, value: u8, count: usize) { 96 + // NB: if there is no active page table on this CPU, we are in "bare" translation mode. 97 + // In which case we need to use `write_bytes_phys` instead of `write_bytes`, bypassing 98 + // translation checks. 99 + if self.active_table().is_some() { 100 + self.machine.write_bytes(self.asid, address, value, count) 101 + } else { 102 + // Safety: We checked for the absence of an active translation table, meaning we're in 103 + // "bare" mode and VirtualAddress==PhysicalAddress. 104 + let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 105 + self.machine.write_bytes_phys(address, value, count) 106 + } 107 + } 108 + }
+114
libs/kmem/src/test_utils/memory.rs
··· 1 + use std::alloc::{Allocator, Layout}; 2 + use std::collections::BTreeMap; 3 + use std::ops::Range; 4 + use std::ptr::NonNull; 5 + use std::{fmt, mem}; 6 + 7 + use crate::arch::Arch; 8 + use crate::{AddressRangeExt, PhysicalAddress}; 9 + 10 + pub struct Memory { 11 + regions: BTreeMap<PhysicalAddress, (PhysicalAddress, NonNull<[u8]>, Layout)>, 12 + } 13 + 14 + impl Drop for Memory { 15 + fn drop(&mut self) { 16 + let regions = mem::take(&mut self.regions); 17 + 18 + for (_end, (_start, region, layout)) in regions { 19 + unsafe { std::alloc::System.deallocate(region.cast(), layout) } 20 + } 21 + } 22 + } 23 + 24 + impl Memory { 25 + pub fn new<A: Arch>(region_sizes: impl IntoIterator<Item = usize>) -> Self { 26 + let regions = region_sizes 27 + .into_iter() 28 + .map(|size| { 29 + let layout = Layout::from_size_align(size, A::GRANULE_SIZE).unwrap(); 30 + 31 + let region = std::alloc::System.allocate(layout).unwrap(); 32 + 33 + // Safety: we just allocated the ptr, we know it is valid 34 + let Range { start, end } = unsafe { region.as_ref() }.as_ptr_range(); 35 + 36 + ( 37 + PhysicalAddress::from_ptr(end), 38 + (PhysicalAddress::from_ptr(start), region, layout), 39 + ) 40 + }) 41 + .collect(); 42 + 43 + Self { regions } 44 + } 45 + 46 + pub fn regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 47 + self.regions.iter().map(|(end, (start, _, _))| *start..*end) 48 + } 49 + 50 + fn get_region_containing(&self, address: PhysicalAddress) -> Option<(NonNull<[u8]>, usize)> { 51 + let (_end, (start, region, _)) = self.regions.range(address..).next()?; 52 + 53 + let offset = address.get().checked_sub(start.get())?; 54 + 55 + Some((*region, offset)) 56 + } 57 + 58 + pub fn with_region<Ret>( 59 + &self, 60 + range: Range<PhysicalAddress>, 61 + will_write: bool, 62 + cb: impl FnOnce(&mut [u8]) -> Ret, 63 + ) -> Ret { 64 + let Some((mut region, offset)) = self.get_region_containing(range.start) else { 65 + let access_ty = if will_write { "write" } else { "read" 
}; 66 + 67 + panic!( 68 + "Memory Violation: {access_ty} at {range:?} ({} bytes) outside of memory ({self:?})", 69 + range.len() 70 + ) 71 + }; 72 + 73 + let region = unsafe { region.as_mut() }; 74 + let res = cb(&mut region[offset..offset + range.len()]); 75 + 76 + res 77 + } 78 + 79 + pub unsafe fn read<T>(&self, address: PhysicalAddress) -> T { 80 + let size = size_of::<T>(); 81 + self.with_region( 82 + Range::from_start_len(address, size), 83 + false, 84 + |region| unsafe { region.as_ptr().cast::<T>().read() }, 85 + ) 86 + } 87 + 88 + pub unsafe fn write<T>(&self, address: PhysicalAddress, value: T) { 89 + let size = size_of::<T>(); 90 + self.with_region( 91 + Range::from_start_len(address, size), 92 + true, 93 + |region| unsafe { region.as_mut_ptr().cast::<T>().write(value) }, 94 + ) 95 + } 96 + 97 + pub fn write_bytes(&self, address: PhysicalAddress, value: u8, count: usize) { 98 + self.with_region(Range::from_start_len(address, count), true, |region| { 99 + region.fill(value); 100 + }) 101 + } 102 + } 103 + 104 + impl fmt::Debug for Memory { 105 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 106 + f.debug_struct("Memory") 107 + .field_with("regions", |f| { 108 + f.debug_list() 109 + .entries(self.regions.iter().map(|(end, (start, _, _))| *start..*end)) 110 + .finish() 111 + }) 112 + .finish() 113 + } 114 + }
+98
libs/kmem/src/test_utils/proptest.rs
··· 1 + //! `proptest` strategies for virtual memory subsystem tests 2 + 3 + use std::ops::Range; 4 + 5 + use proptest::prelude::{Just, Strategy}; 6 + 7 + use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 8 + 9 + /// Produces `VirtualAddress`s in the given range 10 + pub fn virt(range: Range<usize>) -> impl Strategy<Value = VirtualAddress> { 11 + range.prop_map(|raw| VirtualAddress::new(raw)) 12 + } 13 + 14 + /// Produces `VirtualAddress`s aligned to the given `alignment` 15 + pub fn aligned_virt( 16 + addr: impl Strategy<Value = VirtualAddress>, 17 + alignment: usize, 18 + ) -> impl Strategy<Value = VirtualAddress> { 19 + addr.prop_map(move |value| value.align_down(alignment)) 20 + } 21 + 22 + /// Produces `PhysicalAddress`s in the given range 23 + pub fn phys(range: Range<usize>) -> impl Strategy<Value = PhysicalAddress> { 24 + range.prop_map(|raw| PhysicalAddress::new(raw)) 25 + } 26 + 27 + /// Produces `PhysicalAddress`s aligned to the given `alignment` 28 + pub fn aligned_phys( 29 + addr: impl Strategy<Value = PhysicalAddress>, 30 + alignment: usize, 31 + ) -> impl Strategy<Value = PhysicalAddress> { 32 + addr.prop_map(move |value| value.align_down(alignment)) 33 + } 34 + 35 + /// Produces a set of *sorted*, *non-overlapping* regions of physical memory aligned to `alignment`. 36 + /// Most useful for initializing an emulated machine. 
37 + pub fn regions( 38 + num_regions: Range<usize>, 39 + alignment: usize, 40 + max_region_size: usize, 41 + max_gap_size: usize, 42 + ) -> impl Strategy<Value = Vec<Range<PhysicalAddress>>> { 43 + proptest::collection::vec( 44 + ( 45 + // Size of the region (will be aligned) 46 + alignment..=max_region_size, 47 + // Gap after this region (will be aligned) 48 + alignment..=max_gap_size, 49 + ), 50 + num_regions, 51 + ) 52 + .prop_flat_map(move |size_gap_pairs| { 53 + // Calculate the maximum starting address that won't cause overflow 54 + let max_start = { 55 + let total_space_needed: usize = 56 + size_gap_pairs.iter().map(|(size, gap)| size + gap).sum(); 57 + 58 + // Ensure we have headroom for alignment adjustments 59 + usize::MAX 60 + .saturating_sub(total_space_needed) 61 + .saturating_sub(alignment) 62 + }; 63 + 64 + (0..=max_start).prop_map(move |start_raw| { 65 + let mut regions = Vec::with_capacity(size_gap_pairs.len()); 66 + let mut current = PhysicalAddress::new(start_raw).align_down(alignment); 67 + 68 + for (size, gap) in &size_gap_pairs { 69 + let range: Range<PhysicalAddress> = 70 + Range::from_start_len(current, *size).align_in(alignment); 71 + assert!(!range.is_empty()); 72 + 73 + regions.push(range); 74 + 75 + current = current.add(size + gap).align_up(alignment); 76 + } 77 + 78 + regions 79 + }) 80 + }) 81 + } 82 + 83 + /// Picks an arbitrary `PhysicalAddress` from a strategy that produces physical memory regions such 84 + /// as [`regions`]. 
85 + pub fn pick_address_in_regions( 86 + regions: impl Strategy<Value = Vec<Range<PhysicalAddress>>>, 87 + ) -> impl Strategy<Value = (Vec<Range<PhysicalAddress>>, PhysicalAddress)> { 88 + regions.prop_flat_map(|regions| { 89 + let r = regions.clone(); 90 + let address = (0..regions.len()).prop_flat_map(move |chosen_region| { 91 + let range = r[chosen_region].clone(); 92 + 93 + (range.start.get()..range.end.get()).prop_map(|raw| PhysicalAddress::new(raw)) 94 + }); 95 + 96 + (Just(regions), address) 97 + }) 98 + }
+3 -1
libs/kmem/src/utils.rs
··· 4 4 use crate::VirtualAddress; 5 5 use crate::arch::{Arch, PageTableLevel}; 6 6 7 - pub(crate) fn page_table_entries_for<A: Arch>( 7 + // TODO: tests 8 + // - ensure this only returns in-bound indices 9 + pub fn page_table_entries_for<A: Arch>( 8 10 range: Range<VirtualAddress>, 9 11 level: &PageTableLevel, 10 12 ) -> PageTableEntries<A> {
+1
libs/riscv/src/lib.rs
··· 41 41 // Safety: inline assembly 42 42 unsafe { 43 43 loop { 44 + #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] 44 45 asm!("wfi"); 45 46 } 46 47 }