···11+# Seeds for failure cases proptest has generated in the past. It is
22+# automatically read and these particular cases re-run before any
33+# novel cases are generated.
44+#
55+# It is recommended to check this file in to source control so that
66+# everyone who runs the test benefits from these saved cases.
77+cc 26bcb7f9640296e1ce43defa4a891640f04dbb0569c49ee2fd49fb0d71a1a909 # shrinks to address = PhysicalAddress(0x0000100000000000)
+7
libs/kmem/proptest-regressions/physmap.txt
···11+# Seeds for failure cases proptest has generated in the past. It is
22+# automatically read and these particular cases re-run before any
33+# novel cases are generated.
44+#
55+# It is recommended to check this file in to source control so that
66+# everyone who runs the test benefits from these saved cases.
77+cc 6e6921502ef9b923b728203c38cbf0c9dd25f51a974f8d04fdd2f0f438478a73 # shrinks to base = VirtualAddress(0x0000000000000000), region_start = PhysicalAddress(0x0000000000000000), region_size = 1
+42-28
libs/kmem/src/address.rs
···133133 Self(self.0.saturating_add(offset))
134134 }
137137+ /// Adds an unsigned offset to this address, returning `None` if the result would overflow.
137137+ #[must_use]
138138+ #[inline]
139139+ pub const fn checked_add(self, offset: usize) -> Option<Self> {
140140+ if let Some(val) = self.0.checked_add(offset) {
141141+ Some(Self(val))
142142+ } else {
143143+ None
144144+ }
145145+ }
146146+136147 /// Calculates the distance between two addresses in bytes.
137148 #[must_use]
138149 #[inline]
···262273263274#[repr(transparent)]
264275#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
276276+#[cfg_attr(feature = "test_utils", derive(proptest_derive::Arbitrary))]
265277pub struct VirtualAddress(usize);
266278impl_address!(VirtualAddress);
267279···300312301313#[repr(transparent)]
302314#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
315315+#[cfg_attr(feature = "test_utils", derive(proptest_derive::Arbitrary))]
303316pub struct PhysicalAddress(usize);
304317impl_address!(PhysicalAddress);
305318306306-// #[cfg(test)]
307307-// mod tests {
308308-// use proptest::{proptest, prop_assert, prop_assert_eq, prop_assert_ne};
309309-// use super::*;
310310-//
311311-// proptest! {
312312-// #[test]
313313-// fn lower_half_is_canonical(addr in 0x0usize..0x3fffffffff) {
314314-// let addr = VirtualAddress::new(addr);
315315-// prop_assert!(addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39));
316316-// prop_assert_eq!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr);
317317-// }
318318-//
319319-// #[test]
320320-// fn upper_half_is_canonical(addr in 0xffffffc000000000usize..0xffffffffffffffff) {
321321-// let addr = VirtualAddress::new(addr);
322322-// prop_assert!(addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39));
323323-// prop_assert_eq!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr);
324324-// }
325325-//
326326-// #[test]
327327-// fn non_canonical_hole(addr in 0x4000000000usize..0xffffffbfffffffff) {
328328-// let addr = VirtualAddress::new(addr);
329329-// prop_assert_ne!(addr.canonicalize(&crate::arch::riscv64::RISCV64_SV39), addr);
330330-// prop_assert!(!addr.is_canonical(&crate::arch::riscv64::RISCV64_SV39));
331331-// }
332332-// }
333333-// }
319319+#[cfg(test)]
320320+mod tests {
321321+ use proptest::{prop_assert, prop_assert_eq, prop_assert_ne, proptest};
322322+323323+ use super::*;
324324+325325+ proptest! {
326326+ #[test]
327327+ fn lower_half_is_canonical(addr in 0x0usize..0x3fffffffff) {
328328+ let addr = VirtualAddress::new(addr);
329329+ prop_assert!(addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>());
330330+ prop_assert_eq!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr);
331331+ }
332332+333333+ #[test]
334334+ fn upper_half_is_canonical(addr in 0xffffffc000000000usize..0xffffffffffffffff) {
335335+ let addr = VirtualAddress::new(addr);
336336+ prop_assert!(addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>());
337337+ prop_assert_eq!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr);
338338+ }
339339+340340+ #[test]
341341+ fn non_canonical_hole(addr in 0x4000000000usize..0xffffffbfffffffff) {
342342+ let addr = VirtualAddress::new(addr);
343343+ prop_assert_ne!(addr.canonicalize::<crate::arch::riscv64::Riscv64Sv39>(), addr);
344344+ prop_assert!(!addr.is_canonical::<crate::arch::riscv64::Riscv64Sv39>());
345345+ }
346346+ }
347347+}
+28-23
libs/kmem/src/address_space.rs
···44use crate::arch::{Arch, PageTableEntry, PageTableLevel};
55use crate::bootstrap::{Bootstrap, BootstrapAllocator};
66use crate::flush::Flush;
77-use crate::physmap::PhysicalMemoryMapping;
77+use crate::physmap::PhysMap;
88use crate::table::{Table, marker};
99use crate::utils::{PageTableEntries, page_table_entries_for};
1010use crate::{
···1414pub struct HardwareAddressSpace<A: Arch> {
1515 arch: A,
1616 root_page_table: Table<A, marker::Owned>,
1717- physmap: PhysicalMemoryMapping,
1717+ physmap: PhysMap,
1818}
19192020impl<A: Arch> HardwareAddressSpace<A> {
2121+ /// Constructs a new `AddressSpace` with a freshly allocated root page table
2222+ /// that may be used during address space bringup in the `loader`.
2323+ ///
2424+ /// # Errors
2525+ ///
2626+ /// Returns `Err(AllocError)` when allocating the root page table fails.
2127 pub fn new(
2228 arch: A,
2323- physmap: PhysicalMemoryMapping,
2929+ physmap: PhysMap,
2430 frame_allocator: impl FrameAllocator,
2531 flush: &mut Flush,
2632 ) -> Result<Self, AllocError> {
···3541 })
3642 }
37433838- /// Constructs **and bootstraps** a new AddressSpace with a freshly allocated root page table.
4444+ /// Constructs a new *bootstrapping* `AddressSpace` with a freshly allocated root page table
4545+ /// that may be used during address space bringup in the `loader`.
3946 ///
4047 /// # Errors
4148 ///
4242- /// Returns Err(AllocError) when allocating the root page table fails.
4949+ /// Returns `Err(AllocError)` when allocating the root page table fails.
4350 pub fn new_bootstrap<R: lock_api::RawMutex>(
4451 arch: A,
4545- future_physmap: PhysicalMemoryMapping,
5252+ future_physmap: PhysMap,
4653 frame_allocator: &BootstrapAllocator<R>,
4754 flush: &mut Flush,
4855 ) -> Result<Bootstrap<Self>, AllocError> {
4949- let address_space = Self::new(
5050- arch,
5151- PhysicalMemoryMapping::new_bootstrap(),
5252- frame_allocator,
5353- flush,
5454- )?;
5656+ let address_space = Self::new(arch, PhysMap::new_bootstrap(), frame_allocator, flush)?;
55575658 Ok(Bootstrap {
5759 address_space,
···6062 }
61636264 /// Constructs a new `AddressSpace` from its raw components: architecture-specific data and the root table.
6363- pub fn from_parts(
6464- arch: A,
6565- root_page_table: Table<A, marker::Owned>,
6666- physmap: PhysicalMemoryMapping,
6767- ) -> Self {
6565+ pub fn from_parts(arch: A, root_page_table: Table<A, marker::Owned>, physmap: PhysMap) -> Self {
6866 Self {
6967 physmap,
7068 root_page_table,
···7371 }
74727573 /// Decomposes an `AddressSpace` into its raw components: architecture-specific data and the root table.
7676- pub fn into_parts(self) -> (A, Table<A, marker::Owned>, PhysicalMemoryMapping) {
7474+ pub fn into_parts(self) -> (A, Table<A, marker::Owned>, PhysMap) {
7775 (self.arch, self.root_page_table, self.physmap)
7876 }
7977···104102105103 for level in A::LEVELS {
106104 let entry_index = level.pte_index_of(virt);
105105+ // Safety: `pte_index_of` only returns in-bounds indices.
107106 let entry = unsafe { table.get(entry_index, &self.physmap, &self.arch) };
108107109108 if entry.is_table() {
···183182 // TODO we can omit the fence here and lazily change the mapping in the fault handler#
184183 flush.invalidate(range);
185184 } else {
186186- let frame = frame_allocator
187187- .allocate_contiguous_zeroed(A::GRANULE_LAYOUT, &self.physmap, &self.arch)
188188- .unwrap();
185185+ let frame = frame_allocator.allocate_contiguous_zeroed(
186186+ A::GRANULE_LAYOUT,
187187+ &self.physmap,
188188+ &self.arch,
189189+ )?;
189190190191 *entry = <A as Arch>::PageTableEntry::new_table(frame);
191192···261262 Ok(())
262263 };
263264265265+ // Safety: `remap_contiguous` is infallible
264266 unsafe {
265267 self.root_page_table
266268 .borrow_mut()
···318320 Ok(())
319321 };
320322323323+ // Safety: `set_attributes` is infallible
321324 unsafe {
322325 self.root_page_table
323326 .borrow_mut()
···372375 fn unmap_inner(
373376 mut table: Table<A, marker::Mut<'_>>,
374377 range: Range<VirtualAddress>,
375375- physmap: &PhysicalMemoryMapping,
378378+ physmap: &PhysMap,
376379 arch: &A,
377380 frame_allocator: impl FrameAllocator,
378381 flush: &mut Flush,
···380383 let entries: PageTableEntries<A> = page_table_entries_for(range.clone(), table.level());
381384382385 for (entry_index, range) in entries {
386386+ // Safety: `page_table_entries_for` only returns in-bounds indices.
383387 let mut entry = unsafe { table.get(entry_index, physmap, arch) };
384388 debug_assert!(!entry.is_vacant());
385389···418422 }
419423 }
420424425425+ // Safety: `page_table_entries_for` only returns in-bounds indices.
421426 unsafe {
422427 table.set(entry_index, entry, physmap, arch);
423428 }
···431436432437 use crate::address_range::AddressRangeExt;
433438 use crate::arch::Arch;
434434- use crate::emulate::{BootstrapResult, MachineBuilder};
435439 use crate::flush::Flush;
436440 use crate::frame_allocator::FrameAllocator;
441441+ use crate::test_utils::{BootstrapResult, MachineBuilder};
437442 use crate::{MemoryAttributes, VirtualAddress, WriteOrExecute, archtest};
438443439444 archtest! {
+19-2
libs/kmem/src/arch/mod.rs
···77use crate::{MemoryAttributes, PhysicalAddress, VirtualAddress};
8899pub trait Arch {
1010+ /// The type representing a single page table entry on this architecture. Usually `usize` sized.
1111+ ///
1212+ /// # Safety
1313+ ///
1414+ /// The value `0` **must** be a valid pattern for this type and **must** correspond to a _vacant_ entry.
1015 type PageTableEntry: PageTableEntry + fmt::Debug;
11161717+ /// The page table levels that this architecture supports.
1218 const LEVELS: &'static [PageTableLevel];
1919+2020+ /// The default base address of the [`PhysMap`][crate::PhysMap]. The loader may randomize this
2121+ /// during ASLR but this should be the fallback address. On most architectures it is the first
2222+ /// address of the upper-half of the address space.
1323 const DEFAULT_PHYSMAP_BASE: VirtualAddress;
14242525+ /// The size of the "translation granule" i.e. the smallest page size supported by this architecture.
1526 const GRANULE_SIZE: usize = {
1627 if let Some(level) = Self::LEVELS.last() {
1728 level.page_size()
···2031 }
2132 };
22333434+ /// A `Layout` representing a "translation granule".
2335 const GRANULE_LAYOUT: Layout = {
2436 if let Ok(layout) = Layout::from_size_align(Self::GRANULE_SIZE, Self::GRANULE_SIZE) {
2537 layout
···2840 }
2941 };
30424343+ /// The number of usable bits in a `VirtualAddress`. This may be used for address canonicalization.
3144 #[expect(
3245 clippy::cast_possible_truncation,
3346 reason = "we check the coercion does not truncate"
···174187 fn attributes(&self) -> MemoryAttributes;
175188}
176189190190+/// Represents a level in a hierarchical page table.
177191#[derive(Debug)]
178192pub struct PageTableLevel {
179193 /// The number of entries in this page table level
···190204 clippy::cast_possible_truncation,
191205 reason = "we check the coercion does not truncate"
192206 )]
193193- // #[expect(clippy::missing_panics_doc, reason = "internal assertion")]
194194- pub const fn new(page_size: usize, entries: u16, supports_leaf: bool) -> PageTableLevel {
207207+ pub(crate) const fn new(page_size: usize, entries: u16, supports_leaf: bool) -> PageTableLevel {
195208 let index_shift = page_size.ilog2();
196209 assert!(index_shift <= u8::MAX as u32);
197210···233246 }
234247235248 /// Extracts the page table entry (PTE) for a table at this level from the given address.
249249+ // TODO: tests
250250+ // - ensure this only returns in-bound indices
236251 pub(crate) fn pte_index_of(&self, address: VirtualAddress) -> u16 {
237252 let idx =
238253 u16::try_from(address.get() >> self.index_shift & (self.entries as usize - 1)).unwrap();
···240255 idx
241256 }
242257258258+ /// Whether we can create a leaf entry at this level given the combination of base `VirtualAddress`,
259259+ /// base `PhysicalAddress`, and remaining chunk length.
243260 pub(crate) fn can_map(&self, virt: VirtualAddress, phys: PhysicalAddress, len: usize) -> bool {
244261 let page_size = self.page_size();
245262
···11mod frame_allocator;
2233-use core::mem;
43use core::ops::Range;
5465pub use frame_allocator::{BootstrapAllocator, DEFAULT_MAX_REGIONS, FreeRegions, UsedRegions};
···87use crate::arch::Arch;
98use crate::flush::Flush;
109use crate::{
1111- AllocError, FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysicalAddress,
1212- PhysicalMemoryMapping, VirtualAddress, WriteOrExecute,
1010+ AllocError, FrameAllocator, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress,
1111+ VirtualAddress, WriteOrExecute,
1312};
14131514pub struct Bootstrap<S> {
1615 pub(crate) address_space: S,
1717- pub(crate) future_physmap: PhysicalMemoryMapping,
1616+ pub(crate) future_physmap: PhysMap,
1817}
19182019impl<A: Arch> Bootstrap<HardwareAddressSpace<A>> {
···9594 where
9695 F: FrameAllocator,
9796 {
9898- let virt = unsafe {
9999- Range {
100100- start: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.start),
101101- end: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.end),
102102- }
9797+ let virt = Range {
9898+ start: VirtualAddress::new(phys.start.get()),
9999+ end: VirtualAddress::new(phys.end.get()),
103100 };
104101102102+ // Safety: ensured by caller.
105103 unsafe {
106104 self.address_space
107105 .map_contiguous(virt, phys.start, attributes, frame_allocator, flush)
···114112 /// # Safety
115113 ///
116114 /// After this method returns, all pointers become dangling and as such any access through
117117- /// pre-existing pointers is Undefined Behaviour. This includes implicit references by the CPU
115115+ /// pre-existing pointers is Undefined Behavior. This includes implicit references by the CPU
118116 /// such as the instruction pointer.
117117+ ///
118118+ /// This might seem impossible to uphold, except for identity-mappings which we consider valid
119119+ /// even after activating the address space.
119120 pub unsafe fn finish_bootstrap_and_activate(self) -> HardwareAddressSpace<A> {
120121 let (arch, root_table, _) = self.address_space.into_parts();
121122123123+ // Safety: ensured by caller
122124 unsafe { arch.set_active_table(root_table.address()) };
123125124126 HardwareAddressSpace::from_parts(arch, root_table, self.future_physmap)
+26-37
libs/kmem/src/bootstrap/frame_allocator.rs
···12121313pub const DEFAULT_MAX_REGIONS: usize = 16;
14141515+/// Simple bump allocator (cannot free) that can be used to allocate physical memory frames early during system
1616+/// bootstrap.
1717+///
1818+/// This allocator supports discontiguous physical memory, tracking up to [`DEFAULT_MAX_REGIONS`] regions
1919+/// by default; this limit can be adjusted by explicitly specifying the const-generic parameter.
1520pub struct BootstrapAllocator<R, const MAX_REGIONS: usize = DEFAULT_MAX_REGIONS>
1621where
1722 R: lock_api::RawMutex,
1823{
1924 inner: Mutex<R, BootstrapAllocatorInner<MAX_REGIONS>>,
2020- page_size: usize,
2525+ // we make a "snapshot" of the translation granule size during construction so that the allocator
2626+ // itself doesn't need to be generic over `Arch`.
2727+ frame_size: usize,
2128}
22292330#[derive(Debug)]
2431struct BootstrapAllocatorInner<const MAX_REGIONS: usize> {
3232+ /// The discontiguous regions of "regular" physical memory that we can use for allocation.
2533 regions: ArrayVec<Range<PhysicalAddress>, MAX_REGIONS>,
2626- // offset from the top of memory regions
3434+ /// offset from the top of memory regions
2735 offset: usize,
2836}
2937···3442 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3543 f.debug_struct("BootstrapAllocator")
3644 .field("regions", &self.inner.lock())
3737- .field("page_size", &self.page_size)
4545+ .field("frame_size", &self.frame_size)
3846 .finish()
3947 }
4048}
···68766977 Self {
7078 inner: Mutex::new(BootstrapAllocatorInner { regions, offset: 0 }),
7171- page_size: A::GRANULE_SIZE,
7979+ frame_size: A::GRANULE_SIZE,
7280 }
7381 }
74827575- pub fn regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> {
7676- self.inner.lock().regions.clone().into_iter()
8383+ /// Returns the array of "regular" physical memory regions managed by this allocator.
8484+ pub fn regions(&self) -> ArrayVec<Range<PhysicalAddress>, MAX_REGIONS> {
8585+ self.inner.lock().regions.clone()
7786 }
78878888+ /// Returns an iterator over the "free" (not allocated) portions of physical memory regions
8989+ /// managed by this allocator.
7990 pub fn free_regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> {
8091 let inner = self.inner.lock();
8192···8596 }
8697 }
87989999+ /// Returns an iterator over the "used" (allocated) portions of physical memory regions
100100+ /// managed by this allocator.
88101 pub fn used_regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> {
89102 let inner = self.inner.lock();
90103···94107 }
95108 }
96109110110+ /// Returns the number of allocated bytes.
97111 pub fn usage(&self) -> usize {
98112 self.inner.lock().offset
99113 }
···108122 fn allocate_contiguous(&self, layout: Layout) -> Result<PhysicalAddress, AllocError> {
109123 assert_eq!(
110124 layout.align(),
111111- self.page_size,
125125+ self.frame_size,
112126 "BootstrapAllocator only supports page-aligned allocations"
113127 );
114128115129 self.inner.lock().allocate(layout)
116130 }
117131118118- // fn allocate_contiguous_zeroed(
119119- // &self,
120120- // layout: Layout,
121121- // arch: &impl Arch,
122122- // ) -> Result<PhysicalAddress, AllocError> {
123123- // assert_eq!(
124124- // layout.align(),
125125- // self.page_size,
126126- // "BootstrapAllocator only supports page-aligned allocations"
127127- // );
128128- //
129129- // let frame = self.inner.lock().allocate(layout)?;
130130- //
131131- // self.physmap.with_mapped(frame, |page| {
132132- // // Safety: the address is properly aligned (at least page aligned) and is either valid to
133133- // // access through the physical memory map or because we're in bootstrapping still and phys==virt
134134- // unsafe {
135135- // arch.write_bytes(page, 0, layout.size());
136136- // }
137137- // });
138138- //
139139- // Ok(frame)
140140- // }
141141-142132 unsafe fn deallocate(&self, _block: PhysicalAddress, _layout: Layout) {
143143- unimplemented!()
133133+ unimplemented!("BootstrapAllocator does not support deallocation")
144134 }
145135146136 fn size_hint(&self) -> (NonZeroUsize, Option<NonZeroUsize>) {
147147- (NonZeroUsize::new(self.page_size).unwrap(), None)
137137+ (NonZeroUsize::new(self.frame_size).unwrap(), None)
148138 }
149139}
150140···239229mod tests {
240230 use crate::arch::Arch;
241231 use crate::bootstrap::BootstrapAllocator;
242242- use crate::emulate::MachineBuilder;
243243- use crate::emulate::arch::EmulateArch;
244232 use crate::frame_allocator::FrameAllocator;
245245- use crate::{PhysicalMemoryMapping, archtest};
233233+ use crate::test_utils::{EmulateArch, MachineBuilder};
234234+ use crate::{PhysMap, archtest};
246235247236 archtest! {
248237 // Assert that the BootstrapAllocator can allocate frames
···291280292281 let arch = EmulateArch::new(machine);
293282294294- let physmap = PhysicalMemoryMapping::new_bootstrap();
283283+ let physmap = PhysMap::new_bootstrap();
295284296285 // Based on the memory of the machine we set up above, we expect the allocator to
297286 // yield 3 pages.
···66use fallible_iterator::FallibleIterator;
7788use crate::arch::Arch;
99-use crate::physmap::PhysicalMemoryMapping;
99+use crate::physmap::PhysMap;
1010use crate::{AddressRangeExt, PhysicalAddress};
11111212+/// The `AllocError` error indicates a frame allocation failure that may be due
1313+/// to resource exhaustion or to something wrong when combining the given input
1414+/// arguments with this allocator.
1215#[derive(Debug, Copy, Clone)]
1316pub struct AllocError;
1417···9295 fn allocate_contiguous_zeroed(
9396 &self,
9497 layout: Layout,
9595- physmap: &PhysicalMemoryMapping,
9898+ physmap: &PhysMap,
9699 arch: &impl Arch,
97100 ) -> Result<PhysicalAddress, AllocError> {
98101 let frame = self.allocate_contiguous(layout)?;
+5-5
libs/kmem/src/lib.rs
···11-#![cfg_attr(not(any(test, feature = "emulate")), no_std)]
11+#![cfg_attr(not(any(test, feature = "test_utils")), no_std)]
22// #![no_std]
33#![feature(step_trait)]
44#![feature(debug_closure_helpers)]
···1414mod memory_attributes;
1515mod physmap;
1616mod table;
1717+#[cfg(feature = "test_utils")]
1818+pub mod test_utils;
1719mod utils;
18201919-#[cfg(feature = "emulate")]
2020-mod emulate;
2121-2221pub use address::{PhysicalAddress, VirtualAddress};
2322pub use address_range::AddressRangeExt;
2423pub use address_space::HardwareAddressSpace;
2424+pub use flush::Flush;
2525pub use frame_allocator::{AllocError, FrameAllocator, FrameIter};
2626pub use memory_attributes::{MemoryAttributes, WriteOrExecute};
2727-pub use physmap::PhysicalMemoryMapping;
2727+pub use physmap::PhysMap;
28282929pub const KIB: usize = 1024;
3030pub const MIB: usize = KIB * 1024;
+121-65
libs/kmem/src/physmap.rs
···11use core::cmp;
22use core::ops::Range;
3344-use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
44+use crate::{PhysicalAddress, VirtualAddress};
5566+/// Describes the region of virtual memory that maps all of physical memory. This region is used
77+/// by the virtual memory subsystem to access memory where only the physical address is known (e.g.
88+/// zeroing frames of memory in the frame allocator).
99+///
1010+/// This region must be mapped so it is only accessible by the kernel.
611#[derive(Debug, Clone)]
77-pub struct PhysicalMemoryMapping {
88- translation_offset: usize,
1212+pub struct PhysMap {
1313+ translation_offset: isize,
914 #[cfg(debug_assertions)]
1010- range: Option<Range<VirtualAddress>>,
1515+ range: Option<Range<u128>>,
1116}
12171313-impl PhysicalMemoryMapping {
1818+impl PhysMap {
1919+ /// Construct a new `PhysMap` from a chosen base address and the machine's physical memory regions.
2020+ /// The iterator over the memory regions must not be empty.
2121+ ///
2222+ /// # Panics
2323+ ///
2424+ /// Panics if the iterator is empty.
1425 pub fn new(
1526 physmap_start: VirtualAddress,
1616- regions: impl Iterator<Item = Range<PhysicalAddress>>,
2727+ regions: impl IntoIterator<Item = Range<PhysicalAddress>>,
1728 ) -> Self {
1829 let mut min_addr = PhysicalAddress::MAX;
1930 let mut max_addr = PhysicalAddress::MIN;
···2334 max_addr = cmp::max(max_addr, region.end);
2435 }
25362626- assert!(min_addr <= max_addr);
3737+ assert!(min_addr <= max_addr, "regions must not be empty");
27382828- let translation_offset = physmap_start.get() - min_addr.get();
3939+ #[expect(
4040+ clippy::cast_possible_wrap,
4141+ reason = "this is expected to wrap when the physmap_start is lower than the lowest physical address (e.g. when it is in upper half of memory)"
4242+ )]
4343+ let translation_offset = physmap_start.get().wrapping_sub(min_addr.get()) as isize;
29443045 #[cfg(debug_assertions)]
3131- let range = Range::from_start_len(physmap_start, max_addr.offset_from_unsigned(min_addr));
4646+ let range = {
4747+ let start = physmap_start.get() as u128;
4848+ let end = start + max_addr.offset_from_unsigned(min_addr) as u128;
4949+5050+ start..end
5151+ };
32523353 Self {
3454 translation_offset,
···4060 pub(crate) const fn new_bootstrap() -> Self {
4161 Self {
4262 translation_offset: 0,
6363+ #[cfg(debug_assertions)]
4364 range: None,
4465 }
4566 }
46676868+ /// Translates a `PhysicalAddress` to a `VirtualAddress` through this `PhysMap`.
6969+ #[expect(clippy::missing_panics_doc, reason = "internal assert")]
4770 #[inline]
4871 pub fn phys_to_virt(&self, phys: PhysicalAddress) -> VirtualAddress {
4949- let virt = VirtualAddress::new(phys.get() + self.translation_offset);
7272+ let virt = VirtualAddress::new(phys.wrapping_offset(self.translation_offset).get());
50735174 #[cfg(debug_assertions)]
5275 if let Some(range) = &self.range {
5376 assert!(
5454- range.start <= virt && virt <= range.end,
5555- "physical address is not mapped in physical memory mapping. this is a bug! physmap={self:?},phys={phys:?},virt={virt}"
7777+ range.start <= virt.get() as u128 && virt.get() as u128 <= range.end,
7878+ "physical address is not mapped in physical memory mapping. this is a bug! physmap={self:#x?},phys={phys:?},virt={virt}"
5679 );
5780 }
5881···66896790 start..end
6891 }
6969- //
7070- // #[inline]
7171- // pub fn with_mapped<R>(&self, phys: PhysicalAddress, cb: impl FnOnce(VirtualAddress) -> R) -> R {
7272- // let virt = if let Some(physmap) = &self.range {
7373- // let virt = physmap.start.add(phys.get());
7474- //
7575- // debug_assert!(physmap.contains(&virt));
7676- //
7777- // virt
7878- // } else {
7979- // // Safety: during bootstrap no address translation takes place meaning physical addresses *are*
8080- // // virtual addresses.
8181- // unsafe { mem::transmute::<PhysicalAddress, VirtualAddress>(phys) }
8282- // };
8383- //
8484- // cb(virt)
8585- // }
8686- //
8787- // #[inline]
8888- // pub fn with_mapped_range<R>(
8989- // &self,
9090- // phys: Range<PhysicalAddress>,
9191- // cb: impl FnOnce(Range<VirtualAddress>) -> R,
9292- // ) -> R {
9393- // let virt = if let Some(physmap) = &self.range {
9494- // let start = physmap.start.add(phys.start.get());
9595- // let end = physmap.start.add(phys.end.get());
9696- //
9797- // debug_assert!(
9898- // physmap.contains(&start),
9999- // "physical address is not mapped in physical memory mapping. this is a bug! physmap={physmap:?},phys={phys:?},virt={:?}",
100100- // start..end
101101- // );
102102- // debug_assert!(
103103- // physmap.contains(&end),
104104- // "physical address is not mapped in physical memory mapping. this is a bug! physmap={physmap:?},phys={phys:?},virt={:?}",
105105- // start..end
106106- // );
107107- //
108108- // start..end
109109- // } else {
110110- // // Safety: during bootstrap no address translation takes place meaning physical addresses *are*
111111- // // virtual addresses.
112112- // unsafe {
113113- // Range {
114114- // start: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.start),
115115- // end: mem::transmute::<PhysicalAddress, VirtualAddress>(phys.end),
116116- // }
117117- // }
118118- // };
119119- //
120120- // cb(virt)
121121- // }
9292+}
9393+9494+#[cfg(test)]
9595+mod tests {
9696+ use proptest::prelude::*;
9797+9898+ use super::*;
9999+ use crate::address_range::AddressRangeExt;
100100+ use crate::test_utils::proptest::{
101101+ aligned_phys, aligned_virt, pick_address_in_regions, regions,
102102+ };
103103+ use crate::{GIB, KIB};
104104+105105+ proptest! {
106106+ #[test]
107107+ fn single_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), region_start in aligned_phys(any::<PhysicalAddress>(), 4*KIB), region_size in 0..256*GIB) {
108108+ let map = PhysMap::new(
109109+ base,
110110+ [Range::from_start_len(region_start, region_size)],
111111+ );
112112+113113+ prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(region_start.get()) as isize);
114114+ prop_assert_eq!(
115115+ map.range,
116116+ Some(base.get() as u128..base.add(region_size).get() as u128)
117117+ )
118118+ }
119119+120120+ #[test]
121121+ fn multi_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), regions in regions(1..10, 4*KIB, 256*GIB, 256*GIB)) {
122122+ let regions_start = regions[0].start;
123123+124124+ let map = PhysMap::new(
125125+ base,
126126+ regions
127127+ );
128128+129129+ prop_assert_eq!(map.translation_offset, base.get().wrapping_sub(regions_start.get()) as isize);
130130+ }
131131+132132+ #[test]
133133+ fn phys_to_virt(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), (regions, phys) in pick_address_in_regions(regions(1..10, 4*KIB, 256*GIB, 256*GIB)), ) {
134134+ let regions_start = regions[0].start;
135135+136136+ let map = PhysMap::new(
137137+ base,
138138+ regions
139139+ );
140140+141141+ let virt = map.phys_to_virt(phys);
142142+143143+ prop_assert_eq!(virt.get(), base.get() + (phys.get() - regions_start.get()))
144144+ }
145145+ }
146146+147147+ #[test]
148148+ #[should_panic]
149149+ fn construct_no_regions() {
150150+ let _map = PhysMap::new(VirtualAddress::new(0xffffffc000000000), []);
151151+ }
152152+153153+ #[test]
154154+ fn phys_to_virt_lower_half() {
155155+ let map = PhysMap::new(
156156+ VirtualAddress::new(0x0),
157157+ [PhysicalAddress::new(0x00007f87024d9000)..PhysicalAddress::new(0x00007fc200e17000)],
158158+ );
159159+160160+ println!("{map:?}");
161161+162162+ let virt = map.phys_to_virt(PhysicalAddress::new(0x00007f87024d9000));
163163+ assert_eq!(virt, VirtualAddress::new(0x0));
164164+ }
165165+166166+ #[test]
167167+ fn phys_to_virt_upper_half() {
168168+ let map = PhysMap::new(
169169+ VirtualAddress::new(0xffffffc000000000),
170170+ [PhysicalAddress::new(0x00007f87024d9000)..PhysicalAddress::new(0x00007fc200e17000)],
171171+ );
172172+173173+ println!("{map:?}");
174174+175175+ let virt = map.phys_to_virt(PhysicalAddress::new(0x00007f87024d9000));
176176+ assert_eq!(virt, VirtualAddress::new(0xffffffc000000000));
177177+ }
122178}
+8-11
libs/kmem/src/table.rs
···44use arrayvec::ArrayVec;
5566use crate::arch::{Arch, PageTableEntry, PageTableLevel};
77-use crate::physmap::PhysicalMemoryMapping;
77+use crate::physmap::PhysMap;
88use crate::utils::{PageTableEntries, page_table_entries_for};
99use crate::{AllocError, FrameAllocator, PhysicalAddress, VirtualAddress};
1010···5050 }
51515252 /// Returns `true` when _all_ page table entries in this table are _vacant_.
5353- pub fn is_empty(&self, physmap: &PhysicalMemoryMapping, arch: &A) -> bool {
5353+ pub fn is_empty(&self, physmap: &PhysMap, arch: &A) -> bool {
5454 let mut is_empty = true;
55555656 for entry_index in 0..self.level().entries() {
···6969 /// # Safety
7070 ///
7171 /// The caller must ensure `index` is in-bounds (less than the number of entries at this level).
7272- pub unsafe fn get(
7373- &self,
7474- index: u16,
7575- physmap: &PhysicalMemoryMapping,
7676- arch: &A,
7777- ) -> A::PageTableEntry {
7272+ pub unsafe fn get(&self, index: u16, physmap: &PhysMap, arch: &A) -> A::PageTableEntry {
7873 let entry_phys = self
7974 .base
8075 .add(index as usize * size_of::<A::PageTableEntry>());
···9186impl<A: Arch> Table<A, marker::Owned> {
9287 pub fn allocate(
9388 frame_allocator: impl FrameAllocator,
9494- physmap: &PhysicalMemoryMapping,
8989+ physmap: &PhysMap,
9590 arch: &A,
9691 ) -> Result<Self, AllocError> {
9792 let base = frame_allocator.allocate_contiguous_zeroed(A::GRANULE_LAYOUT, physmap, arch)?;
···146141 &mut self,
147142 index: u16,
148143 entry: A::PageTableEntry,
149149- physmap: &PhysicalMemoryMapping,
144144+ physmap: &PhysMap,
150145 arch: &A,
151146 ) {
152147 debug_assert!(index < self.level().entries());
···166161 pub fn visit_mut<F, E>(
167162 self,
168163 range: Range<VirtualAddress>,
169169- physmap: &PhysicalMemoryMapping,
164164+ physmap: &PhysMap,
170165 arch: &A,
171166 mut visit_entry: F,
172167 ) -> Result<(), E>
···193188194189 while let Some(mut frame) = stack.pop() {
195190 for (entry_index, range) in frame.entries_iter {
191191+ // Safety: `page_table_entries_for` yields only in-bound indices
196192 let mut entry = unsafe { frame.table.get(entry_index, physmap, arch) };
197193198194 visit_entry(&mut entry, range.clone(), frame.table.level())?;
199195196196+ // Safety: `page_table_entries_for` yields only in-bound indices
200197 unsafe {
201198 frame.table.set(entry_index, entry, physmap, arch);
202199 }
+108
libs/kmem/src/test_utils/arch.rs
···11+use core::fmt;
22+use core::ops::Range;
33+use std::mem;
44+55+use crate::arch::{Arch, PageTableLevel};
66+use crate::test_utils::Machine;
77+use crate::{PhysicalAddress, VirtualAddress};
88+99+/// `[Arch`] implementation that emulates a given "real" architecture. For testing purposes.
1010+pub struct EmulateArch<A: Arch> {
1111+ machine: Machine<A>,
1212+ asid: u16,
1313+}
1414+1515+impl<A: Arch> fmt::Debug for EmulateArch<A>
1616+where
1717+ A::PageTableEntry: fmt::Debug,
1818+{
1919+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2020+ f.debug_struct("EmulateArch")
2121+ .field("machine", &self.machine)
2222+ .finish()
2323+ }
2424+}
2525+2626+impl<A: Arch> EmulateArch<A> {
2727+ pub const fn new(machine: Machine<A>) -> Self {
2828+ Self::with_asid(machine, 0)
2929+ }
3030+3131+ pub const fn with_asid(machine: Machine<A>, asid: u16) -> Self {
3232+ Self { machine, asid }
3333+ }
3434+3535+ pub const fn machine(&self) -> &Machine<A> {
3636+ &self.machine
3737+ }
3838+}

impl<A: Arch> Arch for EmulateArch<A> {
    // We want to inherit all const parameters from the proper architecture...

    type PageTableEntry = A::PageTableEntry;
    const LEVELS: &'static [PageTableLevel] = A::LEVELS;
    const DEFAULT_PHYSMAP_BASE: VirtualAddress = A::DEFAULT_PHYSMAP_BASE;

    // ...while we emulate all other methods.

    // Root of the currently active translation table on the emulated machine,
    // or `None` when no table is active (the "bare" mode handled below).
    fn active_table(&self) -> Option<PhysicalAddress> {
        self.machine.active_table()
    }

    unsafe fn set_active_table(&self, address: PhysicalAddress) {
        unsafe {
            self.machine.set_active_table(address);
        }
    }

    // Fences are emulated as TLB invalidations scoped to this instance's ASID.
    fn fence(&self, address_range: Range<VirtualAddress>) {
        self.machine.invalidate(self.asid, address_range);
    }

    fn fence_all(&self) {
        self.machine.invalidate_all(self.asid);
    }

    unsafe fn read<T>(&self, address: VirtualAddress) -> T {
        // NB: if there is no active page table on this CPU, we are in "bare" translation mode.
        // In which case we need to use `read_phys` instead of `read`, bypassing
        // translation checks.
        if self.active_table().is_some() {
            unsafe { self.machine.read(self.asid, address) }
        } else {
            // Safety: We checked for the absence of an active translation table, meaning we're in
            // "bare" mode and VirtualAddress==PhysicalAddress.
            // (Both are `#[repr(transparent)]` wrappers over `usize`, so the transmute
            // only reinterprets the newtype, not the bits.)
            let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) };
            unsafe { self.machine.read_phys(address) }
        }
    }

    unsafe fn write<T>(&self, address: VirtualAddress, value: T) {
        // NB: if there is no active page table on this CPU, we are in "bare" translation mode.
        // In which case we need to use `write_phys` instead of `write`, bypassing
        // translation checks.
        if self.active_table().is_some() {
            unsafe { self.machine.write(self.asid, address, value) }
        } else {
            // Safety: We checked for the absence of an active translation table, meaning we're in
            // "bare" mode and VirtualAddress==PhysicalAddress.
            let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) };
            unsafe { self.machine.write_phys(address, value) }
        }
    }

    unsafe fn write_bytes(&self, address: VirtualAddress, value: u8, count: usize) {
        // NB: if there is no active page table on this CPU, we are in "bare" translation mode.
        // In which case we need to use `write_bytes_phys` instead of `write_bytes`, bypassing
        // translation checks.
        //
        // NOTE(review): unlike `read`/`write` above, the `Machine` calls here are not wrapped
        // in explicit `unsafe` blocks — confirm `Machine::write_bytes{,_phys}` are safe fns,
        // or add the blocks for consistency with the rest of this impl.
        if self.active_table().is_some() {
            self.machine.write_bytes(self.asid, address, value, count)
        } else {
            // Safety: We checked for the absence of an active translation table, meaning we're in
            // "bare" mode and VirtualAddress==PhysicalAddress.
            let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) };
            self.machine.write_bytes_phys(address, value, count)
        }
    }
}