···11+# Seeds for failure cases proptest has generated in the past. It is
22+# automatically read and these particular cases re-run before any
33+# novel cases are generated.
44+#
55+# It is recommended to check this file in to source control so that
66+# everyone who runs the test benefits from these saved cases.
77+cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1
88+cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257
99+cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257
1010+cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354
1111+cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257
1212+cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20
1313+cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+98
libs/mem/src/access_rules.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
mycelium_bitfield::bitfield! {
    /// Rules that dictate how a region of virtual memory may be accessed.
    ///
    /// # W^X
    ///
    /// In order to prevent malicious code execution as proactively as possible,
    /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
    /// through the [`WriteOrExecute`] enum field.
    #[derive(PartialEq, Eq)]
    pub struct AccessRules<u8> {
        /// If set, reading from the memory region is allowed.
        pub const READ: bool;
        /// Whether executing, or writing this memory region is allowed (or neither).
        ///
        /// Occupies two bits; see [`WriteOrExecute`] for the encoding.
        pub const WRITE_OR_EXECUTE: WriteOrExecute;
        /// If set, requires code in the memory region to use aarch64 Branch Target Identification.
        /// Does nothing on non-aarch64 architectures.
        pub const BTI: bool;
    }
}
/// Whether executing, or writing this memory region is allowed (or neither).
///
/// This is an enum to enforce [`W^X`] at the type-level: a region can be
/// writable or executable, but the type cannot represent both at once.
///
/// [`W^X`]: AccessRules
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum WriteOrExecute {
    /// Neither writing nor execution of the memory region is allowed.
    Neither = 0b00,
    /// Writing to the memory region is allowed.
    Write = 0b01,
    /// Executing code from the memory region is allowed.
    Execute = 0b10,
}
4343+4444+// ===== impl AccessRules =====
4545+4646+impl AccessRules {
4747+4848+ pub const fn is_read_only(&self) -> bool {
4949+ const READ_MASK: u8 = AccessRules::READ.max_value();
5050+ assert!(READ_MASK == 1);
5151+ self.0 & READ_MASK == 1
5252+ }
5353+5454+ pub fn allows_read(&self) -> bool {
5555+ self.get(Self::READ)
5656+ }
5757+5858+ pub fn allows_write(&self) -> bool {
5959+ match self.get(Self::WRITE_OR_EXECUTE) {
6060+ WriteOrExecute::Write => true,
6161+ _ => false,
6262+ }
6363+ }
6464+6565+ pub fn allows_execution(&self) -> bool {
6666+ match self.get(Self::WRITE_OR_EXECUTE) {
6767+ WriteOrExecute::Execute => true,
6868+ _ => false,
6969+ }
7070+ }
7171+}
7272+7373+// ===== impl WriteOrExecute =====
7474+7575+impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
7676+ type Error = core::convert::Infallible;
7777+7878+ /// The number of bits required to represent a value of this type.
7979+ const BITS: u32 = 2;
8080+8181+ #[inline]
8282+ fn try_from_bits(bits: u8) -> Result<Self, Self::Error> {
8383+ match bits {
8484+ b if b == Self::Neither as u8 => Ok(Self::Neither),
8585+ b if b == Self::Write as u8 => Ok(Self::Write),
8686+ b if b == Self::Execute as u8 => Ok(Self::Execute),
8787+ _ => {
8888+ // this should never happen unless the bitpacking code is broken
8989+ unreachable!("invalid memory region access rules {bits:#b}")
9090+ }
9191+ }
9292+ }
9393+9494+ #[inline]
9595+ fn into_bits(self) -> u8 {
9696+ self as u8
9797+ }
9898+}
+1007
libs/mem/src/address_space.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod batch;
99+mod region;
1010+1111+use alloc::boxed::Box;
1212+use alloc::sync::Arc;
1313+use core::alloc::Layout;
1414+use core::num::NonZeroUsize;
1515+use core::ops::{Bound, ControlFlow, Range};
1616+use core::ptr::NonNull;
1717+1818+use anyhow::{format_err, Context};
1919+pub(crate) use batch::Batch;
2020+use rand::distr::Uniform;
2121+use rand::Rng;
2222+use rand_chacha::ChaCha20Rng;
2323+use region::AddressSpaceRegion;
2424+use wavltree::{CursorMut, WAVLTree};
2525+2626+use crate::access_rules::AccessRules;
2727+use crate::frame_alloc::FrameAllocator;
2828+use crate::utils::assert_unsafe_precondition_;
2929+use crate::vmo::PagedVmo;
3030+use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
pub unsafe trait RawAddressSpace {
    /// The smallest addressable chunk of memory of this address space. All address arguments provided
    /// to methods of this type (both virtual and physical) must be aligned to this.
    const PAGE_SIZE: usize;
    /// The number of bits of a virtual address that are actually translated.
    const VIRT_ADDR_BITS: u32;

    /// log2 of [`Self::PAGE_SIZE`]. Computed as the popcount of `PAGE_SIZE - 1`,
    /// which is only correct when `PAGE_SIZE` is a power of two.
    const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8;
    /// Mask selecting the high virtual-address bits beyond [`Self::VIRT_ADDR_BITS`]
    /// (the bits that determine whether an address is canonical).
    const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1);

    /// The [`Flush`] implementation for this address space.
    type Flush: Flush;

    /// Return a new, empty flush for this address space.
    fn flush(&self) -> Self::Flush;

    /// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given
    /// [`VirtualAddress`] if mapped.
    fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>;

    /// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the
    /// specified access rules.
    ///
    /// If this returns `Ok`, the mapping is added to the raw address space and all future
    /// accesses to the virtual address range will translate to accesses of the physical address
    /// range.
    ///
    /// # Safety
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the mapping cannot be established and the virtual address range
    /// remains unaltered.
    unsafe fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    ) -> crate::Result<()>;

    /// Unmap a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region will cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush);

    /// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region must follow the
    /// specified `AccessRules` or cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    );
}
/// A type that can flush changes made to a [`RawAddressSpace`].
///
/// Note: [`Flush`] is purely optional; it exists so implementations MAY batch changes.
/// Note that the implementation is not required to delay materializing changes until [`Flush::flush`]
/// is called.
pub trait Flush {
    /// Flush changes made to its [`RawAddressSpace`].
    ///
    /// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across
    /// all affected threads/CPUs.
    ///
    /// # Errors
    ///
    /// If this returns `Err`, flushing the changes failed. The changes, or a subset of them, might
    /// still have taken effect across all or some of the threads/CPUs.
    fn flush(self) -> crate::Result<()>;
}
/// A high-level virtual address space built on top of a [`RawAddressSpace`].
pub struct AddressSpace<R: RawAddressSpace> {
    // The underlying, architecture-specific raw address space.
    raw: R,
    // Reserved regions of this address space, keyed by their address range.
    regions: WAVLTree<AddressSpaceRegion<R>>,
    // Pending changes that have not yet been flushed to `raw`.
    batch: Batch,
    // The maximum range of virtual addresses this address space may hand out.
    max_range: Range<VirtualAddress>,
    // Optional RNG; presumably used to randomize region placement (ASLR) —
    // TODO confirm against `find_spot_for`.
    rng: Option<ChaCha20Rng>,
    // Allocator supplying the physical frames that back the regions.
    frame_alloc: &'static dyn FrameAllocator,
}
impl<A: RawAddressSpace> AddressSpace<A> {
    /// Creates a new, empty [`AddressSpace`] on top of the raw address space `raw`.
    ///
    /// `rng`, when provided, is presumably used to randomize region placement
    /// (TODO confirm); `frame_alloc` supplies the physical frames backing future
    /// mappings. The address space initially covers the full virtual range.
    pub fn new(raw: A, rng: Option<ChaCha20Rng>, frame_alloc: &'static dyn FrameAllocator) -> Self {
        let regions = WAVLTree::new();
        let batch = Batch::new();
        let max_range = VirtualAddress::MIN..VirtualAddress::MAX;

        Self {
            raw,
            rng,
            frame_alloc,
            regions,
            batch,
            max_range,
        }
    }
147147+148148+ /// Attempts to reserve a region of virtual memory.
149149+ ///
150150+ /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
151151+ /// of `layout`. Access to this region must obey the provided `rules` or cause a hardware fault.
152152+ ///
153153+ /// The returned region may have a larger size than specified by `layout.size()`, and may or may
154154+ /// not have its contents initialized.
155155+ ///
156156+ /// The returned region of virtual memory remains mapped as long as it is [*currently mapped*]
157157+ /// and the address space type itself has not been dropped.
158158+ ///
159159+ /// [*currently mapped*]: #currently-mapped-memory
160160+ ///
161161+ /// # Errors
162162+ ///
163163+ /// Returning `Err` indicates the layout does not meet the address space's size or alignment
164164+ /// constraints, virtual memory is exhausted, or mapping otherwise fails.
165165+ pub fn map<R: lock_api::RawRwLock>(
166166+ &mut self,
167167+ layout: Layout,
168168+ access_rules: AccessRules,
169169+ ) -> crate::Result<NonNull<[u8]>> {
170170+ #[cfg(debug_assertions)]
171171+ self.assert_valid("[AddressSpace::map]");
172172+173173+ let layout = layout.align_to(A::PAGE_SIZE).unwrap();
174174+175175+ let spot = self
176176+ .find_spot_for(layout)
177177+ .context(format_err!("cannot find free spot for layout {layout:?}"))?;
178178+179179+ // TODO "relaxed" frame provider
180180+ let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo();
181181+ let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0);
182182+183183+ let region = self.regions.insert(Box::pin(region));
184184+185185+ // TODO OPTIONAL eagerly commit a few pages
186186+187187+ self.batch.flush_changes(&mut self.raw)?;
188188+189189+ Ok(region.as_non_null())
190190+ }
191191+192192+ /// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region
193193+ /// is zero-initialized.
194194+ ///
195195+ /// # Errors
196196+ ///
197197+ /// Returning `Err` indicates the layout does not meet the address space's size or alignment
198198+ /// constraints, virtual memory is exhausted, or mapping otherwise fails.
199199+ pub fn map_zeroed<R: lock_api::RawRwLock>(
200200+ &mut self,
201201+ layout: Layout,
202202+ access_rules: AccessRules,
203203+ ) -> crate::Result<NonNull<[u8]>> {
204204+ #[cfg(debug_assertions)]
205205+ self.assert_valid("[AddressSpace::map_zeroed]");
206206+207207+ let layout = layout.align_to(A::PAGE_SIZE).unwrap();
208208+209209+ let spot = self
210210+ .find_spot_for(layout)
211211+ .context(format_err!("cannot find free spot for layout {layout:?}"))?;
212212+213213+ // TODO "zeroed" frame provider
214214+ let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo();
215215+ let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0);
216216+217217+ let region = self.regions.insert(Box::pin(region));
218218+219219+ // TODO OPTIONAL eagerly commit a few pages
220220+221221+ self.batch.flush_changes(&mut self.raw)?;
222222+223223+ Ok(region.as_non_null())
224224+ }
    /// Unmaps the virtual memory region referenced by `ptr`.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::unmap]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Remove the region from the tree; the caller guarantees it exists.
        // Safety: responsibility of caller
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        // Decommit all of the region's pages.
        // NOTE(review): this panics if decommitting fails — confirm that is intended,
        // since `unmap` has no error path to report it.
        region.decommit(.., &mut self.batch, &mut self.raw).unwrap();
    }
    /// Attempts to extend the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may extend the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how extending a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow]");

        // Both layouts must be at least page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Nothing to do if the layout is unchanged.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Growing must never shrink the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        // Prefer growing in place; fall back to relocating the region elsewhere.
        if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow_in_place]");

        // Both layouts must be at least page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Nothing to do if the layout is unchanged.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Growing must never shrink the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Attempts to shrink the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how shrinking a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink]");

        // Both layouts must be at least page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Nothing to do if the layout is unchanged.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Shrinking must never grow the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        // Prefer shrinking in place; fall back to relocating the region elsewhere.
        if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be
    /// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink_in_place]");

        // Both layouts must be at least page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Nothing to do if the layout is unchanged.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Shrinking must never grow the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Updates the access rules for the virtual memory region referenced by `ptr`.
    ///
    /// If this returns `Ok`, access to this region must obey the new `access_rules` or cause a
    /// hardware fault.
    ///
    /// If this method returns `Err`, the access rules of the memory region are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    pub unsafe fn update_access_rules(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<()> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::update_access_rules]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Record the permission change, then materialize it in the raw address space.
        region.update_access_rules(access_rules, &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(())
    }
    /// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout` and is
    /// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
    /// virtual memory region.
    ///
    /// TODO describe how clearing a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, clearing a virtual memory region is not supported by the backing storage, or
    /// clearing otherwise fails.
    pub unsafe fn clear(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::clear]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Clear the entire region, then materialize the change in the raw address space.
        region.clear(.., &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
598598+599599+ pub fn assert_valid(&self, msg: &str) {
600600+ let mut regions = self.regions.iter();
601601+602602+ let Some(first_region) = regions.next() else {
603603+ assert!(
604604+ self.regions.is_empty(),
605605+ "{msg}region iterator is empty but tree is not."
606606+ );
607607+608608+ return;
609609+ };
610610+611611+ first_region.assert_valid(msg);
612612+613613+ let mut seen_range = first_region.range().clone();
614614+615615+ while let Some(region) = regions.next() {
616616+ assert!(
617617+ !region.range().is_overlapping(&seen_range),
618618+ "{msg}region cannot overlap previous region; region={region:?}"
619619+ );
620620+ assert!(
621621+ region.range().start >= self.max_range.start
622622+ && region.range().end <= self.max_range.end,
623623+ "{msg}region cannot lie outside of max address space range; region={region:?}"
624624+ );
625625+626626+ seen_range = seen_range.start..region.range().end;
627627+628628+ region.assert_valid(msg);
629629+630630+ // TODO assert validity of of VMO against phys addresses
631631+ // let (_phys, access_rules) = self
632632+ // .batched_raw
633633+ // .raw_address_space()
634634+ // .lookup(region.range().start)
635635+ // .unwrap_or_else(|| {
636636+ // panic!("{msg}region base address is not mapped in raw address space region={region:?}")
637637+ // });
638638+ //
639639+ // assert_eq!(
640640+ // access_rules,
641641+ // region.access_rules(),
642642+ // "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
643643+ // region.access_rules(),
644644+ // );
645645+ }
646646+ }
    /// Attempts to grow a virtual memory region in-place. This method is shared between [`Self::grow`]
    /// and [`Self::grow_in_place`].
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn grow_in_place_inner(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };

        // Range of the following region; presumably needed to decide whether in-place
        // growth would collide with it — TODO confirm once the implementation lands.
        let next_range = cursor.peek_next().map(|region| region.range().clone());

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // NOTE(review): unimplemented — everything below `todo!()` is currently unreachable.
        todo!();
        region.grow(new_layout.size(), &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::shrink`]
    /// and [`Self::shrink_in_place`].
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn shrink_in_place_inner(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Shrink the region, then materialize the change in the raw address space.
        region.shrink(new_layout.size(), &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Reallocates a virtual address region. This will unmap and remove the old region, allocating
    /// a new region that will be backed by the old region's physical memory.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///   NOTE(review): this bullet looks copy-pasted from [`Self::shrink_in_place_inner`]; a
    ///   reallocation presumably also serves growing requests — confirm the intended contract.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn reallocate_region(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
        // Safety: `get_region_containing_ptr`'s contract guarantees the cursor points at a
        // region, so `remove` returns `Some`.
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        // Find a new home in the address space that satisfies the new layout.
        let spot = self.find_spot_for(new_layout).context(format_err!(
            "cannot find free spot for layout {new_layout:?}"
        ))?;

        // TODO(WIP): moving the region to `spot` is not implemented yet; everything below the
        // `todo!()` is the intended flow and is currently unreachable.
        todo!();

        // region.move_to(spot, new_layout, &mut self.batch)?;

        Ok(region.as_non_null())
    }
736736+737737+ /// Find a spot in the address space that satisfies the given `layout` requirements.
738738+ ///
739739+ /// If a spot suitable for holding data described by `layout` is found, the base address of the
740740+ /// address range is returned in `Some`. The returned address is already correct aligned to
741741+ /// `layout.align()`.
742742+ ///
743743+ /// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
744744+ /// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
745745+ /// be satisfied *at the moment*. Calls to this method will a different size, alignment, or at a
746746+ /// different time might still succeed.
747747+ fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> {
748748+ // The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is
749749+ // guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works
750750+ // elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively.
751751+ // Here is how it works:
752752+ // 1. We set up two counters: (see the GapVisitor)
753753+ // - `candidate_spot_count` which we initialize to zero
754754+ // - `target_index` which we either set to a random value between 0..<the maximum number of
755755+ // possible addresses in the address space> if ASLR is requested OR to zero otherwise.
756756+ // 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the
757757+ // gaps between regions. We count the number of addresses in each gap that satisfy the
758758+ // requested `Layout`s size and alignment and add that to the `candidate_spot_count`.
759759+ // IF the number of spots in the gap is greater than our chosen target index, we pick the
760760+ // spot at the target index and finish. ELSE we *decrement* the target index by the number
761761+ // of spots and continue to the next gap.
762762+ // 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original
763763+ // guess for `target_index` was too big, in which case we need to retry.
764764+ // 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know
765765+ // the *actual* number of possible spots in the address space since we just counted them
766766+ // during the first attempt. We initialize `target_index` to `0..candidate_spot_count`
767767+ // which is guaranteed to return us a spot.
768768+ // IF `candidate_spot_count` is ZERO after the first attempt, there is no point in
769769+ // retrying since we cannot fulfill the requested layout.
770770+ //
771771+ // Note that in practice, we use a binary tree to keep track of regions, and we use binary search
772772+ // to optimize the search for a suitable gap instead of linear iteration.
773773+774774+ let layout = layout.pad_to_align();
775775+776776+ // First attempt: guess a random target index
777777+ let max_candidate_spots = self.max_range.size();
778778+779779+ let target_index: usize = self
780780+ .rng
781781+ .as_mut()
782782+ .map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap()))
783783+ .unwrap_or_default();
784784+785785+ // First attempt: visit the binary search tree to find a gap
786786+ let mut v = GapVisitor::new(layout, target_index);
787787+ self.visit_gaps(&mut v);
788788+789789+ // if we found a spot already we're done
790790+ if let Some(chosen) = v.chosen {
791791+ return Some(chosen);
792792+ }
793793+794794+ // otherwise, Second attempt: we need to retry with the correct candidate spot count
795795+ // but if we counted no suitable candidate spots during the first attempt, we cannot fulfill
796796+ // the request.
797797+ if v.candidate_spots == 0 {
798798+ return None;
799799+ }
800800+801801+ // Second attempt: pick a new target_index that's actually fulfillable
802802+ let target_index: usize = self
803803+ .rng
804804+ .as_mut()
805805+ .map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap()))
806806+ .unwrap_or_default();
807807+808808+ // Second attempt: visit the binary search tree to find a gap
809809+ let mut v = GapVisitor::new(layout, target_index);
810810+ self.visit_gaps(&mut v);
811811+812812+ let chosen = v
813813+ .chosen
814814+ .expect("There must be a chosen spot after the first attempt. This is a bug!");
815815+816816+ debug_assert!(chosen.is_canonical::<A>());
817817+818818+ Some(chosen)
819819+ }
    /// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space
    /// from lowest to highest addresses.
    fn visit_gaps(&self, v: &mut GapVisitor) {
        let Some(root) = self.regions.root().get() else {
            // if the tree is empty, we treat the entire max_range as the gap
            // note that we do not care about the returned ControlFlow, as there is nothing else we
            // could try to find a spot anyway
            let _ = v.visit(self.max_range.clone());

            return;
        };

        // see if there is a suitable gap BEFORE the first address space region
        if v.visit(self.max_range.start..root.subtree_range().start)
            .is_break()
        {
            return;
        }

        // now comes the main part of the search. we start at the WAVLTree root node and do a
        // binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
        // to speed up this search. See `AddressSpaceRegion` for details on how this works.

        let mut maybe_current = self.regions.root().get();
        // Highest address whose gaps we have already offered to the visitor; prevents
        // re-descending into subtrees that were fully processed while walking back up.
        let mut already_visited = VirtualAddress::MIN;

        while let Some(current) = maybe_current {
            // If there is no suitable gap anywhere in this entire subtree, skip it.
            if current.suitable_gap_in_subtree(v.layout()) {
                // First, look at the left subtree
                if let Some(left) = current.left_child() {
                    // Descend left if that subtree may still hold a suitable, unvisited gap.
                    if left.suitable_gap_in_subtree(v.layout())
                        && left.subtree_range().end > already_visited
                    {
                        maybe_current = Some(left);
                        continue;
                    }

                    // Otherwise offer the gap between the left subtree and this region.
                    if v.visit(left.subtree_range().end..current.range().start)
                        .is_break()
                    {
                        return;
                    }
                }

                if let Some(right) = current.right_child() {
                    // Offer the gap between this region and the right subtree ...
                    if v.visit(current.range().end..right.subtree_range().start)
                        .is_break()
                    {
                        return;
                    }

                    // ... then descend right if that subtree may still hold a suitable,
                    // unvisited gap.
                    if right.suitable_gap_in_subtree(v.layout())
                        && right.subtree_range().end > already_visited
                    {
                        maybe_current = Some(right);
                        continue;
                    }
                }
            }

            // This subtree is exhausted: remember how far we got and walk back up.
            already_visited = current.subtree_range().end;
            maybe_current = current.parent();
        }

        // see if there is a suitable gap AFTER the last address space region
        if v.visit(root.subtree_range().end..self.max_range.end)
            .is_break()
        {
            return;
        }
    }
893893+}
/// Returns a cursor pointing at the [`AddressSpaceRegion`] that contains `ptr`.
///
/// # Safety
///
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
/// * `layout` must [*fit*] that region of memory.
///
/// [*currently mapped*]: #currently-mapped-memory
/// [*fit*]: #memory-fitting
unsafe fn get_region_containing_ptr<A: RawAddressSpace>(
    regions: &mut WAVLTree<AddressSpaceRegion<A>>,
    ptr: NonNull<u8>,
    layout: Layout,
) -> CursorMut<'_, AddressSpaceRegion<A>> {
    let addr = VirtualAddress::from_non_null(ptr);

    // NOTE(review): `lower_bound_mut(Included(&addr))` yields the first region whose key is
    // >= `addr`; whether that is the region *containing* `addr` depends on the tree's key
    // ordering — confirm against the WAVLTree documentation.
    let cursor = regions.lower_bound_mut(Bound::Included(&addr));

    // assert_unsafe_precondition_!(
    //     "TODO",
    //     (cursor: &CursorMut<AddressSpaceRegion<A>> = &cursor) => cursor.get().is_some()
    // );

    // Safety: The caller guarantees the pointer is currently mapped which means we must have
    // a corresponding address space region for it
    let region = unsafe { cursor.get().unwrap_unchecked() };

    // assert_unsafe_precondition_!(
    //     "TODO",
    //     (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
    //         let range = region.range();
    //
    //         range.start.get() <= addr.get() && addr.get() < range.end.get()
    //     }
    // );
    //
    // assert_unsafe_precondition_!(
    //     "`layout` does not fit memory region",
    //     (layout: Layout = layout, region: &AddressSpaceRegion = &region) => region.layout_fits_region(layout)
    // );

    cursor
}
/// Visitor state for the gap search; see `find_spot_for` for the full algorithm.
pub(crate) struct GapVisitor {
    /// The layout (size and alignment) a candidate spot must satisfy.
    layout: Layout,
    /// Remaining number of candidate spots to skip before choosing one.
    target_index: usize,
    /// Running total of candidate spots counted across all visited gaps.
    candidate_spots: usize,
    /// The chosen spot, set once `target_index` fell inside a visited gap.
    chosen: Option<VirtualAddress>,
}
943943+944944+impl GapVisitor {
945945+ fn new(layout: Layout, target_index: usize) -> Self {
946946+ Self {
947947+ layout,
948948+ target_index,
949949+ candidate_spots: 0,
950950+ chosen: None,
951951+ }
952952+ }
953953+954954+ pub fn layout(&self) -> Layout {
955955+ self.layout
956956+ }
957957+958958+ /// Returns the number of spots in the given range that satisfy the layout we require
959959+ fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
960960+ debug_assert!(
961961+ range.start.is_aligned_to(self.layout.align())
962962+ && range.end.is_aligned_to(self.layout.align())
963963+ );
964964+965965+ // ranges passed in here can become empty for a number of reasons (aligning might produce ranges
966966+ // where end > start, or the range might be empty to begin with) in either case an empty
967967+ // range means no spots are available
968968+ if range.is_empty() {
969969+ return 0;
970970+ }
971971+972972+ let range_size = range.size();
973973+ if range_size >= self.layout.size() {
974974+ ((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
975975+ } else {
976976+ 0
977977+ }
978978+ }
979979+980980+ pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
981981+ // if we have already chosen a spot, signal the caller to stop
982982+ if self.chosen.is_some() {
983983+ return ControlFlow::Break(());
984984+ }
985985+986986+ let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();
987987+988988+ let spot_count = self.spots_in_range(&aligned_gap);
989989+990990+ self.candidate_spots += spot_count;
991991+992992+ if self.target_index < spot_count {
993993+ self.chosen = Some(
994994+ aligned_gap
995995+ .start
996996+ .checked_add(self.target_index << self.layout.align().ilog2())
997997+ .unwrap(),
998998+ );
999999+10001000+ ControlFlow::Break(())
10011001+ } else {
10021002+ self.target_index -= spot_count;
10031003+10041004+ ControlFlow::Continue(())
10051005+ }
10061006+ }
10071007+}
+336
libs/mem/src/address_space/batch.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::cmp;
99+use core::num::{NonZero, NonZeroUsize};
1010+1111+use smallvec::SmallVec;
1212+1313+use crate::address_space::{Flush, RawAddressSpace};
1414+use crate::{AccessRules, PhysicalAddress, VirtualAddress};
/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
///
/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
/// this number.
pub struct Batch {
    /// Pending operations; stored inline for up to 4 entries to avoid a heap allocation.
    ops: SmallVec<[BatchOperation; 4]>,
}
/// A single batched operation; see the wrapped operation types for details.
enum BatchOperation {
    Map(MapOperation),
    Unmap(UnmapOperation),
    SetAccessRules(SetAccessRulesOperation),
}

/// Batched [`RawAddressSpace::map`]: map `len` bytes at `virt` to `phys` with `access_rules`.
struct MapOperation {
    virt: VirtualAddress,
    phys: PhysicalAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

/// Batched [`RawAddressSpace::unmap`]: unmap `len` bytes starting at `virt`.
struct UnmapOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
}

/// Batched [`RawAddressSpace::set_access_rules`]: change the access rules of `len` bytes at `virt`.
struct SetAccessRulesOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}
4949+5050+// ===== impl Batch =====
impl Batch {
    /// Construct a new empty [`Batch`].
    pub fn new() -> Self {
        Self {
            ops: SmallVec::new(),
        }
    }

    /// Add a [`map`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`map`]: RawAddressSpace::map
    pub unsafe fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = MapOperation {
            virt,
            phys,
            len,
            access_rules,
        };

        // Only existing map operations are merge candidates.
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Map(op) => Some(op),
            _ => None,
        });

        // Offer the new operation to each candidate; `try_merge_with` hands it back on
        // failure so it can be offered to the next one.
        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        // Nothing absorbed it; enqueue as a fresh operation.
        self.ops.push(BatchOperation::Map(new));
    }

    /// Add an [`unmap`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`unmap`]: RawAddressSpace::unmap
    pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) {
        let mut new = UnmapOperation { virt, len };

        // Only existing unmap operations are merge candidates.
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Unmap(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::Unmap(new));
    }

    /// Add a [`set_access_rules`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// NOTE(review): this function documents safety preconditions and ultimately feeds an
    /// unsafe raw-address-space call, yet is not declared `unsafe fn` like [`Self::map`]
    /// and [`Self::unmap`] — confirm which is intended.
    ///
    /// [`set_access_rules`]: RawAddressSpace::set_access_rules
    pub fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = SetAccessRulesOperation {
            virt,
            len,
            access_rules,
        };

        // Only existing set-access-rules operations are merge candidates.
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::SetAccessRules(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::SetAccessRules(new));
    }

    /// Flushes the `Batch` ensuring all changes are materialized into the raw address space.
    pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> {
        let mut flush = raw_aspace.flush();
        for op in self.ops.drain(..) {
            match op {
                BatchOperation::Map(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
                    }
                }
                BatchOperation::Unmap(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.unmap(op.virt, op.len, &mut flush);
                    }
                }
                BatchOperation::SetAccessRules(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
                    }
                }
            };
        }
        flush.flush()
    }
}
204204+205205+// ===== impl MapOperation =====
206206+207207+impl MapOperation {
208208+ /// Returns true if this operation can be merged with `other`.
209209+ ///
210210+ /// Map operations can be merged if:
211211+ /// - their [`AccessRules`] are the same
212212+ /// - their virtual address ranges are contiguous (no gap between self and other)
213213+ /// - their physical address ranges are contiguous
214214+ /// - the resulting virtual address range still has the same size as the resulting
215215+ /// physical address range
216216+ const fn can_merge_with(&self, other: &Self) -> bool {
217217+ // the access rules need to be the same
218218+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
219219+220220+ let overlap_virt = self.virt.get() <= other.len.get()
221221+ && other.virt.get() <= self.virt.get() + self.len.get();
222222+223223+ let overlap_phys = self.phys.get() <= other.len.get()
224224+ && other.phys.get() <= self.phys.get() + self.len.get();
225225+226226+ let offset_virt = self.virt.get().wrapping_sub(other.virt.get());
227227+ let offset_phys = self.virt.get().wrapping_sub(other.virt.get());
228228+ let same_offset = offset_virt == offset_phys;
229229+230230+ same_rules && overlap_virt && overlap_phys && same_offset
231231+ }
232232+233233+ /// Attempt to merge this operation with `other`.
234234+ ///
235235+ /// If this returns `Ok`, `other` has been merged into `self`.
236236+ ///
237237+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
238238+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
239239+ if self.can_merge_with(&other) {
240240+ let offset = self.virt.get().wrapping_sub(other.virt.get());
241241+ let len = self
242242+ .len
243243+ .get()
244244+ .checked_add(other.len.get())
245245+ .unwrap()
246246+ .wrapping_add(offset);
247247+248248+ self.virt = cmp::min(self.virt, other.virt);
249249+ self.phys = cmp::min(self.phys, other.phys);
250250+ self.len = NonZero::new(len).ok_or(other)?;
251251+252252+ Ok(())
253253+ } else {
254254+ Err(other)
255255+ }
256256+ }
257257+}
258258+259259+// ===== impl UnmapOperation =====
260260+261261+impl UnmapOperation {
262262+ /// Returns true if this operation can be merged with `other`.
263263+ ///
264264+ /// Unmap operations can be merged if:
265265+ /// - their virtual address ranges are contiguous (no gap between self and other)
266266+ const fn can_merge_with(&self, other: &Self) -> bool {
267267+ self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get()
268268+ }
269269+270270+ /// Attempt to merge this operation with `other`.
271271+ ///
272272+ /// If this returns `Ok`, `other` has been merged into `self`.
273273+ ///
274274+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
275275+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
276276+ if self.can_merge_with(&other) {
277277+ let offset = self.virt.get().wrapping_sub(other.virt.get());
278278+ let len = self
279279+ .len
280280+ .get()
281281+ .checked_add(other.len.get())
282282+ .unwrap()
283283+ .wrapping_add(offset);
284284+285285+ self.virt = cmp::min(self.virt, other.virt);
286286+ self.len = NonZero::new(len).ok_or(other)?;
287287+288288+ Ok(())
289289+ } else {
290290+ Err(other)
291291+ }
292292+ }
293293+}
// ===== impl SetAccessRulesOperation =====
296296+297297+impl SetAccessRulesOperation {
298298+ /// Returns true if this operation can be merged with `other`.
299299+ ///
300300+ /// Protect operations can be merged if:
301301+ /// - their [`AccessRules`] are the same
302302+ /// - their virtual address ranges are contiguous (no gap between self and other)
303303+ const fn can_merge_with(&self, other: &Self) -> bool {
304304+ // the access rules need to be the same
305305+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
306306+307307+ let overlap = self.virt.get() <= other.len.get()
308308+ && other.virt.get() <= self.virt.get() + self.len.get();
309309+310310+ same_rules && overlap
311311+ }
312312+313313+ /// Attempt to merge this operation with `other`.
314314+ ///
315315+ /// If this returns `Ok`, `other` has been merged into `self`.
316316+ ///
317317+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
318318+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
319319+ if self.can_merge_with(&other) {
320320+ let offset = self.virt.get().wrapping_sub(other.virt.get());
321321+ let len = self
322322+ .len
323323+ .get()
324324+ .checked_add(other.len.get())
325325+ .unwrap()
326326+ .wrapping_add(offset);
327327+328328+ self.virt = cmp::min(self.virt, other.virt);
329329+ self.len = NonZero::new(len).ok_or(other)?;
330330+331331+ Ok(())
332332+ } else {
333333+ Err(other)
334334+ }
335335+ }
336336+}
+564
libs/mem/src/address_space/region.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::boxed::Box;
99+use core::alloc::Layout;
1010+use core::fmt::Formatter;
1111+use core::marker::PhantomData;
1212+use core::mem::offset_of;
1313+use core::num::NonZeroUsize;
1414+use core::ops::{Bound, Range, RangeBounds};
1515+use core::pin::Pin;
1616+use core::ptr::NonNull;
1717+use core::{cmp, fmt, mem, slice};
1818+1919+use fallible_iterator::FallibleIterator;
2020+use pin_project::pin_project;
2121+2222+use crate::address_space::{Batch, RawAddressSpace};
2323+use crate::vmo::Vmo;
2424+use crate::{AccessRules, AddressRangeExt, VirtualAddress};
/// A contiguous region of virtual memory inside an address space, backed by a [`Vmo`].
#[pin_project]
pub struct AddressSpaceRegion<A> {
    /// The virtual address range covered by this region.
    range: Range<VirtualAddress>,
    /// How this region may be accessed.
    access_rules: AccessRules,
    /// The size/alignment this region was allocated with.
    layout: Layout,
    /// The virtual memory object providing the physical memory backing this region.
    vmo: Vmo,
    /// Offset into `vmo` at which this region's backing memory starts — presumably in
    /// bytes; confirm against the `Vmo` API.
    vmo_offset: usize,

    /// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
    subtree_range: Range<VirtualAddress>,
    /// The largest gap in this subtree, used when allocating new regions
    max_gap: usize,
    /// Links to other regions in the WAVL tree
    links: wavltree::Links<AddressSpaceRegion<A>>,

    /// Ties the region to its raw address space type without storing one.
    _raw_aspace: PhantomData<A>,
}
4343+4444+impl<A> fmt::Debug for AddressSpaceRegion<A> {
4545+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
4646+ f.debug_struct("AddressSpaceRegion")
4747+ .field("range", &self.range)
4848+ .field("access_rules", &self.access_rules)
4949+ .field("layout", &self.layout)
5050+ .field("vmo", &self.vmo)
5151+ .field("vmo_offset", &self.vmo_offset)
5252+ .field("subtree_range", &self.subtree_range)
5353+ .field("max_gap", &self.max_gap)
5454+ .field("links", &self.links)
5555+ .finish()
5656+ }
5757+}
5858+5959+impl<A: RawAddressSpace> AddressSpaceRegion<A> {
    /// Creates a new region based at `spot`, covering `layout.size()` bytes.
    ///
    /// Panics if `spot + layout.size()` overflows the virtual address space.
    pub const fn new(
        spot: VirtualAddress,
        layout: Layout,
        access_rules: AccessRules,
        vmo: Vmo,
        vmo_offset: usize,
    ) -> Self {
        Self {
            range: spot..spot.checked_add(layout.size()).unwrap(),
            access_rules,
            layout,
            vmo,
            vmo_offset,

            max_gap: 0,
            // A lone (unlinked) node's subtree covers exactly its own range.
            subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
            links: wavltree::Links::new(),

            _raw_aspace: PhantomData,
        }
    }
    /// The virtual address range covered by this region.
    pub const fn range(&self) -> &Range<VirtualAddress> {
        &self.range
    }

    /// The address range covered by this region's entire WAVL subtree.
    pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
        &self.subtree_range
    }

    /// The access rules of this region.
    pub const fn access_rules(&self) -> AccessRules {
        self.access_rules
    }

    /// Returns the region's memory as a byte slice.
    pub fn as_slice(&self) -> &[u8] {
        let ptr = self.range.start.as_ptr();
        let len = self.range.size();

        // SAFETY(review): assumes the whole range is currently mapped and readable for the
        // duration of the borrow — this is not checked here; confirm the caller contract.
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Returns the region's memory as a mutable byte slice.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.range.start.as_mut_ptr();
        let len = self.range.size();

        // SAFETY(review): assumes the whole range is currently mapped and writable for the
        // duration of the borrow — this is not checked here; confirm the caller contract.
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    /// Returns the region's memory as a `NonNull<[u8]>` without dereferencing it.
    pub fn as_non_null(&self) -> NonNull<[u8]> {
        let ptr = self.range.start.as_non_null().unwrap();
        NonNull::slice_from_raw_parts(ptr, self.range.size())
    }

    /// Returns `true` if `layout` fits this region: the region base is aligned to
    /// `layout.align()` and `layout.size()` lies between the size the region was allocated
    /// with and the size of the region's full range.
    pub const fn layout_fits_region(&self, layout: Layout) -> bool {
        self.range.start.is_aligned_to(layout.align())
            && layout.size() >= self.layout.size()
            && layout.size() <= self.range.end.get() - self.range.start.get()
    }
    /// Find physical memory frames to back the given `range`.
    /// After this call succeeds, accesses that align with the given `access` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Translate the virtual-address range into a range relative to the backing VMO.
        let vmo_relative = self.bounds_to_vmo_relative(range);

        // Acquire one physical frame per page and enqueue a page-sized map for each.
        let mut acquired_frames = self.vmo.acquire(vmo_relative, access_rules).enumerate();
        while let Some((idx, frame)) = acquired_frames.next()? {
            // NOTE(review): the target address is computed from `self.range.start`, not from
            // the start of the requested `range` — confirm this is intended for partial commits.
            let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap();

            // SAFETY(review): assumes the region base and the frame address are page-aligned,
            // so `virt` and `frame.addr()` satisfy `Batch::map`'s contract — confirm.
            unsafe {
                batch.map(
                    virt,
                    frame.addr(),
                    NonZeroUsize::new(A::PAGE_SIZE).unwrap(),
                    access_rules,
                );
            }

            if self.vmo.has_content_source() {
                // TODO add virt addr to coalescer
            }
        }

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        // initialize patched holes if necessary
        if self.vmo.has_content_source() {
            // for every region in coalescer
            // figure out content source offset
            // read from content source at offset into region
        }

        Ok(())
    }
    /// Release physical memory frames backing the given `range`.
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn decommit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Translate the virtual-address range into a range relative to the backing VMO.
        let vmo_relative = self.bounds_to_vmo_relative(range);

        // Release the backing frames and enqueue a page-sized unmap for each released page.
        let mut released_frames = self.vmo.release(vmo_relative).enumerate();
        while let Some((idx, _frame)) = released_frames.next()? {
            let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap();
            // SAFETY(review): assumes this page is currently mapped and page-aligned, per
            // `Batch::unmap`'s contract — confirm.
            unsafe { batch.unmap(virt, NonZeroUsize::new(A::PAGE_SIZE).unwrap()) };

            // if VMO has content source && frame is dirty
            // add virt addr to coalescer
        }

        // for every region in coalescer
        // figure out content source offset
        // write region to content source at offset

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        Ok(())
    }
199199+200200+ /// Zero out the memory in the given `range`.
201201+ /// This MAY release physical memory frames backing the `range`.
202202+ ///
203203+ /// # Errors
204204+ ///
205205+ /// - `range` is out of bounds
206206+ pub fn clear(
207207+ &mut self,
208208+ range: impl RangeBounds<VirtualAddress>,
209209+ batch: &mut Batch,
210210+ ) -> crate::Result<()> {
211211+ todo!()
212212+ }
213213+214214+ /// Update the access rules of this `AddressSpaceRegion`.
215215+ pub fn update_access_rules(
216216+ &mut self,
217217+ access_rules: AccessRules,
218218+ batch: &mut Batch,
219219+ ) -> crate::Result<()> {
220220+ todo!()
221221+ }
222222+223223+ /// Fetches content in the given `range`. This operates logically equivalent to
224224+ /// a read, write, or instruction fetch (depending on `access_rules`) so that future accesses
225225+ /// are quicker. The provided `access_rules` MUST be a subset or equal to this regions access rules.
226226+ ///
227227+ /// # Errors
228228+ ///
229229+ /// - `range` is out of bounds
230230+ /// - `access_rules` is NOT a subset of self.access_rules
231231+ pub fn prefetch(
232232+ &mut self,
233233+ range: impl RangeBounds<VirtualAddress>,
234234+ access_rules: AccessRules,
235235+ ) -> crate::Result<()> {
236236+ todo!()
237237+ }
    /// Attempts to grow the address space region to `new_len`.
    /// `new_len` MUST be larger than or equal to the current length.
    ///
    /// - `batch`: collects the resulting page-table mutations for the caller to flush.
    pub fn grow(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        // Unimplemented stub: panics if called. See the commented-out
        // `grow_in_place` sketch below for the intended algorithm.
        todo!()
    }
    /// Attempts to shrink the address space region to `new_len`.
    /// `new_len` MUST be smaller than or equal to the current length.
    ///
    /// - `batch`: collects the resulting page-table mutations for the caller to flush.
    pub fn shrink(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        // Unimplemented stub: panics if called. See the commented-out `shrink`
        // sketch below for the intended algorithm.
        todo!()
    }
250250+251251+ // /// grow region to `new_len`, attempting to grow the VMO accordingly
    // /// `new_layout.size()` must be greater than or equal to `self.layout.size()`
253253+ // pub fn grow_in_place(
254254+ // &mut self,
255255+ // new_layout: Layout,
256256+ // next_range: Option<Range<VirtualAddress>>,
257257+ // batch: &mut Batch,
258258+ // ) -> crate::Result<()> {
259259+ // if new_layout.align() > self.layout.align() {
260260+ // bail!("cannot grow in-place: New alignment greater than current");
261261+ // }
262262+ //
263263+ // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
264264+ //
265265+ // if let Some(next_range) = next_range
266266+ // && next_range.is_overlapping(&new_range)
267267+ // {
268268+ // bail!("cannot grow in-place: New overlapping with next range");
269269+ // }
270270+ //
271271+ // self.vmo.resize(new_range.size(), batch)?;
272272+ //
273273+ // self.update_range(new_range);
274274+ //
275275+ // Ok(())
276276+ // }
277277+ //
278278+ // /// shrink region to the first `len` bytes, dropping the rest frames.
    // /// `new_layout.size()` must be smaller than or equal to `self.layout.size()`
280280+ // pub fn shrink(&mut self, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
281281+ // if new_layout.align() > self.layout.align() {
282282+ // bail!("cannot grow in-place: New alignment greater than current");
283283+ // }
284284+ //
285285+ // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
286286+ //
287287+ // self.vmo.resize(new_range.size(), batch)?;
288288+ //
289289+ // self.update_range(new_range);
290290+ //
291291+ // Ok(())
292292+ // }
293293+ //
294294+ // /// move the entire region to the new base address, remapping any already mapped frames
295295+ // pub fn move_to(
296296+ // &mut self,
297297+ // new_base: VirtualAddress,
298298+ // new_layout: Layout,
299299+ // batch: &mut Batch,
300300+ // ) -> crate::Result<()> {
301301+ // let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();
302302+ //
303303+ // self.vmo.resize(new_range.size(), batch)?;
304304+ // self.update_range(new_range);
305305+ //
306306+ // // - for every frame in VMO
307307+ // // - attempt to map at new offset (add maps to batch)
308308+ //
309309+ // todo!()
310310+ // }
311311+ //
312312+ // pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
313313+ // where
314314+ // R: RangeBounds<VirtualAddress>,
315315+ // {
316316+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
317317+ //
318318+ // self.vmo.commit(bounds, will_write, batch)
319319+ // }
320320+ //
321321+ // pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
322322+ // where
323323+ // R: RangeBounds<VirtualAddress>,
324324+ // {
325325+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
326326+ //
327327+ // self.vmo.decommit(bounds, batch)
328328+ // }
329329+ //
330330+ // /// updates the access rules of this region
331331+ // pub fn update_access_rules(
332332+ // &mut self,
333333+ // access_rules: AccessRules,
334334+ // batch: &mut Batch,
335335+ // ) -> crate::Result<()> {
336336+ // // TODO
337337+ // // - for every frame in VMO
338338+ // // - update access rules (add protects to batch)
339339+ // // - update self access rules
340340+ //
341341+ // todo!()
342342+ // }
343343+ //
344344+ // pub fn clear<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
345345+ // where
346346+ // R: RangeBounds<VirtualAddress>,
347347+ // {
348348+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
349349+ //
350350+ // self.vmo.clear(bounds, batch)
351351+ // }
352352+ //
353353+ // pub fn prefetch<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
354354+ // where
355355+ // R: RangeBounds<VirtualAddress>,
356356+ // {
357357+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
358358+ //
359359+ // self.vmo.prefetch(bounds, batch)
360360+ // }
    /// Asserts the structural invariants of this region, prefixing every panic
    /// message with `msg`. Intended as a sanity check during testing/debugging.
    pub fn assert_valid(&self, msg: &str) {
        assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
        // The subtree range must fully enclose this node's own range.
        assert!(
            self.subtree_range.start <= self.range.start
                && self.range.end <= self.subtree_range.end,
            "{msg}region range cannot be bigger than its subtree range; region={self:?}"
        );
        // A gap lies *between* regions inside the subtree range; since this node's
        // own (non-empty) range is part of that span, the gap is strictly smaller.
        assert!(
            self.max_gap < self.subtree_range.size(),
            "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}"
        );
        assert!(
            self.range.start.is_aligned_to(self.layout.align()),
            "{msg}region range is not aligned to its layout; region={self:?}"
        );
        assert!(
            self.range.size() >= self.layout.size(),
            "{msg}region range is smaller than its layout; region={self:?}"
        );

        // Also verify the intrusive tree links themselves.
        self.links.assert_valid();
    }
384384+385385+ /// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used
386386+ /// during gap-searching.
387387+ pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool {
388388+ // we need the layout to be padded to alignment
389389+ debug_assert!(layout.size().is_multiple_of(layout.align()));
390390+391391+ self.max_gap >= layout.size()
392392+ }
393393+394394+ /// Returns the left child node in the search tree of regions, used during gap-searching.
395395+ pub fn left_child(&self) -> Option<&Self> {
396396+ Some(unsafe { self.links.left()?.as_ref() })
397397+ }
398398+399399+ /// Returns the right child node in the search tree of regions, used during gap-searching.
400400+ pub fn right_child(&self) -> Option<&Self> {
401401+ Some(unsafe { self.links.right()?.as_ref() })
402402+ }
403403+404404+ /// Returns the parent node in the search tree of regions, used during gap-searching.
405405+ pub fn parent(&self) -> Option<&Self> {
406406+ Some(unsafe { self.links.parent()?.as_ref() })
407407+ }
408408+409409+ #[inline]
410410+ fn bounds_to_vmo_relative(
411411+ &self,
412412+ bounds: impl RangeBounds<VirtualAddress>,
413413+ ) -> (Bound<usize>, Bound<usize>) {
414414+ let start = bounds.start_bound().map(|addr| {
415415+ (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset
416416+ });
417417+ let end = bounds.end_bound().map(|addr| {
418418+ (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset
419419+ });
420420+421421+ (start, end)
422422+ }
    /// Replaces this region's virtual address range and re-propagates the
    /// gap-search metadata (subtree range / max gap) up the tree.
    fn update_range(&mut self, new_range: Range<VirtualAddress>) {
        self.range = new_range;
        // We also must propagate the information about our changed range to the rest of the tree
        // so searching for a free spot returns the correct results.
        Self::propagate_update_to_parent(Some(NonNull::from(self)));
    }
    /// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`]
    /// implementation below after each tree mutation that impacted this node or its subtree in some way
    /// (insertion, rotation, deletion).
    ///
    /// Recomputes `subtree_range` (union of this node's range and both children's
    /// subtree ranges) and `max_gap` (largest free gap anywhere in the subtree).
    ///
    /// Returns `true` if this node's metadata changed.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn update_gap_metadata(
        mut node: NonNull<Self>,
        left: Option<NonNull<Self>>,
        right: Option<NonNull<Self>>,
    ) -> bool {
        // Size of the gap between two adjacent spans; clamps to 0 if they touch
        // or overlap (the subtraction fails).
        fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize {
            right_first_byte
                .checked_sub_addr(left_last_byte)
                .unwrap_or_default() // TODO use saturating_sub_addr
        }

        let node = unsafe { node.as_mut() };
        let mut left_max_gap = 0;
        let mut right_max_gap = 0;

        // recalculate the subtree_range start, remembering the old value so we can
        // report whether anything changed
        let old_subtree_range_start = if let Some(left) = left {
            let left = unsafe { left.as_ref() };
            // gap between the left subtree's end and this node's start
            let left_gap = gap(left.subtree_range.end, node.range.start);
            left_max_gap = cmp::max(left_gap, left.max_gap);
            mem::replace(&mut node.subtree_range.start, left.subtree_range.start)
        } else {
            mem::replace(&mut node.subtree_range.start, node.range.start)
        };

        // recalculate the subtree range end
        let old_subtree_range_end = if let Some(right) = right {
            let right = unsafe { right.as_ref() };
            // gap between this node's end and the right subtree's start
            let right_gap = gap(node.range.end, right.subtree_range.start);
            right_max_gap = cmp::max(right_gap, right.max_gap);
            mem::replace(&mut node.subtree_range.end, right.subtree_range.end)
        } else {
            mem::replace(&mut node.subtree_range.end, node.range.end)
        };

        // recalculate the max_gap
        let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap));

        old_max_gap != node.max_gap
            || old_subtree_range_start != node.subtree_range.start
            || old_subtree_range_end != node.subtree_range.end
    }
    // Propagate metadata updates to this region's parent in the search tree. If we had to update
    // our metadata the parent must update its metadata too. Walks up toward the
    // root, stopping early as soon as a node's metadata is unchanged.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) {
        while let Some(node) = maybe_node {
            let links = unsafe { &node.as_ref().links };
            let changed = Self::update_gap_metadata(node, links.left(), links.right());

            // if the metadata didn't actually change, we don't need to recalculate parents
            if !changed {
                return;
            }

            maybe_node = links.parent();
        }
    }
496496+}
// Safety: `Handle = Pin<Box<Self>>` guarantees a stable address for the lifetime
// of tree membership; `links` gives the tree access to the intrusive node links.
unsafe impl<A: RawAddressSpace> wavltree::Linked for AddressSpaceRegion<A> {
    /// Any heap-allocated type that owns an element may be used.
    ///
    /// An element *must not* move while part of an intrusive data
    /// structure. In many cases, `Pin` may be used to enforce this.
    type Handle = Pin<Box<Self>>; // TODO better handle type

    /// Regions are keyed (and therefore ordered) by their start address.
    type Key = VirtualAddress;

    /// Convert an owned `Handle` into a raw pointer
    fn into_ptr(handle: Self::Handle) -> NonNull<Self> {
        // Safety: wavltree treats the ptr as pinned
        unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
    }

    /// Convert a raw pointer back into an owned `Handle`.
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        // Safety: `NonNull` *must* be constructed from a pinned reference
        // which the tree implementation upholds.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) }
    }

    /// Computes a pointer to the intrusive links via address arithmetic, without
    /// materializing a `&Self` reference to the whole node.
    unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> {
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }

    fn get_key(&self) -> &Self::Key {
        &self.range.start
    }

    /// A freshly inserted node is a leaf, so its own metadata is already correct
    /// (subtree range == own range, no gaps); only ancestors need recomputing.
    fn after_insert(self: Pin<&mut Self>) {
        debug_assert_eq!(self.subtree_range.start, self.range.start);
        debug_assert_eq!(self.subtree_range.end, self.range.end);
        debug_assert_eq!(self.max_gap, 0);
        Self::propagate_update_to_parent(self.links.parent());
    }

    /// After removal, recompute metadata starting at the removed node's old parent.
    fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) {
        Self::propagate_update_to_parent(parent);
    }

    /// After a rotation `self` has taken `parent`'s place in the tree, so it
    /// inherits the parent's subtree-wide metadata wholesale, while the demoted
    /// parent recomputes its own from its new children.
    fn after_rotate(
        self: Pin<&mut Self>,
        parent: NonNull<Self>,
        sibling: Option<NonNull<Self>>,
        lr_child: Option<NonNull<Self>>,
        side: wavltree::Side,
    ) {
        let this = self.project();
        // Safety: caller ensures ptr is valid
        let _parent = unsafe { parent.as_ref() };

        this.subtree_range.start = _parent.subtree_range.start;
        this.subtree_range.end = _parent.subtree_range.end;
        *this.max_gap = _parent.max_gap;

        // After the rotation the demoted parent's children are the old sibling and
        // the transferred lr_child; which is left/right depends on rotation side.
        if side == wavltree::Side::Left {
            Self::update_gap_metadata(parent, sibling, lr_child);
        } else {
            Self::update_gap_metadata(parent, lr_child, sibling);
        }
    }
}
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::cmp::PartialEq;
1010+use core::fmt;
1111+use core::fmt::Debug;
1212+use core::mem::offset_of;
1313+use core::ops::Deref;
1414+use core::ptr::NonNull;
1515+use core::sync::atomic;
1616+use core::sync::atomic::{AtomicUsize, Ordering};
1717+1818+use cordyceps::{Linked, list};
1919+use pin_project::pin_project;
2020+2121+use crate::PhysicalAddress;
2222+use crate::frame_alloc::FrameAllocator;
2323+2424+/// Soft limit on the amount of references that may be made to a `Frame`.
2525+const MAX_REFCOUNT: usize = isize::MAX as usize;
/// A reference-counted handle to a physical memory [`Frame`].
///
/// Cloning bumps the frame's reference count; dropping the last `FrameRef`
/// returns the frame to the allocator it came from.
pub struct FrameRef {
    /// Pointer to the shared frame bookkeeping.
    frame: NonNull<Frame>,
    /// The allocator that owns the frame; used to return it on final drop.
    frame_alloc: &'static dyn FrameAllocator,
}
/// Bookkeeping for a single physical memory frame (page).
#[pin_project(!Unpin)]
#[derive(Debug)]
pub struct Frame {
    /// Physical address of the first byte of this frame.
    addr: PhysicalAddress,
    /// Number of live references to this frame.
    refcount: AtomicUsize,
    /// Intrusive list links (e.g. free lists and caches); pinned.
    #[pin]
    links: list::Links<Self>,
}
4040+4141+// ===== impl FrameRef =====
4242+4343+impl Clone for FrameRef {
4444+ /// Makes a clone of the `Frame`.
4545+ ///
4646+ /// This creates reference to the same `FrameInfo`, increasing the reference count by one.
4747+ fn clone(&self) -> Self {
4848+ // Increase the reference count by one. Using relaxed ordering, as knowledge of the
4949+ // original reference prevents other threads from erroneously deleting
5050+ // the object.
5151+ //
5252+ // Again, restating what the `Arc` implementation quotes from the
5353+ // [Boost documentation][1]:
5454+ //
5555+ // > Increasing the reference counter can always be done with memory_order_relaxed: New
5656+ // > references to an object can only be formed from an existing
5757+ // > reference, and passing an existing reference from one thread to
5858+ // > another must already provide any required synchronization.
5959+ //
6060+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
6161+ let old_size = self.refcount.fetch_add(1, Ordering::Relaxed);
6262+ debug_assert_ne!(old_size, 0);
6363+6464+ // Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
6565+ // `Frame`s somewhere (which we really shouldn't but just in case). Overflowing the refcount
6666+ // would *really* bad as it would treat the frame as free and potentially cause a use-after-free
6767+ // scenario. Realistically this branch should never be taken.
6868+ //
6969+ // Also worth noting: Just like `Arc`, the refcount could still overflow when in between
7070+ // the load above and this check some other cpu increased the refcount from `isize::MAX` to
7171+ // `usize::MAX` but that seems unlikely. The other option, doing the comparison and update in
7272+ // one conditional atomic operation produces much worse code, so if its good enough for the
7373+ // standard library, it is good enough for us.
7474+ assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");
7575+7676+ unsafe { Self::from_raw_parts(self.frame, self.frame_alloc.clone()) }
7777+ }
7878+}
impl Drop for FrameRef {
    /// Drops the `Frame`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        // Fast path: other references remain, nothing to free.
        if self.refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of `FrameInfo` happen before freeing it.
        // Because it is marked `Release`, the decreasing of the reference count synchronizes
        // with this `Acquire` fence. This means that use of `FrameInfo` happens before decreasing
        // the reference count, which happens before this fence, which happens before freeing `FrameInfo`.
        //
        // This section of the [Boost documentation][1] as quoted in Rusts `Arc` implementation and
        // may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        // Out-of-line slow path: return the frame to its allocator.
        self.drop_slow();
    }
}
111111+112112+impl Debug for FrameRef {
113113+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
114114+ f.debug_struct("FrameRef")
115115+ .field("ptr", &self.frame)
116116+ .finish_non_exhaustive()
117117+ }
118118+}
impl Deref for FrameRef {
    type Target = Frame;

    fn deref(&self) -> &Self::Target {
        // Safety: `self.frame` points at live frame bookkeeping, and the reference
        // count held by this `FrameRef` keeps it alive for at least `'self`.
        unsafe { self.frame.as_ref() }
    }
}
impl FrameRef {
    /// Constructs a `FrameRef` from a raw frame pointer and the allocator that owns it.
    ///
    /// # Safety
    ///
    /// `frame` must point to a live `Frame` allocated by `alloc`, and the frame's
    /// reference count must already account for the handle created here.
    pub unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self {
        Self { frame, frame_alloc: alloc }
    }

    /// Returns `true` if both handles point at the same underlying `Frame`.
    pub fn ptr_eq(a: &Self, b: &Self) -> bool {
        a.frame == b.frame
    }

    /// Slow path of `drop`: returns the frame to its allocator.
    /// `#[inline(never)]` keeps this off the hot refcount-decrement path.
    #[inline(never)]
    fn drop_slow(&mut self) {
        // NOTE(review): `from_size_align_unchecked` assumes the allocator reports a
        // power-of-two page size — confirm that invariant on `FrameAllocator`.
        let layout = unsafe {
            Layout::from_size_align_unchecked(self.frame_alloc.page_size(), self.frame_alloc.page_size())
        };
        // Safety: the refcount reached zero, so this was the last reference and the
        // frame can be returned to the allocator it came from.
        unsafe {
            self.frame_alloc.deallocate(self.frame, layout);
        }
    }
}
147147+148148+// ===== impl Frame =====
// Safety: `Frame` is only mutated through its atomic `refcount`; the intrusive
// `links` contain raw pointers, hence the manual impls.
// NOTE(review): the original comments referenced an `assert_impl_all!` that is not
// present in this file — confirm it exists elsewhere (or add one) to back these impls.
unsafe impl Send for Frame {}

// Safety: see the `Send` impl above.
unsafe impl Sync for Frame {}
impl PartialEq<Frame> for &Frame {
    /// Compares frames by reference count *and* physical address.
    ///
    /// NOTE(review): including the (racy, `Relaxed`-loaded) refcount in equality is
    /// unusual — confirm callers want "same frame in the same state" rather than
    /// pure identity (address only).
    fn eq(&self, other: &Frame) -> bool {
        self.refcount() == other.refcount() && self.addr == other.addr
    }
}
impl Frame {
    /// Creates bookkeeping for the frame at `addr` with the given starting
    /// reference count (`0` for free blocks, `1` for freshly allocated ones).
    pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
        Self {
            addr,
            refcount: AtomicUsize::new(initial_refcount),
            links: list::Links::new(),
        }
    }

    /// Current reference count. Racy snapshot (`Relaxed` load).
    pub fn refcount(&self) -> usize {
        self.refcount.load(Ordering::Relaxed)
    }

    /// Returns `true` if exactly one reference exists (racy snapshot).
    pub fn is_unique(&self) -> bool {
        self.refcount() == 1
    }

    /// Physical address of the first byte of this frame.
    pub fn addr(&self) -> PhysicalAddress {
        self.addr
    }
}
// Safety: frames live in their area's `'static` bookkeeping storage and do not
// move while linked; handles are raw pointers, so the conversions are identity.
unsafe impl Linked<list::Links<Self>> for Frame {
    /// Frames are not owned by the list; membership is tracked by raw pointer only.
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        // Compute the field address arithmetically, without materializing a
        // `&Frame` reference to the whole node.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+137
libs/mem/src/frame_alloc.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod area;
99+mod area_selection;
1010+1111+use core::alloc::Layout;
1212+use core::cell::RefCell;
1313+use core::cmp;
1414+use core::ops::Range;
1515+use core::ptr::NonNull;
1616+use core::sync::atomic::{AtomicUsize, Ordering};
1717+1818+use cordyceps::List;
1919+use cpu_local::collection::CpuLocal;
2020+use fallible_iterator::FallibleIterator;
2121+use lock_api::Mutex;
2222+use smallvec::SmallVec;
2323+2424+use crate::address_space::RawAddressSpace;
2525+use crate::frame_alloc::area::Area;
2626+use crate::frame_alloc::area_selection::select_areas;
2727+use crate::{Frame, PhysicalAddress};
/// Error returned when a frame allocation request cannot be satisfied.
#[derive(Debug)]
pub struct AllocError;
/// An allocator handing out physical memory frames.
///
/// # Safety
///
/// NOTE(review): this `unsafe trait` has no written contract; based on usage in
/// this crate, implementors must hand out valid, unaliased frame bookkeeping and
/// accept back in `deallocate` only blocks they previously allocated — confirm
/// and document the exact obligations.
pub unsafe trait FrameAllocator: Send + Sync + 'static {
    /// Allocates frames satisfying `layout`; returns bookkeeping for every frame.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
    /// Returns a block to the allocator.
    ///
    /// # Safety
    ///
    /// `block` must have come from `allocate` on this allocator with this `layout`.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
    /// Size in bytes of a single frame (page).
    fn page_size(&self) -> usize;
}
3737+3838+const MAX_FRAMES_IN_CACHE: usize = 256;
/// A physical frame allocator: per-region buddy [`Area`]s behind a mutex, plus a
/// per-CPU cache of single page-sized frames for the common fast path.
pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> {
    /// Buddy-allocator areas, one per allocatable physical region.
    areas: Mutex<L, SmallVec<[Area<A>; 4]>>,
    /// Per-CPU free-frame cache (bounded by `MAX_FRAMES_IN_CACHE`).
    cpu_local_cache: CpuLocal<RefCell<List<Frame>>>,
    /// Largest alignment any area can currently serve; advisory only.
    max_alignment_hint: AtomicUsize,
}
impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> {
    /// Builds an allocator for the given physical regions, constructing one buddy
    /// [`Area`] per selected region.
    ///
    /// # Errors
    ///
    /// Propagates failures from area selection.
    pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> {
        let mut max_alignment_hint = 0;
        let mut areas = SmallVec::new();

        let mut selections = select_areas::<A>(allocatable_regions);
        while let Some(selection) = selections.next()? {
            let area = Area::new(selection.area, selection.bookkeeping);
            // track the best alignment any single area can serve
            max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint());
            areas.push(area);
        }

        Ok(Self {
            areas: Mutex::new(areas),
            cpu_local_cache: CpuLocal::new(),
            max_alignment_hint: AtomicUsize::new(max_alignment_hint),
        })
    }

    /// Largest alignment this allocator can currently serve. Advisory: updated on
    /// construction and on deallocation, so it may be stale.
    pub fn max_alignment_hint(&self) -> usize {
        self.max_alignment_hint.load(Ordering::Relaxed)
    }

    /// Fast path: pop a frame from this CPU's cache. Only single, page-sized,
    /// page-aligned requests are served from the cache.
    fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();
            cache.pop_back()
        } else {
            None
        }
    }

    /// Fast path: push a single page-sized frame into this CPU's cache.
    /// Returns `false` if the block doesn't qualify or the cache is full; the
    /// caller must then return it to its owning area.
    fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();

            if cache.len() < MAX_FRAMES_IN_CACHE {
                cache.push_back(block);
                return true;
            }
        }

        false
    }
}
// Safety: delegates to the per-area buddy allocators (with a CPU-local cache fast
// path); frames handed out always originate from one of the managed areas.
unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
    for &'static FrameAlloc<L, A>
{
    /// Allocates frames for `layout`: CPU-local cache first, then a first-fit scan
    /// over the locked areas.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        // attempt to allocate from the CPU-local cache first
        if let Some(frame) = self.allocate_local(layout) {
            return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1));
        }

        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            if let Ok(frames) = area.allocate(layout) {
                return Ok(frames);
            }
        }

        Err(AllocError)
    }

    /// Returns `block` to the CPU-local cache if possible, otherwise to the area it
    /// belongs to. Hits `unreachable!` if `block` belongs to no known area, which
    /// would indicate a caller contract violation.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) {
        // attempt to place the frame into the CPU-local cache first
        if self.deallocate_local(block, layout) {
            return;
        }

        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            let block_ = unsafe { block.as_ref() };

            if area.contains_frame(block_.addr()) {
                // Safety: caller contract — `block` was allocated by this allocator,
                // and we just verified it belongs to `area`.
                unsafe { area.deallocate(block, layout) };

                // freeing may have coalesced a larger block, so the hint can only grow
                self.max_alignment_hint
                    .fetch_max(area.max_alignment_hint(), Ordering::Relaxed);

                return;
            }
        }

        unreachable!();
    }

    fn page_size(&self) -> usize {
        A::PAGE_SIZE
    }
}
+444
libs/mem/src/frame_alloc/area.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::marker::PhantomData;
1010+use core::mem::MaybeUninit;
1111+use core::ops::Range;
1212+use core::ptr::NonNull;
1313+use core::{cmp, fmt};
1414+1515+use cordyceps::List;
1616+1717+use crate::address_space::RawAddressSpace;
1818+use crate::frame_alloc::AllocError;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
2020+2121+const MAX_ORDER: usize = 11;
/// A contiguous physical memory range managed by a binary-buddy allocator with
/// per-order free lists (orders `0..MAX_ORDER`).
pub struct Area<A: RawAddressSpace> {
    /// The physical address range managed by this area.
    area: Range<PhysicalAddress>,
    /// One bookkeeping slot per page of `area`; entries are initialized lazily.
    frames: &'static mut [MaybeUninit<Frame>],

    /// Intrusive free list per order; order `n` holds blocks of `PAGE_SIZE << n` bytes.
    free_lists: [List<Frame>; MAX_ORDER],

    /// Largest order observed to hold a free block; advisory (only grows).
    max_order: usize,
    /// Total number of pages in this area.
    total_frames: usize,
    /// Number of pages currently allocated.
    used_frames: usize,

    /// Ties this area to the address-space type supplying `PAGE_SIZE` constants.
    _aspace: PhantomData<A>,
}
// Safety: `Area` exclusively owns its bookkeeping (`&'static mut`), and the raw
// pointers inside the intrusive free lists never escape the area; all mutation
// goes through `&mut self`.
unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {}
// Safety: see the `Send` impl above.
unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {}
3838+3939+impl<A: RawAddressSpace> fmt::Debug for Area<A> {
4040+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4141+ f.debug_struct("Area")
4242+ .field("area", &self.area)
4343+ .field(
4444+ "frames",
4545+ &format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len()),
4646+ )
4747+ .field("free_lists", &self.free_lists)
4848+ .field("max_order", &self.max_order)
4949+ .field("total_frames", &self.total_frames)
5050+ .field("used_frames", &self.used_frames)
5151+ .finish()
5252+ }
5353+}
5454+5555+impl<A: RawAddressSpace> Area<A> {
    /// Builds a buddy-allocator area covering the physical range `area`, using
    /// `frames` as (uninitialized) per-page bookkeeping storage — one slot per page.
    ///
    /// All blocks start out free; `Frame` entries are written lazily, one per
    /// free-list head during init and per frame on allocation.
    pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self {
        let mut free_lists = [const { List::new() }; MAX_ORDER];
        let mut total_frames = 0;
        let mut max_order = 0;

        let mut remaining_bytes = area.size();
        let mut addr = area.start;

        // This is the main area initialization loop. We loop through the `area` "chopping off" the
        // largest possible min_block_size-aligned block from the area and add that to its corresponding
        // free list.
        //
        // Note: Remember that for buddy allocators `size == align`. That means we both need to check
        // the alignment and size of our remaining area and can only chop off whatever is smaller.
        while remaining_bytes > 0 {
            // println!("processing next chunk. remaining_bytes={remaining_bytes};addr={addr:?}");

            // the largest size we can chop off given the alignment of the remaining area
            let max_align = if addr == PhysicalAddress::ZERO {
                // if area happens to start exactly at address 0x0 our calculation below doesn't work.
                // address 0x0 actually supports *any* alignment so we special-case it and return `MAX`
                usize::MAX
            } else {
                // otherwise mask out the least significant bit of the address to figure out its alignment
                addr.get() & (!addr.get() + 1)
            };
            // the largest size we can chop off given the size of the remaining area
            // which is the next smaller power of two
            let max_size = 1 << remaining_bytes.ilog2();

            // our chosen size will be the smallest of
            // - the maximum size by remaining areas alignment
            // - the maximum size by remaining areas size
            // - the maximum block size supported by this allocator
            let size = cmp::min(
                cmp::min(max_align, max_size),
                A::PAGE_SIZE << (MAX_ORDER - 1),
            );
            debug_assert!(size.is_multiple_of(A::PAGE_SIZE));

            // size == PAGE_SIZE << order, so the order is the shift distance
            let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize;

            {
                // initialize only the block head's bookkeeping (refcount 0 = free)
                // and link it into the free list for its order
                let frame = frames[total_frames].write(Frame::new(addr, 0));

                free_lists[order].push_back(NonNull::from(frame));
            }

            total_frames += 1 << order;
            max_order = cmp::max(max_order, order);
            addr = addr.checked_add(size).unwrap();
            remaining_bytes -= size;
        }

        // Make sure we've accounted for all frames
        debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE);

        Self {
            area,
            frames,

            free_lists,

            max_order,
            total_frames,
            used_frames: 0,

            _aspace: PhantomData,
        }
    }
    /// Allocates a block of frames satisfying `layout` from this area's free lists,
    /// splitting larger blocks as needed (standard buddy allocation).
    ///
    /// # Errors
    ///
    /// Returns [`AllocError`] if the layout cannot be served or no sufficiently
    /// large free block exists.
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let min_order = self.allocation_order(layout)?;

        // Starting at the smallest sufficient size class, search for a free block. If we find one in
        // a free list, return it and its order.
        let (block, block_order) = self.free_lists[min_order..]
            .iter_mut()
            .enumerate()
            .find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order)))
            .ok_or(AllocError)?;

        // if the block we found is larger than the `min_order` we need, we repeatedly split off
        // the upper half (of decreasing size) until we reach the desired size. The split off blocks
        // are returned to their appropriate free lists.
        for order in (min_order..block_order).rev() {
            // Safety: `block` was just popped off a free list and is valid bookkeeping.
            let block = unsafe { block.as_ref() };
            let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap();
            let buddy = self.frame_for_addr(buddy_addr).unwrap();

            // the split-off half becomes a free block (refcount 0)
            let buddy = buddy.write(Frame::new(buddy_addr, 0));
            let buddy = NonNull::from(buddy);

            self.free_lists[order].push_back(buddy);
        }

        let alloc_size_frames = 1 << min_order;

        // lazily initialize all frames
        for idx in 0..alloc_size_frames {
            // Safety: as above, `block` is valid bookkeeping for this area.
            let block = unsafe { block.as_ref() };
            let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap();

            let frame = self.frame_for_addr(addr).unwrap();
            // refcount 1: ownership passes to the caller
            frame.write(Frame::new(addr, 1));
        }

        self.used_frames += alloc_size_frames;

        #[cfg(debug_assertions)]
        self.assert_valid();

        Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames))
    }
    /// Returns a block to this area, coalescing it with free buddy blocks into
    /// larger blocks where possible.
    ///
    /// # Safety
    ///
    /// `block` must have been returned by [`Self::allocate`] on this area with the
    /// same `layout`, and must not be used after this call.
    pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let initial_order = self.allocation_order(layout).unwrap();
        let mut order = initial_order;

        // Repeatedly merge with the buddy while it is free, moving up one size class
        // per merge; stops at the top order or at the first in-use buddy.
        while order < self.free_lists.len() - 1 {
            // Safety: caller contract — `block` is valid bookkeeping for this area.
            let block_ = unsafe { block.as_ref() };
            if let Some(buddy) = self.buddy_addr(order, block_.addr())
                && cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1))
                && self.remove_from_free_list(order, buddy)
            {
                let buddy: NonNull<Frame> =
                    NonNull::from(self.frame_for_addr(buddy).unwrap()).cast();
                // the merged block starts at the lower of the two halves
                block = cmp::min(buddy, block);
                order += 1;
            } else {
                break;
            }
        }

        self.free_lists[order].push_back(block);
        self.used_frames -= 1 << initial_order;
        // coalescing may have produced a larger block, so the hint can only grow
        self.max_order = cmp::max(self.max_order, order);

        #[cfg(debug_assertions)]
        self.assert_valid();
    }
    /// Returns the size in bytes of the largest block order this area has tracked, an
    /// upper bound on the size/alignment a single allocation can be served with.
    pub fn max_alignment_hint(&self) -> usize {
        self.order_size(self.max_order)
    }
207207+208208+ fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> {
209209+ let relative = addr.checked_sub_addr(self.area.start).unwrap();
210210+ let idx = relative >> A::PAGE_SIZE_LOG_2;
211211+ Some(&mut self.frames[idx])
212212+ }
    /// Returns `true` if `addr` falls inside this area's physical address range.
    pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool {
        self.area.contains(&addr)
    }
    /// Returns the address of the buddy of the order-`order` block at `block`, or `None`
    /// for blocks of the maximum order (which have no buddy).
    ///
    /// # Panics
    ///
    /// Panics if `block` lies below the area base or is not aligned to `order`.
    fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> {
        assert!(block >= self.area.start);
        assert!(block.is_aligned_to(A::PAGE_SIZE << order));

        let relative = block.checked_sub_addr(self.area.start).unwrap();
        let size = self.order_size(order);
        if size >= self.area.size() {
            // MAX_ORDER blocks do not have buddies
            None
        } else {
            // Fun: We can find our buddy by xoring the right bit in our
            // offset from the base of the heap.
            Some(self.area.start.checked_add(relative ^ size).unwrap())
        }
    }
    /// Unlinks the block at address `to_remove` from the order-`order` free list.
    ///
    /// Returns `true` if the block was found and removed, `false` if it was not on the
    /// list (i.e. it is currently allocated or part of a larger block).
    fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool {
        let mut c = self.free_lists[order].cursor_front_mut();

        // Linear scan through the list; free lists are walked with a cursor so the
        // matching node can be unlinked in place.
        while let Some(candidate) = c.current() {
            if candidate.addr() == to_remove {
                c.remove_current().unwrap();
                return true;
            }

            c.move_next();
        }

        false
    }
    // The size of the blocks we allocate for a given order.
    // This is `PAGE_SIZE * 2^order` bytes.
    const fn order_size(&self, order: usize) -> usize {
        1 << (A::PAGE_SIZE_LOG_2 as usize + order)
    }
253253+254254+ const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> {
255255+ // We can only allocate blocks that are at least one page
256256+ if !layout.size().is_multiple_of(A::PAGE_SIZE) {
257257+ return Err(AllocError);
258258+ }
259259+260260+ // We can only allocate blocks that are at least page aligned
261261+ if !layout.align().is_multiple_of(A::PAGE_SIZE) {
262262+ return Err(AllocError);
263263+ }
264264+265265+ let size = layout.size().next_power_of_two();
266266+267267+ // We cannot allocate blocks larger than our largest size class
268268+ if size > self.order_size(self.free_lists.len()) {
269269+ return Err(AllocError);
270270+ }
271271+272272+ Ok(size)
273273+ }
274274+275275+ const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> {
276276+ if let Ok(size) = self.allocation_size(layout) {
277277+ Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize)
278278+ } else {
279279+ Err(AllocError)
280280+ }
281281+ }
    /// Debug helper: asserts the internal invariants of this area.
    ///
    /// Checks that every free block is aligned to its order and that the frames on the
    /// free lists plus the used frames account for every frame in the area.
    fn assert_valid(&self) {
        for (order, l) in self.free_lists.iter().enumerate() {
            l.assert_valid();

            for f in l {
                assert!(
                    f.addr().is_aligned_to(A::PAGE_SIZE << order),
                    "frame {f:?} is not aligned to order {order}"
                );
            }
        }

        assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames);
    }
297297+}
298298+299299+fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize {
300300+ let mut frames = 0;
301301+ for (order, l) in area.free_lists.iter().enumerate() {
302302+ frames += l.len() << order;
303303+ }
304304+ frames
305305+}
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest};

    use super::*;
    use crate::test_utils::TestAddressSpace;

    const PAGE_SIZE: usize = 4096;

    prop_compose! {
        // Generate arbitrary integers up to `max / PAGE_SIZE`, then multiply them
        // by PAGE_SIZE, thus producing only page-aligned integers in the desired
        // range.
        fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE }
    }

    proptest! {
        // A fresh area with an aligned base must have carved itself into
        // largest-to-smallest power-of-two chunks.
        #[test]
        fn new_fixed_base(num_frames in 0..50_000usize) {
            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE),
                {
                    // Leaked so the bookkeeping gets the 'static lifetime Area expects.
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // let's check whether the area correctly initialized itself
            //
            // since we start on an aligned base address (0x0) we expect it have split off chunks
            // largest-to-smallest. We replicate the process here, but take a block from its free list.
            let mut frames_remaining = num_frames;
            while frames_remaining > 0 {
                // clamp the order we calculate at the max possible order
                let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1);

                let chunk = area.free_lists[chunk_order].pop_back();
                prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}");

                frames_remaining -= 1 << chunk_order;
            }
            // At the end of this process we expect all free lists to be empty
            prop_assert!(area.free_lists.iter().all(|list| list.is_empty()));
        }

        // Construction must uphold the invariants for arbitrary page-aligned bases too.
        #[test]
        fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) {

            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // TODO figure out if we can test the free lists in a sensible way
        }

        // Every frame of a freshly constructed area must start out on a free list.
        #[test]
        fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            debug_assert_eq!(frames_in_area(&mut area), num_frames);
        }

        // An allocate/deallocate round-trip must restore the exact initial free-list
        // shape: `area1` stays untouched as the reference, `area2` does the round-trip.
        #[test]
        fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area.clone(),
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area1.assert_valid();

            let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area2.assert_valid();

            // we can only allocate contiguous blocks of the largest order available
            prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE));

            let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap();

            let block = area2.allocate(layout).unwrap();
            // The allocator may round up to the next power-of-two block.
            prop_assert!(block.len() >= alloc_frames);

            unsafe { area2.deallocate(block.cast(), layout); }

            assert_eq!(frames_in_area(&area2), num_frames);

            for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() {
                prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len());
            }
        }
    }
}
+133
libs/mem/src/frame_alloc/area_selection.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::slice;
99+use core::fmt::Formatter;
1010+use core::marker::PhantomData;
1111+use core::mem;
1212+use core::mem::MaybeUninit;
1313+use core::ops::Range;
1414+1515+use fallible_iterator::FallibleIterator;
1616+use smallvec::SmallVec;
1717+1818+use crate::address_space::RawAddressSpace;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
// Upper bound on the bookkeeping bytes we are willing to waste on holes when merging
// adjacent allocatable regions into a single area.
const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB
/// A physical memory range selected to become one allocator area, together with the
/// memory carved out of its end for its own `Frame` bookkeeping.
#[derive(Debug)]
pub struct AreaSelection {
    /// The usable physical range (the bookkeeping slice below is already excluded).
    pub area: Range<PhysicalAddress>,
    /// Uninitialized `Frame` slots carved out of the end of the selected range.
    pub bookkeeping: &'static mut [MaybeUninit<Frame>],
    /// Bookkeeping bytes wasted on holes swallowed while forming this area.
    pub wasted_bytes: usize,
}
/// Error returned when a candidate physical range cannot be turned into a usable area:
/// it is empty after page alignment, or too small to hold its own bookkeeping.
#[derive(Debug)]
pub struct SelectionError {
    /// The physical range that had to be rejected.
    pub range: Range<PhysicalAddress>,
}
/// Fallible iterator that merges allocatable regions (popped back-to-front) into
/// candidate areas, bounded by `MAX_WASTED_AREA_BYTES` of bookkeeping waste per area.
pub struct ArenaSelections<A: RawAddressSpace> {
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
    // Waste accumulated for the area currently being formed; reset on each yield.
    wasted_bytes: usize,

    _aspace: PhantomData<A>,
}
/// Creates an iterator that selects allocator areas from the given allocatable regions.
pub fn select_areas<A: RawAddressSpace>(
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
) -> ArenaSelections<A> {
    ArenaSelections {
        allocatable_regions,
        wasted_bytes: 0,

        _aspace: PhantomData,
    }
}
impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> {
    type Item = AreaSelection;
    type Error = SelectionError;

    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        // Start with the last remaining region ...
        let Some(mut area) = self.allocatable_regions.pop() else {
            return Ok(None);
        };

        // ... and greedily merge further regions into it, swallowing the holes between
        // them, for as long as the bookkeeping waste stays within budget.
        while let Some(region) = self.allocatable_regions.pop() {
            debug_assert!(!area.is_overlapping(&region));

            let pages_in_hole = if area.end <= region.start {
                // the region is higher than the current area
                region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE
            } else {
                debug_assert!(region.end <= area.start);
                // the region is lower than the current area
                area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE
            };

            // Each page in the hole still needs a `Frame` entry even though it can never
            // be allocated — that is the waste we are bounding.
            let waste_from_hole = size_of::<Frame>() * pages_in_hole;

            if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES {
                // Over budget: put the region back for the next area and stop merging.
                self.allocatable_regions.push(region);
                break;
            } else {
                self.wasted_bytes += waste_from_hole;

                // Extend the area across the hole in the appropriate direction.
                if area.end <= region.start {
                    area.end = region.end;
                } else {
                    area.start = region.start;
                }
            }
        }

        let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap();
        // We can't use empty areas anyway
        if aligned.is_empty() {
            return Err(SelectionError { range: aligned });
        }

        let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE;

        // Place the bookkeeping (one `Frame` slot per page) at the tail of the area,
        // aligned down to a page boundary.
        let bookkeeping_start = aligned
            .end
            .checked_sub(bookkeeping_size_frames * size_of::<Frame>())
            .unwrap()
            .align_down(A::PAGE_SIZE);

        // The area has no space to hold its own bookkeeping
        if bookkeeping_start < aligned.start {
            return Err(SelectionError { range: aligned });
        }

        // Safety: [bookkeeping_start, aligned.end) lies within the selected allocatable
        // range and is excluded from the usable area immediately below, so nothing else
        // will hand out this memory.
        let bookkeeping = unsafe {
            slice::from_raw_parts_mut(
                bookkeeping_start.as_mut_ptr().cast(),
                bookkeeping_size_frames,
            )
        };
        aligned.end = bookkeeping_start;

        Ok(Some(AreaSelection {
            area: aligned,
            bookkeeping,
            // Hand the accumulated waste to the caller and reset for the next area.
            wasted_bytes: mem::take(&mut self.wasted_bytes),
        }))
    }
}
124124+125125+// ===== impl SelectionError =====
126126+127127+impl core::fmt::Display for SelectionError {
128128+ fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
129129+ todo!()
130130+ }
131131+}
132132+133133+impl core::error::Error for SelectionError {}
+19
libs/mem/src/lib.rs
#![cfg_attr(not(test), no_std)]
extern crate alloc;

mod access_rules;
pub mod address_space;
mod addresses;
mod frame;
pub mod frame_alloc;
#[cfg(test)]
mod test_utils;
mod utils;
mod vmo;
// NOTE(review): unlike `test_utils` this module is compiled unconditionally and holds
// work-in-progress scaffolding — confirm whether it should be gated on `cfg(test)`.
mod test;

/// Crate-wide result type; errors are reported through `anyhow`.
pub type Result<T> = anyhow::Result<T>;

pub use access_rules::{AccessRules, WriteOrExecute};
pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
pub use frame::{Frame, FrameRef};
libs/mem/src/test.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::ops::RangeBounds;
99+use crate::{AccessRules, VirtualAddress};
1010+use crate::address_space::Batch;
/// Work-in-progress sketch of a region within an address space, parameterized over a
/// raw address-space implementation `A`.
struct AddressSpaceRegion<A> {
    _aspace: A,
}
impl<A> AddressSpaceRegion<A> {
    /// Map physical memory to back the given `range`
    ///
    /// After this call succeeds, accesses that align with the given `access_rules` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // TODO: not implemented yet.
        todo!()
    }

    /// Release physical memory frames backing the given `range`.
    ///
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds for this address space region
    pub fn decommit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // TODO: not implemented yet.
        todo!()
    }
}
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
/// Asserts, in debug builds only, that the precondition `$e` of an `unsafe` operation
/// holds; panics with `$message` otherwise. The named arguments are passed by value
/// into a `const fn` so the check body stays a single optimizable function.
macro_rules! assert_unsafe_precondition_ {
    ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => {
        {
            // This check is inlineable, but not by the MIR inliner.
            // The reason for this is that the MIR inliner is in an exceptionally bad position
            // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`,
            // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and
            // would be bad for compile times.
            //
            // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without
            // inlining the check. If it's `true`, it can inline it and get significantly better performance.
            #[inline]
            const fn precondition_check($($name:$ty),*) {
                assert!($e, concat!("unsafe precondition(s) violated: ", $message,
                    "\n\nThis indicates a bug in the program. \
                    This Undefined Behavior check is optional, and cannot be relied on for safety."))
            }

            #[cfg(debug_assertions)]
            precondition_check($($arg,)*);
        }
    };
}
pub(crate) use assert_unsafe_precondition_;
+529
libs/mem/src/vmo.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::sync::Arc;
99+use core::alloc::Layout;
1010+use core::ops::{Bound, Range, RangeBounds};
1111+use core::{fmt, ptr};
1212+1313+use anyhow::{anyhow, ensure};
1414+use fallible_iterator::FallibleIterator;
1515+use lock_api::RwLock;
1616+use smallvec::SmallVec;
1717+1818+use crate::frame_alloc::FrameAllocator;
1919+use crate::{AccessRules, FrameRef};
/// A type-erased handle to a virtual memory object (VMO): a provider of physical
/// frames, driven through the function pointers in [`RawVmoVTable`].
pub struct Vmo {
    // User-supplied label, used for diagnostics only (see `named`/`name`).
    name: &'static str,
    vmo: RawVmo,
}
2525+2626+#[derive(Debug)]
2727+struct RawVmo {
2828+ data: *const (),
2929+ vtable: &'static RawVmoVTable,
3030+}
3131+3232+#[derive(PartialEq, Copy, Clone, Debug)]
3333+struct RawVmoVTable {
3434+ clone: unsafe fn(*const ()) -> RawVmo,
3535+ acquire: unsafe fn(
3636+ *const (),
3737+ index: usize,
3838+ access_rules: AccessRules,
3939+ ) -> crate::Result<Option<FrameRef>>,
4040+ release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
4141+ clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
4242+ len: unsafe fn(*const ()) -> usize,
4343+ resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
4444+ drop: unsafe fn(*const ()),
4545+}
4646+4747+// ===== impl Vmo =====
// A Vmo is just a pointer/vtable pair, so pinning imposes no extra constraints.
impl Unpin for Vmo {}

// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Send
// therefore Vmo is Send too
unsafe impl Send for Vmo {}
// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Sync
// therefore Vmo is Sync too
unsafe impl Sync for Vmo {}
impl Clone for Vmo {
    #[inline]
    fn clone(&self) -> Self {
        Self {
            // Duplication is delegated to the vtable (e.g. a refcount increment);
            // Safety: `data` was paired with this vtable at construction.
            vmo: unsafe { (self.vmo.vtable.clone)(self.vmo.data) },
            name: self.name,
        }
    }
}
impl Drop for Vmo {
    #[inline]
    fn drop(&mut self) {
        // Releasing this handle's reference is delegated to the vtable;
        // Safety: `data` was paired with this vtable at construction.
        unsafe { (self.vmo.vtable.drop)(self.vmo.data) }
    }
}
impl fmt::Debug for Vmo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print the vtable by address: its pointer identity is what distinguishes one
        // VMO implementation from another.
        let vtable_ptr = self.vmo.vtable as *const RawVmoVTable;
        f.debug_struct("Vmo")
            .field("name", &self.name)
            .field("data", &self.vmo.data)
            .field("vtable", &vtable_ptr)
            .finish()
    }
}
impl Vmo {
    /// Creates a new `Vmo` from the provided `data` pointer and `vtable`.
    ///
    /// The `data` pointer can be used to store arbitrary data as required by the vmo implementation.
    /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
    /// The value of this pointer will get passed to all functions that are part
    /// of the `vtable` as the first parameter.
    ///
    /// It is important to consider that the `data` pointer must point to a
    /// thread safe type such as an `Arc`.
    ///
    /// The `vtable` customizes the behavior of a `Vmo`. For each operation
    /// on the `Vmo`, the associated function in the `vtable` will be called.
    ///
    /// # Safety
    ///
    /// The behavior of the returned `Vmo` is undefined if the contract defined
    /// in [`RawVmoVTable`]'s documentation is not upheld.
    #[inline]
    #[must_use]
    pub const unsafe fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
        // Safety: ensured by caller
        unsafe { Self::from_raw(RawVmo { data, vtable }) }
    }

    /// Creates a new `Vmo` from a [`RawVmo`].
    ///
    /// # Safety
    ///
    /// The behavior of the returned `Vmo` is undefined if the contract defined
    /// in [`RawVmo`]'s and [`RawVmoVTable`]'s documentation is not upheld.
    #[inline]
    #[must_use]
    pub const unsafe fn from_raw(vmo: RawVmo) -> Self {
        Self {
            vmo,
            // Placeholder label until `named` is called.
            name: "<unnamed mystery VMO>",
        }
    }

    /// Add an arbitrary user-defined name to this `Vmo`.
    pub fn named(mut self, name: &'static str) -> Self {
        self.name = name;
        self
    }

    /// Returns this `Vmo`'s name, if it was given one using the [`Vmo::named`]
    /// method.
    pub fn name(&self) -> &'static str {
        self.name
    }

    /// Returns the current length of this VMO in frames (delegates to the vtable).
    pub fn len(&self) -> usize {
        unsafe { (self.vmo.vtable.len)(self.vmo.data) }
    }

    /// Returns whether this VMO has an associated content source.
    pub fn has_content_source(&self) -> bool {
        self.content_source().is_some()
    }

    // TODO: content sources are not implemented yet; calling this panics.
    pub fn content_source(&self) -> Option<()> {
        todo!()
    }

    /// Gets the `data` pointer used to create this `Vmo`.
    #[inline]
    #[must_use]
    pub fn data(&self) -> *const () {
        self.vmo.data
    }

    /// Gets the `vtable` pointer used to create this `Vmo`.
    #[inline]
    #[must_use]
    pub fn vtable(&self) -> &'static RawVmoVTable {
        self.vmo.vtable
    }

    // Acquire the frames in the given `range`. After this call succeeds, all accesses
    // following the given `access_rules` MUST NOT fault.
    // UNIT: frames
    pub fn acquire<R>(
        &self,
        range: R,
        access_rules: AccessRules,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        // NOTE(review): iterating over the `Result` silently discards a bound-check
        // error (an out-of-range request yields an *empty* iterator rather than an
        // error) — confirm this is intended; the same applies to `release`/`clear`.
        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(move |idx| unsafe {
                (self.vmo.vtable.acquire)(self.vmo.data, idx, access_rules).transpose()
            });

        fallible_iterator::convert(i)
    }

    // Release the frame at the given `index`. After this call succeeds, all accessed to the frame
    // MUST fault. Returns the base physical address of the release frame.
    // UNIT: frames
    pub fn release<R>(
        &self,
        range: R,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });

        fallible_iterator::convert(i)
    }

    // Clear the frames in the given `range`, replacing each with zeroed backing, and
    // yield the previously installed frames.
    // UNIT: frames
    pub fn clear<R>(
        &self,
        range: R,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(|idx| unsafe { (self.vmo.vtable.clear)(self.vmo.data, idx).transpose() });

        fallible_iterator::convert(i)
    }

    // Grow the VMO to `new_size` (guaranteed to be larger than or equal to the current size).
    fn grow(&self, new_len: usize) -> crate::Result<()> {
        debug_assert!(new_len >= self.len());

        unsafe { (self.vmo.vtable.resize)(self.vmo.data, new_len)? };

        Ok(())
    }

    // Shrink the VMO to `new_size` (guaranteed to be smaller than or equal to the current size).
    // After this call succeeds, all accesses outside the new range MUST fault.
    // UNIT: frames
    // NOTE(review): still `todo!()`; everything after it (including the commented-out
    // sketch) is the intended implementation kept for reference.
    pub fn shrink(
        &self,
        new_len: usize,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> {
        debug_assert!(new_len <= self.len());

        let old_len = self.len();

        todo!();
        fallible_iterator::empty()
        // unsafe {
        //     (self.vmo.vtable.resize)(self.vmo.data, new_len)?;
        // };
        //
        // let i = (new_len..old_len)
        //     .into_iter()
        //     .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
        //
        // fallible_iterator::convert(i)
    }

    /// Resolves `range` against this VMO's length, returning a concrete index range or
    /// an error when the end exceeds the current length.
    #[inline]
    fn bound_check<R>(&self, range: R) -> crate::Result<Range<usize>>
    where
        R: RangeBounds<usize>,
    {
        let start = match range.start_bound() {
            Bound::Included(b) => *b,
            Bound::Excluded(b) => *b + 1,
            Bound::Unbounded => 0,
        };
        let end = match range.end_bound() {
            Bound::Included(b) => *b + 1,
            Bound::Excluded(b) => *b,
            Bound::Unbounded => self.len(),
        };

        ensure!(end <= self.len());

        Ok(start..end)
    }
}
283283+284284+// ===== impl RawVmo =====
impl RawVmo {
    /// Creates a new `RawVmo` from the provided `data` pointer and `vtable`.
    ///
    /// The `data` pointer can be used to store arbitrary data as required by the VMO implementation.
    /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
    /// The value of this pointer will get passed to all functions that are part
    /// of the `vtable` as the first parameter.
    ///
    /// It is important to consider that the `data` pointer must point to a
    /// thread safe type such as an `Arc`.
    ///
    /// The `vtable` customizes the behavior of a `Vmo`. For each operation
    /// on the `Vmo`, the associated function in the `vtable` will be called.
    #[inline]
    #[must_use]
    pub const fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
        Self { data, vtable }
    }
}
305305+306306+// ===== impl RawVmoVTable =====
impl RawVmoVTable {
    /// Creates a new vtable from the given operation function pointers; see the field
    /// documentation on `RawVmoVTable` for the contract each function must satisfy.
    pub const fn new(
        clone: unsafe fn(*const ()) -> RawVmo,
        acquire: unsafe fn(
            *const (),
            index: usize,
            access_rules: AccessRules,
        ) -> crate::Result<Option<FrameRef>>,
        release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
        clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
        len: unsafe fn(*const ()) -> usize,
        resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
        drop: unsafe fn(*const ()),
    ) -> Self {
        Self {
            clone,
            acquire,
            release,
            clear,
            len,
            resize,
            drop,
        }
    }
}
/// Returns a placeholder `Vmo` with a null data pointer; every vtable operation except
/// `clone` and `drop` is `unreachable!()`, so the stub must never actually be used to
/// acquire or release frames.
pub fn stub_vmo() -> Vmo {
    const WIRED_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        stub_clone,
        stub_acquire,
        stub_release,
        stub_clear,
        stub_len,
        stub_resize,
        stub_drop,
    );

    // Cloning just hands out another handle to the same (null) data pointer.
    unsafe fn stub_clone(ptr: *const ()) -> RawVmo {
        debug_assert!(ptr.is_null());
        RawVmo::new(ptr, &WIRED_VMO_VTABLE)
    }

    unsafe fn stub_acquire(
        ptr: *const (),
        _index: usize,
        _access_rules: AccessRules,
    ) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_release(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_clear(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_len(ptr: *const ()) -> usize {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_resize(ptr: *const (), _new_len: usize) -> crate::Result<()> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    // Dropping the stub owns no state, so this is a no-op.
    unsafe fn stub_drop(ptr: *const ()) {
        debug_assert!(ptr.is_null());
    }

    unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) }
}
/// A VMO whose frames are individually allocated from a frame allocator and tracked in
/// a per-index list (`None` = not yet populated).
pub struct PagedVmo<R: lock_api::RawRwLock> {
    list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>,
    frame_alloc: &'static dyn FrameAllocator,
}
impl<R: lock_api::RawRwLock> PagedVmo<R> {
    // Vtable shared by every PagedVmo handle; `data` is a type-erased `Arc<Self>`.
    const RAW_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        Self::clone_vmo,
        Self::acquire,
        Self::release,
        Self::clear,
        Self::len,
        Self::resize,
        Self::drop_vmo,
    );

    /// Creates an empty paged VMO backed by the given frame allocator.
    pub fn new(frame_alloc: &'static dyn FrameAllocator) -> Self {
        Self {
            list: RwLock::new(SmallVec::new()),
            frame_alloc,
        }
    }

    /// Type-erases this VMO into a `Vmo` handle; the `Arc` reference is transferred
    /// into the handle's data pointer.
    #[inline(always)]
    pub fn into_vmo(self: Arc<Self>) -> Vmo {
        unsafe { Vmo::new(Arc::into_raw(self) as *const (), &Self::RAW_VMO_VTABLE) }
    }

    // Allocates exactly one page-sized, page-aligned frame from the backing allocator.
    fn allocate_frame(&self) -> FrameRef {
        let layout =
            Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size())
                .unwrap();

        let frames = self.frame_alloc.allocate(layout).unwrap();
        debug_assert_eq!(frames.len(), 1);

        unsafe { FrameRef::from_raw_parts(frames.cast(), self.frame_alloc.clone()) }
    }

    // TODO: shared zero-frame support is not implemented yet.
    fn clone_the_zero_frame(&self) -> FrameRef {
        todo!()
    }

    // Vtable `clone`: bump the Arc refcount and hand out another raw handle.
    #[inline(always)]
    unsafe fn clone_vmo(vmo: *const ()) -> RawVmo {
        unsafe {
            Arc::increment_strong_count(vmo.cast::<Self>());
        }
        RawVmo::new(vmo, &Self::RAW_VMO_VTABLE)
    }

    // Vtable `drop`: reconstitute and drop one Arc reference.
    unsafe fn drop_vmo(ptr: *const ()) {
        drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) });
    }

    // Vtable `acquire`: TODO — populating/retrieving the frame at `index` is unimplemented.
    unsafe fn acquire(
        ptr: *const (),
        index: usize,
        access_rules: AccessRules,
    ) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };

        todo!()
    }

    // Vtable `release`: take the frame out of slot `index`, leaving `None`.
    unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        let slot = list
            .get_mut(index)
            .ok_or(anyhow!("index out of bounds"))?
            .take();

        Ok(slot)
    }

    // Vtable `clear`: swap the frame at `index` for the shared zero frame, returning
    // the previously installed frame.
    unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        let prev_frame = list
            .get_mut(index)
            .ok_or(anyhow!("index out of bounds"))?
            .replace(me.clone_the_zero_frame());

        Ok(prev_frame)
    }

    // Vtable `len`: number of slots currently tracked.
    unsafe fn len(ptr: *const ()) -> usize {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let list = me.list.read();

        list.len()
    }

    // Vtable `resize`: grow fills new slots with `None`; shrink drops trailing frames.
    unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        list.resize(new_len, None);

        Ok(())
    }
}
// Experimental successor to `PagedVmo` that additionally keeps a dedicated zero frame;
// work in progress.
struct VVmo<R: lock_api::RawRwLock> {
    list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>,
    frame_alloc: &'static dyn FrameAllocator,
    the_zero_frame: FrameRef,
}
// Accumulates the frame churn of one VMO operation so the caller can apply the mapping
// changes in bulk.
struct Batch {
    freed: SmallVec<[FrameRef; 4]>,
    allocated: SmallVec<[FrameRef; 4]>,
}
impl<R: lock_api::RawRwLock> VVmo<R> {
    // Allocates exactly one page-sized, page-aligned frame from the backing allocator.
    fn allocate_one(&self) -> FrameRef {
        let layout =
            Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size())
                .unwrap();
        let frame = self.frame_alloc.allocate(layout).unwrap();
        debug_assert_eq!(frame.len(), 1);
        unsafe { FrameRef::from_raw_parts(frame.cast(), self.frame_alloc.clone()) }
    }

    // WIP: populate (or prepare for access) the frame at `index`.
    pub fn acquire(&self, index: usize, access_rules: AccessRules, batch: &mut Batch) {
        let mut list = self.list.write();

        if let Some(old_frame) = list.get(index).unwrap() {
            // NOTE(review): asserting the existing frame is *not* uniquely owned looks
            // inverted for a copy-on-write path — confirm the intended invariant.
            assert!(!old_frame.is_unique());

            if access_rules.is_read_only() {
            }


        } else {
            let new_frame = self.allocate_one();
            list.insert(index, Some(new_frame));
            // TODO report new_frame for mapping
        }
    }

    // WIP stubs: releasing and clearing ranges are not implemented yet.
    pub fn release(&self, range: Range<usize>, batch: &mut Batch) {}
    pub fn clear(&self, range: Range<usize>, batch: &mut Batch) {}
}