···11+# Seeds for failure cases proptest has generated in the past. It is
22+# automatically read and these particular cases re-run before any
33+# novel cases are generated.
44+#
55+# It is recommended to check this file in to source control so that
66+# everyone who runs the test benefits from these saved cases.
77+cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1
88+cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257
99+cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257
1010+cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354
1111+cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257
1212+cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20
1313+cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+69
libs/mem/src/access_rules.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
mycelium_bitfield::bitfield! {
    /// Rules that dictate how a region of virtual memory may be accessed.
    ///
    /// # W^X
    ///
    /// In order to prevent malicious code execution as proactively as possible,
    /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
    /// through the [`WriteOrExecute`] enum field.
    #[derive(PartialEq, Eq)]
    pub struct AccessRules<u8> {
        /// If set, reading from the memory region is allowed.
        pub const READ: bool;
        /// Whether executing, or writing this memory region is allowed (or neither).
        ///
        /// Packed as a 2-bit [`WriteOrExecute`] value; see that type for the W^X rationale.
        pub const WRITE_OR_EXECUTE: WriteOrExecute;
        /// If set, requires code in the memory region to use aarch64 Branch Target Identification.
        /// Does nothing on non-aarch64 architectures.
        pub const BTI: bool;
    }
}
/// Whether executing, or writing this memory region is allowed (or neither).
///
/// This is an enum to enforce [`W^X`] at the type-level.
///
/// Because the "write AND execute" combination (`0b11`) has no variant, that state cannot
/// be expressed through this type at all.
///
/// [`W^X`]: AccessRules
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum WriteOrExecute {
    /// Neither writing nor execution of the memory region is allowed.
    Neither = 0b00,
    /// Writing to the memory region is allowed.
    Write = 0b01,
    /// Executing code from the memory region is allowed.
    Execute = 0b10,
}
4343+4444+// ===== impl WriteOrExecute =====
4545+4646+impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
4747+ type Error = core::convert::Infallible;
4848+4949+ /// The number of bits required to represent a value of this type.
5050+ const BITS: u32 = 2;
5151+5252+ #[inline]
5353+ fn try_from_bits(bits: u8) -> Result<Self, Self::Error> {
5454+ match bits {
5555+ b if b == Self::Neither as u8 => Ok(Self::Neither),
5656+ b if b == Self::Write as u8 => Ok(Self::Write),
5757+ b if b == Self::Execute as u8 => Ok(Self::Execute),
5858+ _ => {
5959+ // this should never happen unless the bitpacking code is broken
6060+ unreachable!("invalid memory region access rules {bits:#b}")
6161+ }
6262+ }
6363+ }
6464+6565+ #[inline]
6666+ fn into_bits(self) -> u8 {
6767+ self as u8
6868+ }
6969+}
+1000
libs/mem/src/address_space.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod batch;
99+mod region;
1010+1111+use alloc::boxed::Box;
1212+use core::alloc::Layout;
1313+use core::num::NonZeroUsize;
1414+use core::ops::{Bound, ControlFlow, Range};
1515+use core::ptr::NonNull;
1616+1717+use anyhow::{format_err, Context};
1818+use rand::distr::Uniform;
1919+use rand::Rng;
2020+use rand_chacha::ChaCha20Rng;
2121+use region::AddressSpaceRegion;
2222+use wavltree::{CursorMut, WAVLTree};
2323+2424+use crate::access_rules::AccessRules;
2525+use crate::utils::assert_unsafe_precondition_;
2626+use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
2727+2828+pub(crate) use batch::Batch;
pub unsafe trait RawAddressSpace {
    /// The smallest addressable chunk of memory of this address space. All address arguments provided
    /// to methods of this type (both virtual and physical) must be aligned to this.
    ///
    /// NOTE(review): the derived constants below assume this is a power of two — confirm
    /// implementations uphold that.
    const PAGE_SIZE: usize;
    /// The number of usable bits in a virtual address of this address space.
    const VIRT_ADDR_BITS: u32;

    /// log2 of [`Self::PAGE_SIZE`] (correct only for power-of-two page sizes).
    const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8;
    /// Mask selecting the high address bits outside the `VIRT_ADDR_BITS`-wide range.
    ///
    /// NOTE(review): `1 << VIRT_ADDR_BITS` overflows during const evaluation if
    /// `VIRT_ADDR_BITS >= usize::BITS` — confirm implementations keep it below that.
    const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1);

    /// The [`Flush`] implementation for this address space.
    type Flush: Flush;

    /// Return a new, empty flush for this address space.
    fn flush(&self) -> Self::Flush;

    /// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given
    /// [`VirtualAddress`] if mapped.
    fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>;

    /// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the
    /// specified access rules.
    ///
    /// If this returns `Ok`, the mapping is added to the raw address space and all future
    /// accesses to the virtual address range will translate to accesses of the physical address
    /// range.
    ///
    /// # Safety
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the mapping cannot be established and the virtual address range
    /// remains unaltered.
    unsafe fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    ) -> crate::Result<()>;

    /// Unmap a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region will cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush);

    /// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region must follow the
    /// specified `AccessRules` or cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    );
}
/// A type that can flush changes made to a [`RawAddressSpace`].
///
/// Note: [`Flush`] is purely optional; it exists so implementations MAY batch expensive
/// flush work instead of performing it on every individual change.
/// Note that the implementation is not required to delay materializing changes until [`Flush::flush`]
/// is called.
pub trait Flush {
    /// Flush changes made to its [`RawAddressSpace`].
    ///
    /// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across
    /// all affected threads/CPUs.
    ///
    /// # Errors
    ///
    /// If this returns `Err`, flushing the changes failed. The changes, or a subset of them, might
    /// still have taken effect across all or some of the threads/CPUs.
    fn flush(self) -> crate::Result<()>;
}
/// A managed virtual address space that tracks reserved regions on top of a [`RawAddressSpace`].
pub struct AddressSpace<R: RawAddressSpace> {
    /// The underlying raw address space all changes are ultimately applied to.
    raw: R,
    /// All currently tracked regions; `assert_valid` relies on iteration being address-ordered.
    regions: WAVLTree<AddressSpaceRegion>,
    /// Changes accumulated here are pushed into `raw` via `flush_changes`.
    batched_raw: Batch,
    /// Maximal range of virtual addresses regions may occupy.
    max_range: Range<VirtualAddress>,
    /// Optional entropy source — presumably used to randomize region placement; confirm
    /// against `find_spot_for`.
    rng: Option<ChaCha20Rng>,
}
132132+133133+impl<R: RawAddressSpace> AddressSpace<R> {
134134+ pub fn new(raw: R, rng: Option<ChaCha20Rng>) -> Self {
135135+ Self {
136136+ raw,
137137+ regions: WAVLTree::new(),
138138+ batched_raw: Batch::new(),
139139+ max_range: VirtualAddress::MIN..VirtualAddress::MAX,
140140+ rng,
141141+ }
142142+ }
    /// Attempts to reserve a region of virtual memory.
    ///
    /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
    /// of `layout`. Access to this region must obey the provided `rules` or cause a hardware fault.
    ///
    /// The returned region may have a larger size than specified by `layout.size()`, and may or may
    /// not have its contents initialized.
    ///
    /// The returned region of virtual memory remains mapped as long as it is [*currently mapped*]
    /// and the address space type itself has not been dropped.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or mapping otherwise fails.
    pub fn map(
        &mut self,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::map]");

        // Round the alignment up to at least one page. `align_to` only fails for a
        // non-power-of-two alignment, which PAGE_SIZE is assumed never to be — TODO confirm.
        let layout = layout.align_to(R::PAGE_SIZE).unwrap();

        let spot = self
            .find_spot_for(layout)
            .context(format_err!("cannot find free spot for layout {layout:?}"))?;

        // TODO "relaxed" frame provider
        let region = AddressSpaceRegion::new(spot, layout, access_rules);

        // Track the new region in the tree; the returned handle is used for the final pointer.
        let region = self.regions.insert(Box::pin(region));

        // TODO OPTIONAL eagerly commit a few pages

        // Push any batched raw-address-space changes before handing out the pointer.
        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region
    /// is zero-initialized.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or mapping otherwise fails.
    pub fn map_zeroed(
        &mut self,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::map_zeroed]");

        // Round the alignment up to at least one page (see `map` for the unwrap rationale).
        let layout = layout.align_to(R::PAGE_SIZE).unwrap();

        let spot = self
            .find_spot_for(layout)
            .context(format_err!("cannot find free spot for layout {layout:?}"))?;

        // TODO "zeroed" frame provider
        let region = AddressSpaceRegion::new(spot, layout, access_rules);

        // Track the new region in the tree; the returned handle is used for the final pointer.
        let region = self.regions.insert(Box::pin(region));

        // TODO OPTIONAL eagerly commit a few pages

        // Push any batched raw-address-space changes before handing out the pointer.
        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Unmaps the virtual memory region referenced by `ptr`.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Panics
    ///
    /// Panics if decommitting the region or flushing the batched changes fails; this method
    /// has no return channel to report errors through.
    pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::unmap]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        // Decommit the whole region (`..`), recording the changes in the batch.
        region.decommit(.., &mut self.batched_raw).unwrap();

        self.batched_raw.flush_changes(&mut self.raw).unwrap();
    }
    /// Attempts to extend the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may extend the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how extending a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow]");

        // Precondition: both layouts must already be page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to change, return the existing region as-is.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Precondition: growing must never shrink the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        // Prefer growing in place; fall back to relocating the region elsewhere.
        if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow_in_place]");

        // Precondition: both layouts must already be page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to change, return the existing region as-is.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Precondition: growing must never shrink the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        // Unlike `grow`, there is no relocation fallback here.
        unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Attempts to shrink the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how shrinking a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink]");

        // Precondition: both layouts must already be page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to change, return the existing region as-is.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Precondition: shrinking must never grow the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        // Prefer shrinking in place; fall back to relocating the region elsewhere.
        if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be
    /// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink_in_place]");

        // Precondition: both layouts must already be page-aligned.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to change, return the existing region as-is.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        // Precondition: shrinking must never grow the region.
        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        // Unlike `shrink`, there is no relocation fallback here.
        unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Updates the access rules for the virtual memory region referenced by `ptr`.
    ///
    /// If this returns `Ok`, access to this region must obey the new `rules` or cause a hardware fault.
    ///
    /// If this method returns `Err`, the access rules of the memory region are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    pub unsafe fn update_access_rules(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<()> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::update_access_rules]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Record the rule change in the batch, then push it into the raw address space.
        region.update_access_rules(access_rules, &mut self.batched_raw)?;

        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(())
    }
    /// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout` and is
    /// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
    /// virtual memory region.
    ///
    /// TODO describe how clearing a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, clearing a virtual memory region is not supported by the backing storage, or
    /// clearing otherwise fails.
    pub unsafe fn clear(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::clear]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Record the clear in the batch, then push it into the raw address space.
        region.clear(&mut self.batched_raw)?;

        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
594594+595595+ pub fn assert_valid(&self, msg: &str) {
596596+ let mut regions = self.regions.iter();
597597+598598+ let Some(first_region) = regions.next() else {
599599+ assert!(
600600+ self.regions.is_empty(),
601601+ "{msg}region iterator is empty but tree is not."
602602+ );
603603+604604+ return;
605605+ };
606606+607607+ first_region.assert_valid(msg);
608608+609609+ let mut seen_range = first_region.range().clone();
610610+611611+ while let Some(region) = regions.next() {
612612+ assert!(
613613+ !region.range().is_overlapping(&seen_range),
614614+ "{msg}region cannot overlap previous region; region={region:?}"
615615+ );
616616+ assert!(
617617+ region.range().start >= self.max_range.start
618618+ && region.range().end <= self.max_range.end,
619619+ "{msg}region cannot lie outside of max address space range; region={region:?}"
620620+ );
621621+622622+ seen_range = seen_range.start..region.range().end;
623623+624624+ region.assert_valid(msg);
625625+626626+ // TODO assert validity of of VMO against phys addresses
627627+ // let (_phys, access_rules) = self
628628+ // .batched_raw
629629+ // .raw_address_space()
630630+ // .lookup(region.range().start)
631631+ // .unwrap_or_else(|| {
632632+ // panic!("{msg}region base address is not mapped in raw address space region={region:?}")
633633+ // });
634634+ //
635635+ // assert_eq!(
636636+ // access_rules,
637637+ // region.access_rules(),
638638+ // "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
639639+ // region.access_rules(),
640640+ // );
641641+ }
642642+ }
    /// Attempts to grow a virtual memory region in-place. This method is shared between [`Self::grow`]
    /// and [`Self::grow_in_place`].
    ///
    /// (The previous doc wrongly referenced `shrink`/`shrink_in_place` — this is the grow helper.)
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be a multiple of PAGE_SIZE
    unsafe fn grow_in_place_inner(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };

        // The following region (if any) bounds how far this one may grow in place.
        let next_range = cursor.peek_next().map(|region| region.range().clone());

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        region.grow_in_place(new_layout, next_range, &mut self.batched_raw)?;

        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::shrink`]
    /// and [`Self::shrink_in_place`].
    ///
    /// (The previous doc wrongly referenced `grow`/`grow_in_place` — this is the shrink helper.)
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be a multiple of PAGE_SIZE
    unsafe fn shrink_in_place_inner(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        region.shrink(new_layout, &mut self.batched_raw)?;

        self.batched_raw.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Reallocates a virtual address region. This will unmap and remove the old region, allocating
    /// a new region that will be backed the old regions physical memory.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///   NOTE(review): this bound looks copy-pasted from the shrink path — a grow-reallocation
    ///   would need `new_layout.size()` to be allowed to exceed `old_layout.size()`; confirm.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn reallocate_region(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller (`ptr` must be currently mapped)
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
        // Safety: a mapped `ptr` always has a region, so the cursor cannot be vacant here.
        // NOTE(review): the region is removed from the tree here but never re-inserted in this
        // method, and `batched_raw` is not flushed (unlike the in-place grow/shrink paths) —
        // presumably the caller is responsible for both; verify once `move_to` is implemented.
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        let spot = self.find_spot_for(new_layout).context(format_err!(
            "cannot find free spot for layout {new_layout:?}"
        ))?;

        region.move_to(spot, new_layout, &mut self.batched_raw)?;

        Ok(region.as_non_null())
    }
729729+730730+ /// Find a spot in the address space that satisfies the given `layout` requirements.
731731+ ///
732732+ /// If a spot suitable for holding data described by `layout` is found, the base address of the
733733+ /// address range is returned in `Some`. The returned address is already correct aligned to
734734+ /// `layout.align()`.
735735+ ///
736736+ /// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
737737+ /// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
738738+ /// be satisfied *at the moment*. Calls to this method will a different size, alignment, or at a
739739+ /// different time might still succeed.
740740+ fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> {
741741+ // The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is
742742+ // guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works
743743+ // elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively.
744744+ // Here is how it works:
745745+ // 1. We set up two counters: (see the GapVisitor)
746746+ // - `candidate_spot_count` which we initialize to zero
747747+ // - `target_index` which we either set to a random value between 0..<the maximum number of
748748+ // possible addresses in the address space> if ASLR is requested OR to zero otherwise.
749749+ // 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the
750750+ // gaps between regions. We count the number of addresses in each gap that satisfy the
751751+ // requested `Layout`s size and alignment and add that to the `candidate_spot_count`.
752752+ // IF the number of spots in the gap is greater than our chosen target index, we pick the
753753+ // spot at the target index and finish. ELSE we *decrement* the target index by the number
754754+ // of spots and continue to the next gap.
755755+ // 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original
756756+ // guess for `target_index` was too big, in which case we need to retry.
757757+ // 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know
758758+ // the *actual* number of possible spots in the address space since we just counted them
759759+ // during the first attempt. We initialize `target_index` to `0..candidate_spot_count`
760760+ // which is guaranteed to return us a spot.
761761+ // IF `candidate_spot_count` is ZERO after the first attempt, there is no point in
762762+ // retrying since we cannot fulfill the requested layout.
763763+ //
764764+ // Note that in practice, we use a binary tree to keep track of regions, and we use binary search
765765+ // to optimize the search for a suitable gap instead of linear iteration.
766766+767767+ let layout = layout.pad_to_align();
768768+769769+ // First attempt: guess a random target index
770770+ let max_candidate_spots = self.max_range.size();
771771+772772+ let target_index: usize = self
773773+ .rng
774774+ .as_mut()
775775+ .map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap()))
776776+ .unwrap_or_default();
777777+778778+ // First attempt: visit the binary search tree to find a gap
779779+ let mut v = GapVisitor::new(layout, target_index);
780780+ self.visit_gaps(&mut v);
781781+782782+ // if we found a spot already we're done
783783+ if let Some(chosen) = v.chosen {
784784+ return Some(chosen);
785785+ }
786786+787787+ // otherwise, Second attempt: we need to retry with the correct candidate spot count
788788+ // but if we counted no suitable candidate spots during the first attempt, we cannot fulfill
789789+ // the request.
790790+ if v.candidate_spots == 0 {
791791+ return None;
792792+ }
793793+794794+ // Second attempt: pick a new target_index that's actually fulfillable
795795+ let target_index: usize = self
796796+ .rng
797797+ .as_mut()
798798+ .map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap()))
799799+ .unwrap_or_default();
800800+801801+ // Second attempt: visit the binary search tree to find a gap
802802+ let mut v = GapVisitor::new(layout, target_index);
803803+ self.visit_gaps(&mut v);
804804+805805+ let chosen = v
806806+ .chosen
807807+ .expect("There must be a chosen spot after the first attempt. This is a bug!");
808808+809809+ debug_assert!(chosen.is_canonical::<R>());
810810+811811+ Some(chosen)
812812+ }
    /// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space
    /// from lowest to highest addresses.
    fn visit_gaps(&self, v: &mut GapVisitor) {
        let Some(root) = self.regions.root().get() else {
            // if the tree is empty, we treat the entire max_range as the gap
            // note that we do not care about the returned ControlFlow, as there is nothing else we
            // could try to find a spot anyway
            let _ = v.visit(self.max_range.clone());

            return;
        };

        // see if there is a suitable gap BEFORE the first address space region
        if v.visit(self.max_range.start..root.subtree_range().start)
            .is_break()
        {
            return;
        }

        // now comes the main part of the search. we start at the WAVLTree root node and do a
        // binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
        // to speed up this search. See `AddressSpaceRegion` for details on how this works.

        let mut maybe_current = self.regions.root().get();
        // high-water mark of subtrees we have fully processed, so we never descend into a
        // subtree twice when walking back up to the parent
        let mut already_visited = VirtualAddress::MIN;

        while let Some(current) = maybe_current {
            // only inspect this subtree at all if its metadata says it contains a gap
            // large enough for the requested layout
            if current.suitable_gap_in_subtree(v.layout()) {
                // First, look at the left subtree
                if let Some(left) = current.left_child() {
                    if left.suitable_gap_in_subtree(v.layout())
                        && left.subtree_range().end > already_visited
                    {
                        maybe_current = Some(left);
                        continue;
                    }

                    // gap between the left subtree and the current region itself
                    if v.visit(left.subtree_range().end..current.range().start)
                        .is_break()
                    {
                        return;
                    }
                }

                if let Some(right) = current.right_child() {
                    // gap between the current region and its right subtree
                    if v.visit(current.range().end..right.subtree_range().start)
                        .is_break()
                    {
                        return;
                    }

                    if right.suitable_gap_in_subtree(v.layout())
                        && right.subtree_range().end > already_visited
                    {
                        maybe_current = Some(right);
                        continue;
                    }
                }
            }

            // this subtree is exhausted; mark it visited and climb back up to the parent
            already_visited = current.subtree_range().end;
            maybe_current = current.parent();
        }

        // see if there is a suitable gap AFTER the last address space region
        if v.visit(root.subtree_range().end..self.max_range.end)
            .is_break()
        {
            return;
        }
    }
886886+}
887887+888888+/// # Safety
889889+///
890890+/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
891891+/// * `layout` must [*fit*] that region of memory.
892892+///
893893+/// [*currently mapped*]: #currently-mapped-memory
894894+/// [*fit*]: #memory-fitting
895895+unsafe fn get_region_containing_ptr(
896896+ regions: &mut WAVLTree<AddressSpaceRegion>,
897897+ ptr: NonNull<u8>,
898898+ layout: Layout,
899899+) -> CursorMut<'_, AddressSpaceRegion> {
900900+ let addr = VirtualAddress::from_non_null(ptr);
901901+902902+ let cursor = regions.lower_bound_mut(Bound::Included(&addr));
903903+904904+ assert_unsafe_precondition_!(
905905+ "TODO",
906906+ (cursor: &CursorMut<AddressSpaceRegion> = &cursor) => cursor.get().is_some()
907907+ );
908908+909909+ // Safety: The caller guarantees the pointer is currently mapped which means we must have
910910+ // a corresponding address space region for it
911911+ let region = unsafe { cursor.get().unwrap_unchecked() };
912912+913913+ assert_unsafe_precondition_!(
914914+ "TODO",
915915+ (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
916916+ let range = region.range();
917917+918918+ range.start.get() <= addr.get() && addr.get() < range.end.get()
919919+ }
920920+ );
921921+922922+ assert_unsafe_precondition_!(
923923+ "`layout` does not fit memory region",
924924+ (layout: Layout = layout, region: &AddressSpaceRegion = ®ion) => region.layout_fits_region(layout)
925925+ );
926926+927927+ cursor
928928+}
/// Visitor state for the gap search performed by `find_spot_for` / `visit_gaps`.
pub(crate) struct GapVisitor {
    /// The (padded) layout we are trying to find a spot for
    layout: Layout,
    /// Index of the candidate spot to pick; decremented by each visited gap's spot count
    target_index: usize,
    /// Running total of suitable spots seen across all visited gaps
    candidate_spots: usize,
    /// The chosen base address, set once a gap containing `target_index` was found
    chosen: Option<VirtualAddress>,
}
impl GapVisitor {
    /// Creates a visitor that will pick the `target_index`-th suitable spot it encounters.
    fn new(layout: Layout, target_index: usize) -> Self {
        Self {
            layout,
            target_index,
            candidate_spots: 0,
            chosen: None,
        }
    }

    /// The layout this visitor is searching a spot for.
    pub fn layout(&self) -> Layout {
        self.layout
    }

    /// Returns the number of spots in the given range that satisfy the layout we require
    fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
        debug_assert!(
            range.start.is_aligned_to(self.layout.align())
                && range.end.is_aligned_to(self.layout.align())
        );

        // ranges passed in here can become empty for a number of reasons (aligning might produce
        // ranges where start > end, or the range might be empty to begin with) in either case an
        // empty range means no spots are available
        if range.is_empty() {
            return 0;
        }

        // one spot per alignment step that still leaves `layout.size()` bytes in the range
        let range_size = range.size();
        if range_size >= self.layout.size() {
            ((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
        } else {
            0
        }
    }

    /// Visit one gap; returns `Break` once a spot has been chosen so the caller can stop early.
    pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
        // if we have already chosen a spot, signal the caller to stop
        if self.chosen.is_some() {
            return ControlFlow::Break(());
        }

        // shrink the gap inwards so both ends are aligned to the requested alignment
        // NOTE(review): the `unwrap` assumes `checked_align_in` only fails on overflow that
        // cannot occur for gaps inside `max_range` — confirm
        let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();

        let spot_count = self.spots_in_range(&aligned_gap);

        self.candidate_spots += spot_count;

        if self.target_index < spot_count {
            // the target index falls inside this gap: pick the spot at that index
            self.chosen = Some(
                aligned_gap
                    .start
                    .checked_add(self.target_index << self.layout.align().ilog2())
                    .unwrap(),
            );

            ControlFlow::Break(())
        } else {
            // not in this gap; skip past its spots and keep searching
            self.target_index -= spot_count;

            ControlFlow::Continue(())
        }
    }
}
+336
libs/mem/src/address_space/batch.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::cmp;
99+use core::num::{NonZero, NonZeroUsize};
1010+1111+use smallvec::SmallVec;
1212+1313+use crate::address_space::{Flush, RawAddressSpace};
1414+use crate::{AccessRules, PhysicalAddress, VirtualAddress};
/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
///
/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
/// this number.
pub struct Batch {
    // inline storage for the common case of only a few operations per batch
    ops: SmallVec<[BatchOperation; 4]>,
}

/// A single pending operation; see the `map`/`unmap`/`set_access_rules` methods.
enum BatchOperation {
    Map(MapOperation),
    Unmap(UnmapOperation),
    SetAccessRules(SetAccessRulesOperation),
}

/// Pending `RawAddressSpace::map` call: map `len` bytes at `virt` to `phys`.
struct MapOperation {
    virt: VirtualAddress,
    phys: PhysicalAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

/// Pending `RawAddressSpace::unmap` call: unmap `len` bytes at `virt`.
struct UnmapOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
}

/// Pending `RawAddressSpace::set_access_rules` call for `len` bytes at `virt`.
struct SetAccessRulesOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

// ===== impl Batch =====

impl Batch {
    /// Construct a new empty [`Batch`].
    pub fn new() -> Self {
        Self {
            ops: SmallVec::new(),
        }
    }

    /// Add a [`map`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// NOTE(review): this fn is *not* `unsafe` although it documents safety preconditions
    /// (unlike [`Self::unmap`]) — they are only `debug_assert!`ed in `flush_changes`; confirm
    /// whether this method should be `unsafe` as well.
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`map`]: RawAddressSpace::map
    pub fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = MapOperation {
            virt,
            phys,
            len,
            access_rules,
        };

        // try to merge the new operation into any existing map operation first
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Map(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                // merging failed; `try_merge_with` hands the operation back for the next try
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::Map(new));
    }

    /// Add an [`unmap`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`unmap`]: RawAddressSpace::unmap
    pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) {
        let mut new = UnmapOperation { virt, len };

        // try to merge the new operation into any existing unmap operation first
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Unmap(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::Unmap(new));
    }

    /// Add a [`set_access_rules`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// NOTE(review): this fn is *not* `unsafe` although it documents safety preconditions
    /// (unlike [`Self::unmap`]) — confirm whether it should be.
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`set_access_rules`]: RawAddressSpace::set_access_rules
    pub fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = SetAccessRulesOperation {
            virt,
            len,
            access_rules,
        };

        // try to merge the new operation into any existing set-access-rules operation first
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::SetAccessRules(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::SetAccessRules(new));
    }

    /// Flushes the `Batch` ensuring all changes are materialized into the raw address space.
    pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> {
        // collect all required TLB maintenance into a single Flush object so we pay for at
        // most one flush per batch
        let mut flush = raw_aspace.flush();
        for op in self.ops.drain(..) {
            match op {
                BatchOperation::Map(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
                    }
                }
                BatchOperation::Unmap(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.unmap(op.virt, op.len, &mut flush);
                    }
                }
                BatchOperation::SetAccessRules(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
                    }
                }
            };
        }
        flush.flush()
    }
}
204204+205205+// ===== impl MapOperation =====
206206+207207+impl MapOperation {
208208+ /// Returns true if this operation can be merged with `other`.
209209+ ///
210210+ /// Map operations can be merged if:
211211+ /// - their [`AccessRules`] are the same
212212+ /// - their virtual address ranges are contiguous (no gap between self and other)
213213+ /// - their physical address ranges are contiguous
214214+ /// - the resulting virtual address range still has the same size as the resulting
215215+ /// physical address range
216216+ const fn can_merge_with(&self, other: &Self) -> bool {
217217+ // the access rules need to be the same
218218+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
219219+220220+ let overlap_virt = self.virt.get() <= other.len.get()
221221+ && other.virt.get() <= self.virt.get() + self.len.get();
222222+223223+ let overlap_phys = self.phys.get() <= other.len.get()
224224+ && other.phys.get() <= self.phys.get() + self.len.get();
225225+226226+ let offset_virt = self.virt.get().wrapping_sub(other.virt.get());
227227+ let offset_phys = self.virt.get().wrapping_sub(other.virt.get());
228228+ let same_offset = offset_virt == offset_phys;
229229+230230+ same_rules && overlap_virt && overlap_phys && same_offset
231231+ }
232232+233233+ /// Attempt to merge this operation with `other`.
234234+ ///
235235+ /// If this returns `Ok`, `other` has been merged into `self`.
236236+ ///
237237+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
238238+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
239239+ if self.can_merge_with(&other) {
240240+ let offset = self.virt.get().wrapping_sub(other.virt.get());
241241+ let len = self
242242+ .len
243243+ .get()
244244+ .checked_add(other.len.get())
245245+ .unwrap()
246246+ .wrapping_add(offset);
247247+248248+ self.virt = cmp::min(self.virt, other.virt);
249249+ self.phys = cmp::min(self.phys, other.phys);
250250+ self.len = NonZero::new(len).ok_or(other)?;
251251+252252+ Ok(())
253253+ } else {
254254+ Err(other)
255255+ }
256256+ }
257257+}
258258+259259+// ===== impl UnmapOperation =====
260260+261261+impl UnmapOperation {
262262+ /// Returns true if this operation can be merged with `other`.
263263+ ///
264264+ /// Unmap operations can be merged if:
265265+ /// - their virtual address ranges are contiguous (no gap between self and other)
266266+ const fn can_merge_with(&self, other: &Self) -> bool {
267267+ self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get()
268268+ }
269269+270270+ /// Attempt to merge this operation with `other`.
271271+ ///
272272+ /// If this returns `Ok`, `other` has been merged into `self`.
273273+ ///
274274+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
275275+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
276276+ if self.can_merge_with(&other) {
277277+ let offset = self.virt.get().wrapping_sub(other.virt.get());
278278+ let len = self
279279+ .len
280280+ .get()
281281+ .checked_add(other.len.get())
282282+ .unwrap()
283283+ .wrapping_add(offset);
284284+285285+ self.virt = cmp::min(self.virt, other.virt);
286286+ self.len = NonZero::new(len).ok_or(other)?;
287287+288288+ Ok(())
289289+ } else {
290290+ Err(other)
291291+ }
292292+ }
293293+}
294294+295295+// ===== impl ProtectOperation =====
296296+297297+impl SetAccessRulesOperation {
298298+ /// Returns true if this operation can be merged with `other`.
299299+ ///
300300+ /// Protect operations can be merged if:
301301+ /// - their [`AccessRules`] are the same
302302+ /// - their virtual address ranges are contiguous (no gap between self and other)
303303+ const fn can_merge_with(&self, other: &Self) -> bool {
304304+ // the access rules need to be the same
305305+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
306306+307307+ let overlap = self.virt.get() <= other.len.get()
308308+ && other.virt.get() <= self.virt.get() + self.len.get();
309309+310310+ same_rules && overlap
311311+ }
312312+313313+ /// Attempt to merge this operation with `other`.
314314+ ///
315315+ /// If this returns `Ok`, `other` has been merged into `self`.
316316+ ///
317317+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
318318+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
319319+ if self.can_merge_with(&other) {
320320+ let offset = self.virt.get().wrapping_sub(other.virt.get());
321321+ let len = self
322322+ .len
323323+ .get()
324324+ .checked_add(other.len.get())
325325+ .unwrap()
326326+ .wrapping_add(offset);
327327+328328+ self.virt = cmp::min(self.virt, other.virt);
329329+ self.len = NonZero::new(len).ok_or(other)?;
330330+331331+ Ok(())
332332+ } else {
333333+ Err(other)
334334+ }
335335+ }
336336+}
+371
libs/mem/src/address_space/region.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::boxed::Box;
99+use core::alloc::Layout;
1010+use core::mem::offset_of;
1111+use core::ops::{Range, RangeBounds};
1212+use core::pin::Pin;
1313+use core::ptr::NonNull;
1414+use core::{cmp, mem, slice};
1515+1616+use anyhow::bail;
1717+use pin_project::pin_project;
1818+1919+use crate::address_space::batch::Batch;
2020+use crate::{AccessRules, AddressRangeExt, VirtualAddress};
#[pin_project]
#[derive(Debug)]
pub struct AddressSpaceRegion {
    /// The access rules this region's memory was reserved with
    access_rules: AccessRules,
    /// The layout this region was allocated for; `range` starts aligned to `layout.align()`
    /// and covers at least `layout.size()` bytes (see `assert_valid`)
    layout: Layout,
    /// The virtual address range covered by this region
    range: Range<VirtualAddress>,
    /// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
    subtree_range: Range<VirtualAddress>,
    /// The largest gap in this subtree, used when allocating new regions
    max_gap: usize,
    /// Links to other regions in the WAVL tree
    links: wavltree::Links<AddressSpaceRegion>,
}
3535+3636+impl AddressSpaceRegion {
    /// Creates a new region covering `layout.size()` bytes starting at `spot`.
    ///
    /// # Panics
    ///
    /// Panics if `spot + layout.size()` overflows the address space.
    pub const fn new(spot: VirtualAddress, layout: Layout, access_rules: AccessRules) -> Self {
        Self {
            range: spot..spot.checked_add(layout.size()).unwrap(),
            access_rules,
            layout,

            // a lone (unlinked) region has no gaps below it; its subtree is just itself
            max_gap: 0,
            subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
            links: wavltree::Links::new(),
        }
    }

    /// The virtual address range covered by this region.
    pub const fn range(&self) -> &Range<VirtualAddress> {
        &self.range
    }

    /// The address range covered by this region and its entire WAVL subtree.
    pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
        &self.subtree_range
    }

    /// The access rules this region was reserved with.
    pub const fn access_rules(&self) -> AccessRules {
        self.access_rules
    }

    /// Returns the region's memory as a byte slice.
    pub fn as_slice(&self) -> &[u8] {
        let ptr = self.range.start.as_ptr();
        let len = self.range.size();

        // SAFETY: NOTE(review) this assumes the whole range is currently mapped and readable;
        // the region itself only tracks the reservation — confirm callers uphold this
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Returns the region's memory as a mutable byte slice.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.range.start.as_mut_ptr();
        let len = self.range.size();

        // SAFETY: NOTE(review) same assumption as `as_slice`, plus writability — confirm
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    /// Returns the region's memory as a raw `NonNull` byte slice (no access performed).
    pub fn as_non_null(&self) -> NonNull<[u8]> {
        let ptr = self.range.start.as_non_null().unwrap();
        NonNull::slice_from_raw_parts(ptr, self.range.size())
    }

    /// Returns `true` if `layout` *fits* this region: the region start satisfies the
    /// requested alignment and the requested size lies between the region's own layout
    /// size and the full range size.
    pub const fn layout_fits_region(&self, layout: Layout) -> bool {
        self.range.start.is_aligned_to(layout.align())
            && layout.size() >= self.layout.size()
            && layout.size() <= self.range.end.get() - self.range.start.get()
    }

    /// grow region to `new_len`, attempting to grow the VMO accordingly
    /// `new_layout.size()` must be greater than or equal to `self.layout.size()`
    pub fn grow_in_place(
        &mut self,
        new_layout: Layout,
        next_range: Option<Range<VirtualAddress>>,
        _batch: &mut Batch,
    ) -> crate::Result<()> {
        // a stricter alignment cannot be satisfied without moving the region
        if new_layout.align() > self.layout.align() {
            bail!("cannot grow in-place: New alignment greater than current");
        }

        let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();

        // growing must not collide with the next region in the tree
        if let Some(next_range) = next_range
            && next_range.is_overlapping(&new_range)
        {
            bail!("cannot grow in-place: New overlapping with next range");
        }

        // TODO attempt to resize VMO
        self.update_range(new_range);

        Ok(())
    }
111111+112112+ /// shrink region to the first `len` bytes, dropping the rest frames.
113113+ /// `new_layout.size()` mut be smaller than or equal to `self.layout.size()`
114114+ pub fn shrink(&mut self, new_layout: Layout, _batch: &mut Batch) -> crate::Result<()> {
115115+ if new_layout.align() > self.layout.align() {
116116+ bail!("cannot grow in-place: New alignment greater than current");
117117+ }
118118+119119+ let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
120120+121121+ // TODO drop rest pages in VMO if possible (add unmaps to batch)
122122+ self.update_range(new_range);
123123+124124+ Ok(())
125125+ }
    /// move the entire region to the new base address, remapping any already mapped frames
    ///
    /// NOTE(review): not yet implemented — the method computes the target range and then
    /// `todo!()`s; `batch` is unused until the remapping is implemented.
    pub fn move_to(&mut self, new_base: VirtualAddress, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
        let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();

        // TODO
        //  - attempt to resize VMO
        //  - update self range
        //  - for every frame in VMO
        //      - attempt to map at new offset (add maps to batch)

        todo!()
    }

    /// Commit (populate) the frames backing `range`, not yet implemented.
    pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
    where
        R: RangeBounds<VirtualAddress>,
    {
        // TODO
        //  - for every *uncommited* frame in range
        //      - request frame from VMO (add map to batch)

        todo!()
    }

    /// Release the committed frames backing `range`, not yet implemented.
    pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
    where
        R: RangeBounds<VirtualAddress>,
    {
        // TODO
        //  - for every *committed* frame in range
        //      - drop pages in VMO if possible (add unmaps to batch)

        todo!()
    }

    /// updates the access rules of this region
    pub fn update_access_rules(
        &mut self,
        access_rules: AccessRules,
        batch: &mut Batch,
    ) -> crate::Result<()> {
        // TODO
        //  - for every frame in VMO
        //      - update access rules (add protects to batch)
        //  - update self access rules

        todo!()
    }

    /// Reset the region's memory to zeroes, not yet implemented.
    pub fn clear(&mut self, batch: &mut Batch) -> crate::Result<()> {
        // TODO
        //  - replace VMO with "zeroed" VMO
        //  - drop pages in VMO if possible (add unmaps to batch)

        todo!()
    }

    /// Asserts all structural invariants of this region, prefixing failures with `msg`.
    pub fn assert_valid(&self, msg: &str) {
        assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
        assert!(
            self.subtree_range.start <= self.range.start
                && self.range.end <= self.subtree_range.end,
            "{msg}region range cannot be bigger than its subtree range; region={self:?}"
        );
        // NOTE(review): strict `<` — a gap exactly as large as the subtree range is rejected;
        // confirm that `<=` is not intended here
        assert!(
            self.max_gap < self.subtree_range.size(),
            "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}"
        );
        assert!(
            self.range.start.is_aligned_to(self.layout.align()),
            "{msg}region range is not aligned to its layout; region={self:?}"
        );
        assert!(
            self.range.size() >= self.layout.size(),
            "{msg}region range is smaller than its layout; region={self:?}"
        );

        self.links.assert_valid();
    }
    /// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used
    /// during gap-searching.
    pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool {
        // we need the layout to be padded to alignment
        debug_assert!(layout.size().is_multiple_of(layout.align()));

        self.max_gap >= layout.size()
    }

    /// Returns the left child node in the search tree of regions, used during gap-searching.
    pub fn left_child(&self) -> Option<&Self> {
        // SAFETY: NOTE(review) assumes a non-null left link always points at a live region
        // owned by the same tree — maintained by the wavltree implementation
        Some(unsafe { self.links.left()?.as_ref() })
    }

    /// Returns the right child node in the search tree of regions, used during gap-searching.
    pub fn right_child(&self) -> Option<&Self> {
        // SAFETY: NOTE(review) same invariant as `left_child`
        Some(unsafe { self.links.right()?.as_ref() })
    }

    /// Returns the parent node in the search tree of regions, used during gap-searching.
    pub fn parent(&self) -> Option<&Self> {
        // SAFETY: NOTE(review) same invariant as `left_child`
        Some(unsafe { self.links.parent()?.as_ref() })
    }

    /// Replaces this region's range and re-propagates the gap metadata up the tree.
    fn update_range(&mut self, new_range: Range<VirtualAddress>) {
        self.range = new_range;
        // We also must propagate the information about our changed range to the rest of the tree
        // so searching for a free spot returns the correct results.
        Self::propagate_update_to_parent(Some(NonNull::from(self)));
    }
    /// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`]
    /// implementation below after each tree mutation that impacted this node or its subtree in some way
    /// (insertion, rotation, deletion).
    ///
    /// Returns `true` if this nodes metadata changed.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn update_gap_metadata(
        mut node: NonNull<Self>,
        left: Option<NonNull<Self>>,
        right: Option<NonNull<Self>>,
    ) -> bool {
        // size of the gap between two adjacent addresses; zero if they overlap/are reversed
        fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize {
            right_first_byte
                .checked_sub_addr(left_last_byte)
                .unwrap_or_default() // TODO use saturating_sub_addr
        }

        let node = unsafe { node.as_mut() };
        let mut left_max_gap = 0;
        let mut right_max_gap = 0;

        // recalculate the subtree_range start
        let old_subtree_range_start = if let Some(left) = left {
            let left = unsafe { left.as_ref() };
            // the gap between the left subtree's end and this node's start also counts
            let left_gap = gap(left.subtree_range.end, node.range.start);
            left_max_gap = cmp::max(left_gap, left.max_gap);
            mem::replace(&mut node.subtree_range.start, left.subtree_range.start)
        } else {
            mem::replace(&mut node.subtree_range.start, node.range.start)
        };

        // recalculate the subtree range end
        let old_subtree_range_end = if let Some(right) = right {
            let right = unsafe { right.as_ref() };
            // the gap between this node's end and the right subtree's start also counts
            let right_gap = gap(node.range.end, right.subtree_range.start);
            right_max_gap = cmp::max(right_gap, right.max_gap);
            mem::replace(&mut node.subtree_range.end, right.subtree_range.end)
        } else {
            mem::replace(&mut node.subtree_range.end, node.range.end)
        };

        // recalculate the max_gap
        let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap));

        // report whether anything changed so the caller knows to keep propagating upwards
        old_max_gap != node.max_gap
            || old_subtree_range_start != node.subtree_range.start
            || old_subtree_range_end != node.subtree_range.end
    }
    // Propagate metadata updates to this regions parent in the search tree. If we had to update
    // our metadata the parent must update its metadata too. Walks the parent chain up to the
    // root, stopping as soon as a node's cached metadata turns out to be unchanged.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) {
        while let Some(node) = maybe_node {
            let links = unsafe { &node.as_ref().links };
            let changed = Self::update_gap_metadata(node, links.left(), links.right());

            // if the metadata didn't actually change, we don't need to recalculate parents
            if !changed {
                return;
            }

            maybe_node = links.parent();
        }
    }
303303+}
unsafe impl wavltree::Linked for AddressSpaceRegion {
    /// Any heap-allocated type that owns an element may be used.
    ///
    /// An element *must not* move while part of an intrusive data
    /// structure. In many cases, `Pin` may be used to enforce this.
    type Handle = Pin<Box<Self>>; // TODO better handle type

    type Key = VirtualAddress;

    /// Convert an owned `Handle` into a raw pointer
    fn into_ptr(handle: Self::Handle) -> NonNull<Self> {
        // Safety: wavltree treats the ptr as pinned
        unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
    }

    /// Convert a raw pointer back into an owned `Handle`.
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        // Safety: `NonNull` *must* be constructed from a pinned reference
        // which the tree implementation upholds.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) }
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> {
        // Derive a pointer to the `links` field by raw address arithmetic instead of
        // going through a `&self` reference, which could alias the tree's mutable access.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }

    fn get_key(&self) -> &Self::Key {
        // regions are ordered in the tree by their start address
        &self.range.start
    }

    fn after_insert(self: Pin<&mut Self>) {
        // a freshly inserted node is a leaf: its cached subtree metadata must equal
        // its own range and it cannot contain a gap yet
        debug_assert_eq!(self.subtree_range.start, self.range.start);
        debug_assert_eq!(self.subtree_range.end, self.range.end);
        debug_assert_eq!(self.max_gap, 0);
        Self::propagate_update_to_parent(self.links.parent());
    }

    fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) {
        // the former parent (and its ancestors) must recompute their cached metadata
        Self::propagate_update_to_parent(parent);
    }

    fn after_rotate(
        self: Pin<&mut Self>,
        parent: NonNull<Self>,
        sibling: Option<NonNull<Self>>,
        lr_child: Option<NonNull<Self>>,
        side: wavltree::Side,
    ) {
        let this = self.project();
        // Safety: caller ensures ptr is valid
        let _parent = unsafe { parent.as_ref() };

        // after a rotation `self` occupies the tree position the parent used to have,
        // so it inherits the parent's cached subtree metadata wholesale...
        this.subtree_range.start = _parent.subtree_range.start;
        this.subtree_range.end = _parent.subtree_range.end;
        *this.max_gap = _parent.max_gap;

        // ...while the demoted parent recomputes its metadata from its new children
        if side == wavltree::Side::Left {
            Self::update_gap_metadata(parent, sibling, lr_child);
        } else {
            Self::update_gap_metadata(parent, lr_child, sibling);
        }
    }
}
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::cmp::PartialEq;
1010+use core::fmt::Debug;
1111+use core::mem::offset_of;
1212+use core::ptr::NonNull;
1313+use core::sync::atomic;
1414+use core::sync::atomic::{AtomicUsize, Ordering};
1515+1616+use cordyceps::{list, Linked};
1717+use pin_project::pin_project;
1818+1919+use crate::frame_alloc::FrameAllocator;
2020+use crate::PhysicalAddress;
2121+2222+/// Soft limit on the amount of references that may be made to a `Frame`.
2323+const MAX_REFCOUNT: usize = isize::MAX as usize;
/// A reference-counted handle to a physical memory [`Frame`].
///
/// Cloning a `FrameRef` bumps the frame's refcount; dropping the last `FrameRef`
/// returns the frame to its [`FrameAllocator`] (see the `Clone`/`Drop` impls below).
pub struct FrameRef<A: FrameAllocator> {
    // raw pointer into the allocator's frame bookkeeping
    frame: NonNull<Frame>,
    // the allocator the frame is returned to when the last reference is dropped
    alloc: A,
}
/// Per-frame bookkeeping: the frame's physical address, its reference count, and
/// intrusive list links used by the allocator's free lists / CPU-local caches.
#[pin_project(!Unpin)]
#[derive(Debug)]
pub struct Frame {
    // physical base address of the frame
    addr: PhysicalAddress,
    // number of outstanding `FrameRef`s; 0 means the frame is free
    refcount: AtomicUsize,
    // intrusive links; pinned because the frame may be a member of an intrusive list
    #[pin]
    links: list::Links<Self>,
}
// ===== impl Frame =====

// Safety: `addr` is plain data and `refcount` is atomic; the intrusive `links` are only
// mutated through the list that owns the frame — NOTE(review): the original comment
// referenced a nonexistent `assert_impl_all!`; confirm external synchronization of `links`.
unsafe impl Send for Frame {}

// Safety: see the `Send` impl above — all shared access goes through `AtomicUsize` or
// the externally synchronized intrusive list.
unsafe impl Sync for Frame {}
impl<A: FrameAllocator + Clone> Clone for FrameRef<A> {
    /// Makes a clone of the `Frame`.
    ///
    /// This creates a reference to the same `FrameInfo`, increasing the reference count by one.
    fn clone(&self) -> Self {
        // Increase the reference count by one. Using relaxed ordering, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // Again, restating what the `Arc` implementation quotes from the
        // [Boost documentation][1]:
        //
        // > Increasing the reference counter can always be done with memory_order_relaxed: New
        // > references to an object can only be formed from an existing
        // > reference, and passing an existing reference from one thread to
        // > another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.frame().refcount.fetch_add(1, Ordering::Relaxed);
        // a refcount of zero means the frame was already freed; cloning from it is a bug
        debug_assert_ne!(old_size, 0);

        // Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
        // `Frame`s somewhere (which we really shouldn't but just in case). Overflowing the refcount
        // would be *really* bad as it would treat the frame as free and potentially cause a use-after-free
        // scenario. Realistically this branch should never be taken.
        //
        // Also worth noting: Just like `Arc`, the refcount could still overflow when in between
        // the load above and this check some other cpu increased the refcount from `isize::MAX` to
        // `usize::MAX` but that seems unlikely. The other option, doing the comparison and update in
        // one conditional atomic operation produces much worse code, so if its good enough for the
        // standard library, it is good enough for us.
        assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");

        // Safety: `self.frame` is valid by the invariant of the existing `FrameRef`.
        unsafe { Self::from_raw_parts(self.frame, self.alloc.clone()) }
    }
}
impl<A: FrameAllocator> Drop for FrameRef<A> {
    /// Drops the `Frame`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        // fetch_sub returns the *previous* value; anything other than 1 means other
        // references are still alive and we must not free the frame
        if self.frame().refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of `FrameInfo` happen before freeing it.
        // Because it is marked `Release`, the decreasing of the reference count synchronizes
        // with this `Acquire` fence. This means that use of `FrameInfo` happens before decreasing
        // the reference count, which happens before this fence, which happens before freeing `FrameInfo`.
        //
        // This section of the [Boost documentation][1], as quoted in Rust's `Arc` implementation,
        // may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        self.drop_slow();
    }
}
impl<A: FrameAllocator> FrameRef<A> {
    /// Constructs a `FrameRef` from its raw parts without touching the refcount.
    ///
    /// # Safety
    ///
    /// `frame` must point at live frame bookkeeping whose refcount already accounts
    /// for the reference being created here.
    unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: A) -> Self {
        Self { frame, alloc }
    }

    /// Shared access to the underlying frame bookkeeping.
    fn frame(&self) -> &Frame {
        // Safety: `self.frame` is valid for the lifetime of this `FrameRef` by construction.
        unsafe { self.frame.as_ref() }
    }

    /// Cold path of `Drop`: returns the frame to the allocator once the refcount hit zero.
    #[inline(never)]
    fn drop_slow(&mut self) {
        // Safety: FRAME_SIZE is a power of two page size, so size == align is a valid layout.
        // NOTE(review): this always frees exactly one frame — assumes a `FrameRef` never
        // owns a multi-frame block; confirm against the allocator's contract.
        let layout = unsafe { Layout::from_size_align_unchecked(A::FRAME_SIZE, A::FRAME_SIZE) };
        // Safety: refcount reached zero, so this is the sole remaining reference.
        unsafe {
            self.alloc.deallocate(self.frame, layout);
        }
    }
}
133133+134134+// ===== impl Frame =====
135135+136136+impl PartialEq<Frame> for &Frame {
137137+ fn eq(&self, other: &Frame) -> bool {
138138+ self.refcount() == other.refcount() && self.addr == other.addr
139139+ }
140140+}
141141+142142+impl Frame {
143143+ pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
144144+ Self {
145145+ addr,
146146+ refcount: AtomicUsize::new(initial_refcount),
147147+ links: list::Links::new(),
148148+ }
149149+ }
150150+151151+ pub fn refcount(&self) -> usize {
152152+ self.refcount.load(Ordering::Relaxed)
153153+ }
154154+155155+ pub fn addr(&self) -> PhysicalAddress {
156156+ self.addr
157157+ }
158158+}
// Safety: `Frame` is only ever inserted into a list through stable `NonNull` handles and
// is not moved while linked — NOTE(review): confirm frames are never relocated while listed.
unsafe impl Linked<list::Links<Self>> for Frame {
    // Handles are raw pointers: ownership of frames stays with the allocator's
    // bookkeeping slice, the list only borrows them.
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        // Derive the field pointer via raw address arithmetic to avoid creating an
        // intermediate `&Frame` that could alias the list's mutable access.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+135
libs/mem/src/frame_alloc.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod area;
99+mod area_selection;
1010+1111+use core::alloc::Layout;
1212+use core::cell::RefCell;
1313+use core::cmp;
1414+use core::ops::Range;
1515+use core::ptr::NonNull;
1616+use core::sync::atomic::{AtomicUsize, Ordering};
1717+1818+use cordyceps::List;
1919+use cpu_local::collection::CpuLocal;
2020+use fallible_iterator::FallibleIterator;
2121+use lock_api::Mutex;
2222+use smallvec::SmallVec;
2323+2424+use crate::address_space::RawAddressSpace;
2525+use crate::frame_alloc::area::Area;
2626+use crate::frame_alloc::area_selection::select_areas;
2727+use crate::{Frame, PhysicalAddress};
/// Error returned when a frame allocation request cannot be satisfied.
#[derive(Debug)]
pub struct AllocError;
/// An allocator for physical memory frames.
///
/// # Safety
///
/// Implementors must hand out frames that are valid, unaliased, and aligned/sized
/// according to `FRAME_SIZE`; `deallocate` must only be called with blocks previously
/// returned by `allocate` of the same allocator.
pub unsafe trait FrameAllocator: Send + Sync + 'static {
    /// The size in bytes of a single physical frame.
    const FRAME_SIZE: usize;
    /// Allocates frames satisfying `layout`, returning their bookkeeping entries.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
    /// Returns a previously allocated block to the allocator.
    ///
    /// # Safety
    ///
    /// `block`/`layout` must correspond to a live allocation made by this allocator.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
}
3737+3838+const MAX_FRAMES_IN_CACHE: usize = 256;
/// The global physical frame allocator: a set of buddy-allocator [`Area`]s behind a
/// mutex, fronted by a per-CPU cache of single frames to avoid lock contention.
pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> {
    // buddy areas covering the allocatable physical memory regions
    areas: Mutex<L, SmallVec<[Area<A>; 4]>>,
    // per-CPU stash of single frames (see MAX_FRAMES_IN_CACHE)
    cpu_local_cache: CpuLocal<RefCell<List<Frame>>>,
    // largest block alignment any area can currently provide (monotonic hint, may be stale)
    max_alignment_hint: AtomicUsize,
}
impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> {
    /// Builds the allocator from the given allocatable physical memory regions,
    /// carving each selected region into a buddy [`Area`].
    pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> {
        let mut max_alignment_hint = 0;
        let mut areas = SmallVec::new();

        let mut selections = select_areas::<A>(allocatable_regions);
        while let Some(selection) = selections.next()? {
            let area = Area::new(selection.area, selection.bookkeeping);
            // remember the best alignment any single area can serve
            max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint());
            areas.push(area);
        }

        Ok(Self {
            areas: Mutex::new(areas),
            cpu_local_cache: CpuLocal::new(),
            max_alignment_hint: AtomicUsize::new(max_alignment_hint),
        })
    }

    /// The largest alignment a single allocation can currently hope to get.
    /// Relaxed load: this is a hint, not a guarantee.
    pub fn max_alignment_hint(&self) -> usize {
        self.max_alignment_hint.load(Ordering::Relaxed)
    }

    /// Fast path: pop a frame from this CPU's cache. Only single-page,
    /// page-aligned requests are eligible for caching.
    fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();
            cache.pop_back()
        } else {
            None
        }
    }

    /// Fast path: push a single frame back into this CPU's cache.
    /// Returns `true` if the frame was absorbed (bounded by `MAX_FRAMES_IN_CACHE`).
    fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();

            if cache.len() < MAX_FRAMES_IN_CACHE {
                cache.push_back(block);
                return true;
            }
        }

        false
    }
}
// Safety: frames come from the owned buddy areas and are handed out exclusively;
// the `'static` borrow keeps the allocator (and its bookkeeping) alive forever.
unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
    for &'static FrameAlloc<L, A>
{
    const FRAME_SIZE: usize = A::PAGE_SIZE;

    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        // attempt to allocate from the CPU-local cache first
        if let Some(frame) = self.allocate_local(layout) {
            // cache only ever holds single frames, hence the length-1 slice
            return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1));
        }

        // slow path: take the lock and try each area in turn
        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            if let Ok(frames) = area.allocate(layout) {
                return Ok(frames);
            }
        }

        Err(AllocError)
    }

    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) {
        // attempt to place the frame into the CPU-local cache first
        if self.deallocate_local(block, layout) {
            return;
        }

        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            // Safety: caller guarantees `block` is a live allocation of this allocator.
            let block_ = unsafe { block.as_ref() };

            if area.contains_frame(block_.addr()) {
                // Safety: the block came from this area (address containment above).
                unsafe { area.deallocate(block, layout) };

                // freeing may have coalesced buddies, raising the best available alignment
                self.max_alignment_hint
                    .fetch_max(area.max_alignment_hint(), Ordering::Relaxed);

                return;
            }
        }

        // a frame not belonging to any area violates the deallocate contract
        unreachable!();
    }
}
+444
libs/mem/src/frame_alloc/area.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::marker::PhantomData;
1010+use core::mem::MaybeUninit;
1111+use core::ops::Range;
1212+use core::ptr::NonNull;
1313+use core::{cmp, fmt};
1414+1515+use cordyceps::List;
1616+1717+use crate::address_space::RawAddressSpace;
1818+use crate::frame_alloc::AllocError;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
2020+2121+const MAX_ORDER: usize = 11;
/// A contiguous range of physical memory managed by a binary-buddy allocator with
/// `MAX_ORDER` size classes (order 0 == one page).
pub struct Area<A: RawAddressSpace> {
    // the physical range this area manages
    area: Range<PhysicalAddress>,
    // one bookkeeping slot per page; initialized lazily as frames are carved out
    frames: &'static mut [MaybeUninit<Frame>],

    // free_lists[order] holds free blocks of `PAGE_SIZE << order` bytes
    free_lists: [List<Frame>; MAX_ORDER],

    // largest order currently known to have been available (drives max_alignment_hint)
    max_order: usize,
    // total pages managed by this area
    total_frames: usize,
    // pages currently handed out
    used_frames: usize,

    _aspace: PhantomData<A>,
}
// Safety: the raw `frames` slice is exclusively owned by this `Area` and only accessed
// through `&mut self` — NOTE(review): confirm no aliasing handles to the bookkeeping exist.
unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {}
// Safety: all mutation requires `&mut self`, so shared references are read-only.
unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {}
3838+3939+impl<A: RawAddressSpace> fmt::Debug for Area<A> {
4040+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4141+ f.debug_struct("Area")
4242+ .field("area", &self.area)
4343+ .field(
4444+ "frames",
4545+ &format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len()),
4646+ )
4747+ .field("free_lists", &self.free_lists)
4848+ .field("max_order", &self.max_order)
4949+ .field("total_frames", &self.total_frames)
5050+ .field("used_frames", &self.used_frames)
5151+ .finish()
5252+ }
5353+}
impl<A: RawAddressSpace> Area<A> {
    /// Builds an `Area` over `area`, seeding the free lists.
    ///
    /// `frames` must provide one bookkeeping slot per page in `area`; only the first
    /// frame of each seeded block is initialized here, the rest are written lazily by
    /// `allocate`.
    pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self {
        let mut free_lists = [const { List::new() }; MAX_ORDER];
        let mut total_frames = 0;
        let mut max_order = 0;

        let mut remaining_bytes = area.size();
        let mut addr = area.start;

        // This is the main area initialization loop. We loop through the `area` "chopping off" the
        // largest possible min_block_size-aligned block from the area and add that to its corresponding
        // free list.
        //
        // Note: Remember that for buddy allocators `size == align`. That means we both need to check
        // the alignment and size of our remaining area and can only chop off whatever is smaller.
        while remaining_bytes > 0 {
            // the largest size we can chop off given the alignment of the remaining area
            let max_align = if addr == PhysicalAddress::ZERO {
                // if area happens to start exactly at address 0x0 our calculation below doesn't work.
                // address 0x0 actually supports *any* alignment so we special-case it and return `MAX`
                usize::MAX
            } else {
                // otherwise mask out the least significant bit of the address to figure out its alignment
                addr.get() & (!addr.get() + 1)
            };
            // the largest size we can chop off given the size of the remaining area
            // which is the next smaller power of two
            let max_size = 1 << remaining_bytes.ilog2();

            // our chosen size will be the smallest of
            // - the maximum size by remaining areas alignment
            // - the maximum size by remaining areas size
            // - the maximum block size supported by this allocator
            let size = cmp::min(
                cmp::min(max_align, max_size),
                A::PAGE_SIZE << (MAX_ORDER - 1),
            );
            debug_assert!(size.is_multiple_of(A::PAGE_SIZE));

            // order = log2(size / PAGE_SIZE)
            let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize;

            {
                // only the block's *first* frame gets bookkeeping now; `total_frames`
                // is exactly the page index of `addr` within the area at this point
                let frame = frames[total_frames].write(Frame::new(addr, 0));

                free_lists[order].push_back(NonNull::from(frame));
            }

            total_frames += 1 << order;
            max_order = cmp::max(max_order, order);
            addr = addr.checked_add(size).unwrap();
            remaining_bytes -= size;
        }

        // Make sure we've accounted for all frames
        debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE);

        Self {
            area,
            frames,

            free_lists,

            max_order,
            total_frames,
            used_frames: 0,

            _aspace: PhantomData,
        }
    }
    /// Allocates a block of frames satisfying `layout` from this area's free lists,
    /// splitting a larger block if necessary.
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let min_order = self.allocation_order(layout)?;

        // Starting at the smallest sufficient size class, search for a free block. If we find one in
        // a free list, return it and its order.
        let (block, block_order) = self.free_lists[min_order..]
            .iter_mut()
            .enumerate()
            .find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order)))
            .ok_or(AllocError)?;

        // if the block we found is larger than the `min_order` we need, we repeatedly split off
        // the upper half (of decreasing size) until we reach the desired size. The split off blocks
        // are returned to their appropriate free lists.
        for order in (min_order..block_order).rev() {
            // Safety: `block` was just popped from a free list owned by this area.
            let block = unsafe { block.as_ref() };
            let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap();
            let buddy = self.frame_for_addr(buddy_addr).unwrap();

            // the split-off half needs fresh bookkeeping (it may never have been initialized)
            let buddy = buddy.write(Frame::new(buddy_addr, 0));
            let buddy = NonNull::from(buddy);

            self.free_lists[order].push_back(buddy);
        }

        let alloc_size_frames = 1 << min_order;

        // lazily initialize all frames
        for idx in 0..alloc_size_frames {
            // Safety: see above — `block` is exclusively ours.
            let block = unsafe { block.as_ref() };
            let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap();

            // every frame in the handed-out block starts with refcount 1
            let frame = self.frame_for_addr(addr).unwrap();
            frame.write(Frame::new(addr, 1));
        }

        self.used_frames += alloc_size_frames;

        #[cfg(debug_assertions)]
        self.assert_valid();

        Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames))
    }
    /// Returns a block previously obtained from [`Self::allocate`] to the free lists,
    /// coalescing it with its buddy blocks as far as possible.
    ///
    /// # Safety
    ///
    /// `block`/`layout` must correspond to a live allocation made from this area.
    pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let initial_order = self.allocation_order(layout).unwrap();
        let mut order = initial_order;

        // climb the orders, merging with the buddy whenever it is also free
        while order < self.free_lists.len() - 1 {
            // Safety: caller guarantees `block` points into this area's bookkeeping.
            let block_ = unsafe { block.as_ref() };
            // merge only if (a) a buddy exists at this order, (b) the merged block would be
            // properly aligned for the next order, and (c) the buddy is actually free
            if let Some(buddy) = self.buddy_addr(order, block_.addr())
                && cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1))
                && self.remove_from_free_list(order, buddy)
            {
                let buddy: NonNull<Frame> =
                    NonNull::from(self.frame_for_addr(buddy).unwrap()).cast();
                // the merged block starts at the lower of the two addresses
                block = cmp::min(buddy, block);
                order += 1;
            } else {
                break;
            }
        }

        self.free_lists[order].push_back(block);
        // bookkeeping uses the *original* order: that's how many frames were handed out
        self.used_frames -= 1 << initial_order;
        self.max_order = cmp::max(self.max_order, order);

        #[cfg(debug_assertions)]
        self.assert_valid();
    }
    /// The byte size (== alignment) of the largest block this area has been known to
    /// hold; a hint, since `max_order` is only raised, never lowered, on allocation.
    pub fn max_alignment_hint(&self) -> usize {
        self.order_size(self.max_order)
    }
207207+208208+ fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> {
209209+ let relative = addr.checked_sub_addr(self.area.start).unwrap();
210210+ let idx = relative >> A::PAGE_SIZE_LOG_2;
211211+ Some(&mut self.frames[idx])
212212+ }
    /// Whether the frame at `addr` belongs to this area's physical range.
    pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool {
        self.area.contains(&addr)
    }
    /// Computes the address of the buddy of the order-`order` block at `block`, or
    /// `None` when the block spans the whole area (and therefore has no buddy).
    fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> {
        assert!(block >= self.area.start);
        assert!(block.is_aligned_to(A::PAGE_SIZE << order));

        let relative = block.checked_sub_addr(self.area.start).unwrap();
        let size = self.order_size(order);
        if size >= self.area.size() {
            // MAX_ORDER blocks do not have buddies
            None
        } else {
            // Fun: We can find our buddy by xoring the right bit in our
            // offset from the base of the heap.
            Some(self.area.start.checked_add(relative ^ size).unwrap())
        }
    }
    /// Unlinks the free block at `to_remove` from the order-`order` free list.
    /// Returns `false` when the block is not on that list (i.e. it is not free).
    /// Linear scan — free lists are expected to stay short.
    fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool {
        let mut c = self.free_lists[order].cursor_front_mut();

        while let Some(candidate) = c.current() {
            if candidate.addr() == to_remove {
                c.remove_current().unwrap();
                return true;
            }

            c.move_next();
        }

        false
    }
    // The size of the blocks we allocate for a given order,
    // i.e. `PAGE_SIZE << order` bytes.
    const fn order_size(&self, order: usize) -> usize {
        1 << (A::PAGE_SIZE_LOG_2 as usize + order)
    }
253253+254254+ const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> {
255255+ // We can only allocate blocks that are at least one page
256256+ if !layout.size().is_multiple_of(A::PAGE_SIZE) {
257257+ return Err(AllocError);
258258+ }
259259+260260+ // We can only allocate blocks that are at least page aligned
261261+ if !layout.align().is_multiple_of(A::PAGE_SIZE) {
262262+ return Err(AllocError);
263263+ }
264264+265265+ let size = layout.size().next_power_of_two();
266266+267267+ // We cannot allocate blocks larger than our largest size class
268268+ if size > self.order_size(self.free_lists.len()) {
269269+ return Err(AllocError);
270270+ }
271271+272272+ Ok(size)
273273+ }
274274+275275+ const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> {
276276+ if let Ok(size) = self.allocation_size(layout) {
277277+ Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize)
278278+ } else {
279279+ Err(AllocError)
280280+ }
281281+ }
    /// Debug invariant check: every free block is aligned to its order's size, the
    /// intrusive lists are well-formed, and free + used frames account for the total.
    fn assert_valid(&self) {
        for (order, l) in self.free_lists.iter().enumerate() {
            l.assert_valid();

            for f in l {
                assert!(
                    f.addr().is_aligned_to(A::PAGE_SIZE << order),
                    "frame {f:?} is not aligned to order {order}"
                );
            }
        }

        // conservation: no frame may be lost or double-counted
        assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames);
    }
297297+}
298298+299299+fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize {
300300+ let mut frames = 0;
301301+ for (order, l) in area.free_lists.iter().enumerate() {
302302+ frames += l.len() << order;
303303+ }
304304+ frames
305305+}
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest};

    use super::*;
    use crate::test_utils::TestAddressSpace;

    const PAGE_SIZE: usize = 4096;

    prop_compose! {
        // Generate arbitrary frame indices up to `max / PAGE_SIZE`, then multiply
        // them by PAGE_SIZE, thus producing only page-aligned addresses in the
        // desired range.
        fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE }
    }

    proptest! {
        // An area based at address zero must carve itself into largest-first chunks.
        #[test]
        fn new_fixed_base(num_frames in 0..50_000usize) {
            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE),
                {
                    // leak uninitialized bookkeeping for the test's lifetime
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // let's check whether the area correctly initialized itself
            //
            // since we start on an aligned base address (0x0) we expect it have split off chunks
            // largest-to-smallest. We replicate the process here, but take a block from its free list.
            let mut frames_remaining = num_frames;
            while frames_remaining > 0 {
                // clamp the order we calculate at the max possible order
                let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1);

                let chunk = area.free_lists[chunk_order].pop_back();
                prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}");

                frames_remaining -= 1 << chunk_order;
            }
            // At the end of this process we expect all free lists to be empty
            prop_assert!(area.free_lists.iter().all(|list| list.is_empty()));
        }

        // Construction must uphold the area invariants for arbitrary page-aligned bases.
        #[test]
        fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) {

            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // TODO figure out if we can test the free lists in a sensible way
        }

        // Every managed frame must initially be accounted for on the free lists.
        #[test]
        fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            debug_assert_eq!(frames_in_area(&mut area), num_frames);
        }

        // Round-tripping allocate/deallocate must restore the original free-list shape
        // (area1 serves as the untouched reference, area2 is exercised).
        #[test]
        fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area.clone(),
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area1.assert_valid();

            let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area2.assert_valid();

            // we can only allocate contiguous blocks of the largest order available
            prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE));

            let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap();

            let block = area2.allocate(layout).unwrap();
            prop_assert!(block.len() >= alloc_frames);

            unsafe { area2.deallocate(block.cast(), layout); }

            assert_eq!(frames_in_area(&area2), num_frames);

            for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() {
                prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len());
            }
        }
    }
}
+133
libs/mem/src/frame_alloc/area_selection.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::slice;
99+use core::fmt::Formatter;
1010+use core::marker::PhantomData;
1111+use core::mem;
1212+use core::mem::MaybeUninit;
1313+use core::ops::Range;
1414+1515+use fallible_iterator::FallibleIterator;
1616+use smallvec::SmallVec;
1717+1818+use crate::address_space::RawAddressSpace;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
2020+2121+const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB
/// A selected physical memory area together with the storage for its
/// per-frame bookkeeping.
#[derive(Debug)]
pub struct AreaSelection {
    /// The page-aligned physical range usable for allocation; the space for
    /// `bookkeeping` has already been carved off its end.
    pub area: Range<PhysicalAddress>,
    /// Uninitialized `Frame` slots tracking the frames in `area`.
    pub bookkeeping: &'static mut [MaybeUninit<Frame>],
    /// Bookkeeping bytes wasted on the holes between the regions that were
    /// merged into this area.
    pub wasted_bytes: usize,
}
/// Error returned when a candidate physical range cannot be turned into a
/// usable [`AreaSelection`]: it is empty after page alignment, or too small
/// to hold its own bookkeeping.
#[derive(Debug)]
pub struct SelectionError {
    /// The page-aligned range that had to be rejected.
    pub range: Range<PhysicalAddress>,
}
/// Fallible iterator that greedily merges adjacent allocatable regions into
/// larger areas, bounded by `MAX_WASTED_AREA_BYTES` of bookkeeping waste per
/// emitted area.
pub struct ArenaSelections<A: RawAddressSpace> {
    // Remaining candidate regions; consumed back-to-front via `pop`.
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
    // Bookkeeping bytes wasted so far on holes merged into the current area.
    wasted_bytes: usize,

    // Only `A::PAGE_SIZE` is used; no value of `A` is stored.
    _aspace: PhantomData<A>,
}
4141+4242+pub fn select_areas<A: RawAddressSpace>(
4343+ allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
4444+) -> ArenaSelections<A> {
4545+ ArenaSelections {
4646+ allocatable_regions,
4747+ wasted_bytes: 0,
4848+4949+ _aspace: PhantomData,
5050+ }
5151+}
impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> {
    type Item = AreaSelection;
    type Error = SelectionError;

    /// Produces the next merged area selection, or `Ok(None)` when all
    /// candidate regions have been consumed.
    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        // Start from the last candidate region; done when none are left.
        let Some(mut area) = self.allocatable_regions.pop() else {
            return Ok(None);
        };

        // Greedily merge further regions into `area`, spanning the holes
        // between them, until the bookkeeping wasted on holes grows too large.
        while let Some(region) = self.allocatable_regions.pop() {
            debug_assert!(!area.is_overlapping(&region));

            let pages_in_hole = if area.end <= region.start {
                // the region is higher than the current area
                region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE
            } else {
                debug_assert!(region.end <= area.start);
                // the region is lower than the current area
                area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE
            };

            // Bookkeeping entries for pages inside the hole are pure waste:
            // those pages can never be handed out.
            let waste_from_hole = size_of::<Frame>() * pages_in_hole;

            if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES {
                // Merging would waste too much; keep the region for the next
                // call and emit what we have so far.
                self.allocatable_regions.push(region);
                break;
            } else {
                self.wasted_bytes += waste_from_hole;

                // Grow `area` across the hole so it also covers `region`.
                if area.end <= region.start {
                    area.end = region.end;
                } else {
                    area.start = region.start;
                }
            }
        }

        let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap();
        // We can't use empty areas anyway
        if aligned.is_empty() {
            return Err(SelectionError { range: aligned });
        }

        // One `Frame` bookkeeping slot per page in the area (conservatively
        // including the pages the bookkeeping itself will occupy).
        let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE;

        // Place the bookkeeping at the page-aligned end of the area.
        let bookkeeping_start = aligned
            .end
            .checked_sub(bookkeeping_size_frames * size_of::<Frame>())
            .unwrap()
            .align_down(A::PAGE_SIZE);

        // The area has no space to hold its own bookkeeping
        if bookkeeping_start < aligned.start {
            return Err(SelectionError { range: aligned });
        }

        // SAFETY: `bookkeeping_start..aligned.end` lies inside the region we
        // were handed and is excluded from the usable area below, so nothing
        // else will use it.
        // NOTE(review): this assumes the physical address is directly
        // dereferenceable (identity-mapped) here — confirm.
        let bookkeeping = unsafe {
            slice::from_raw_parts_mut(
                bookkeeping_start.as_mut_ptr().cast(),
                bookkeeping_size_frames,
            )
        };
        // Shrink the usable area so it no longer covers the bookkeeping.
        aligned.end = bookkeeping_start;

        Ok(Some(AreaSelection {
            area: aligned,
            bookkeeping,
            // Hand the accumulated waste to the caller and reset the counter
            // for the next area.
            wasted_bytes: mem::take(&mut self.wasted_bytes),
        }))
    }
}
124124+125125+// ===== impl SelectionError =====
126126+127127+impl core::fmt::Display for SelectionError {
128128+ fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
129129+ todo!()
130130+ }
131131+}
// Marker impl so `SelectionError` can participate in error chains
// (e.g. be boxed as `dyn Error` or wrapped by `anyhow`).
impl core::error::Error for SelectionError {}
+37
libs/mem/src/lib.rs
···11+#![cfg_attr(not(test), no_std)]
22+extern crate alloc;
33+44+mod access_rules;
55+pub mod address_space;
66+mod addresses;
77+mod frame;
88+pub mod frame_alloc;
99+mod test_utils;
1010+mod utils;
1111+mod vmo;
/// Crate-wide result type; an alias for [`anyhow::Result`].
pub type Result<T> = anyhow::Result<T>;
1414+1515+pub use access_rules::{AccessRules, WriteOrExecute};
1616+pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
1717+pub use frame::{Frame, FrameRef};
1818+1919+// For every region we need to track 3 pieces of information:
2020+// 1. The virtual memory region it occupies.
2121+// - required to know which virtual memory regions are free to use
//    - required when resolving page faults
// 2. The physical memory region(s) it occupies.
//    - required when resolving page faults
2525+// - required for swap
2626+// - either
2727+// - PAGED (general purpose, lazy, paged physical memory. Can be committed, decommitted, swapped, or compressed)
//      - MMIO (physmem is MMIO instead of regular RAM. Can NOT be swapped, or compressed, but CAN be committed and decommitted)
//      - WIRED (the mapping was set up during boot. Can NEITHER be committed, decommitted, swapped, nor compressed)
3030+// 3. The content of the memory region.
3131+// - required for resolving page faults
3232+// - when first committing physical memory we need to know what to fill the memory with
3333+// - needs writeback hooks so changes can be flushed
3434+// - either
3535+// - ZERO FRAME (the special zero frame, filled only with zeroes)
3636+// - USERSPACE provider (for file system, swap, etc.)
3737+
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
/// Asserts — in debug builds only — that the unsafe precondition `$e` holds,
/// panicking with `$message` otherwise.
///
/// Mirrors the standard library's internal `assert_unsafe_precondition!`:
/// the check compiles away entirely in release builds, yet documents and
/// enforces the caller contract of `unsafe` code during development.
macro_rules! assert_unsafe_precondition_ {
    ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => {
        {
            // This check is inlineable, but not by the MIR inliner.
            // The reason for this is that the MIR inliner is in an exceptionally bad position
            // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`,
            // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and
            // would be bad for compile times.
            //
            // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without
            // inlining the check. If it's `true`, it can inline it and get significantly better performance.
            #[inline]
            const fn precondition_check($($name:$ty),*) {
                assert!($e, concat!("unsafe precondition(s) violated: ", $message,
                    "\n\nThis indicates a bug in the program. \
                    This Undefined Behavior check is optional, and cannot be relied on for safety."))
            }

            // Arguments are only evaluated (and the check only run) in debug builds.
            #[cfg(debug_assertions)]
            precondition_check($($arg,)*);
        }
    };
}
3131+pub(crate) use assert_unsafe_precondition_;
+267
libs/mem/src/vmo.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::sync::Arc;
99+use core::convert::Infallible;
1010+use core::fmt;
1111+use core::ops::RangeBounds;
1212+1313+use kasync::io::{Read, Write};
1414+1515+use crate::address_space::Batch;
/// A provider of memory contents for a region of virtual memory.
///
/// (Method comments were plain `//` and invisible to rustdoc; promoted to
/// `///` doc comments.)
pub trait VirtualMemoryObject: fmt::Debug {
    /// Error type returned by the fallible operations below.
    type Err;

    /// Attempt to resize the vmo to `new_size`.
    async fn resize(&mut self, new_size: usize) -> Result<(), Self::Err>;

    /// Find physical pages to back the range of the object.
    async fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>;

    /// Free a range of the vmo back to the default state.
    async fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>;

    /// Zero a range of the VMO. May release physical pages in the process.
    async fn clear<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>;

    /// Fetches content in the given range of the object. This should operate
    /// logically equivalent to reading such that future reads are quicker.
    async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>;
}
/// A [`VirtualMemoryObject`] for memory whose mapping was set up during boot
/// and is permanently resident ("wired").
#[derive(Debug)]
pub struct WiredVmo(());
impl VirtualMemoryObject for WiredVmo {
    // Wired memory operations cannot fail; the fallible paths are
    // unreachable by construction.
    type Err = Infallible;

    // Wired mappings are fixed at boot; resizing is a caller bug.
    async fn resize(&mut self, _new_size: usize) -> Result<(), Self::Err> {
        unreachable!("cannot resize WIRED memory object");
    }

    async fn commit<R>(
        &mut self,
        _range: R,
        _will_write: bool,
        _batch: &mut Batch,
    ) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>,
    {
        unreachable!("cannot commit WIRED memory object. Wired memory is always committed.");
    }

    async fn decommit<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>,
    {
        unreachable!("cannot decommit WIRED memory object. Wired memory is always committed.");
    }

    async fn clear<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>,
    {
        // NOTE(review): unimplemented — calling `clear` on wired memory
        // currently panics.
        todo!()
    }

    async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
    where
        R: RangeBounds<usize>,
    {
        // NOTE(review): unimplemented; since wired memory is always resident
        // this could plausibly be a no-op `Ok(())` — confirm before changing.
        todo!()
    }
}
/// A [`VirtualMemoryObject`] backed by general-purpose paged physical memory,
/// with contents supplied by a [`FrameSource`].
#[derive(Debug)]
pub struct PagedVmo {
    // Backing store the object's content is read from and written back to.
    source: Arc<dyn FrameSource + Send + Sync>,
}
9393+9494+impl VirtualMemoryObject for PagedVmo {
9595+ type Err = anyhow::Error;
9696+9797+ async fn resize(&mut self, new_size: usize) -> Result<(), Self::Err> {
9898+ todo!()
9999+ }
100100+101101+ async fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> Result<(), Self::Err>
102102+ where
103103+ R: RangeBounds<usize>,
104104+ {
105105+ todo!()
106106+ }
107107+108108+ async fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
109109+ where
110110+ R: RangeBounds<usize>,
111111+ {
112112+ todo!()
113113+ }
114114+115115+ async fn clear<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
116116+ where
117117+ R: RangeBounds<usize>,
118118+ {
119119+ todo!()
120120+ }
121121+122122+ async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
123123+ where
124124+ R: RangeBounds<usize>,
125125+ {
126126+ todo!()
127127+ }
128128+}
/// Backing store for a [`PagedVmo`]: a readable and writable source of frame
/// contents (presumably a file system or swap provider — confirm with callers).
trait FrameSource: Read<Err = anyhow::Error> + Write<Err = anyhow::Error> + fmt::Debug {}
131131+132132+// impl<A: RawAddressSpace> VirtualMemoryObject for PhysVmo<A> {
133133+// fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
134134+// where
135135+// R: RangeBounds<usize>,
136136+// {
137137+// let range_phys = slice_range(&self.range, range)?;
138138+//
139139+// // batch.map(
140140+// // // range.start,
141141+// // range_phys.start,
142142+// // NonZeroUsize::new(range_phys.size()).unwrap(),
143143+// // // self.permissions.into(),
144144+// // )?;
145145+//
146146+// todo!()
147147+// }
148148+//
149149+// fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
150150+// where
151151+// R: RangeBounds<usize>,
152152+// {
153153+// todo!()
154154+// }
155155+// }
156156+//
157157+// #[derive(Debug)]
158158+// pub struct PagedVmo {}
159159+//
160160+// impl VirtualMemoryObject for PagedVmo {
161161+// fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
162162+// where
163163+// R: RangeBounds<usize>,
164164+// {
165165+// // let access = slice_range(self.range, range);
166166+//
167167+// // if will_write {
168168+// // let mut vmo = vmo.write();
169169+// //
170170+// // for addr in range.iter().step_by(arch::PAGE_SIZE) {
171171+// // debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE));
172172+// // let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap();
173173+// // let frame = vmo.require_owned_frame(vmo_relative_offset)?;
174174+// // batch.queue_map(
175175+// // addr,
176176+// // frame.addr(),
177177+// // NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
178178+// // self.permissions.into(),
179179+// // )?;
180180+// // }
181181+// // } else {
182182+// // let mut vmo = vmo.write();
183183+// //
184184+// // for addr in range.iter().step_by(arch::PAGE_SIZE) {
185185+// // debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE));
186186+// // let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap();
187187+// // let frame = vmo.require_read_frame(vmo_relative_offset)?;
188188+// // batch.queue_map(
189189+// // addr,
190190+// // frame.addr(),
191191+// // NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
192192+// // self.permissions.difference(Permissions::WRITE).into(),
193193+// // )?;
194194+// // }
195195+// // }
196196+//
197197+// todo!()
198198+// }
199199+//
200200+// fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
201201+// where
202202+// R: RangeBounds<usize>,
203203+// {
204204+// todo!()
205205+// }
206206+// }
207207+//
208208+// fn slice_range<R: RangeBounds<usize>>(
209209+// range: &Range<PhysicalAddress>,
210210+// bounds: R,
211211+// ) -> crate::Result<Range<PhysicalAddress>> {
212212+// let start = match bounds.start_bound() {
213213+// Bound::Included(b) => range.start.checked_add(*b).unwrap(),
214214+// Bound::Excluded(b) => range.start.checked_add(*b + 1).unwrap(),
215215+// Bound::Unbounded => range.start,
216216+// };
217217+// let end = match bounds.end_bound() {
218218+// Bound::Included(b) => range.start.checked_add(*b + 1).unwrap(),
219219+// Bound::Excluded(b) => range.start.checked_add(*b).unwrap(),
220220+// Bound::Unbounded => range.end,
221221+// };
222222+//
223223+// ensure!(end <= range.end, "requested range {:?} is out of bounds for {range:?}", start..end);
224224+//
225225+// Ok(start..end)
226226+// }
227227+//
228228+// #[cfg(test)]
229229+// mod tests {
230230+// use core::ops::Bound::{Excluded, Included};
231231+//
232232+// use super::*;
233233+//
234234+// #[test]
235235+// fn _subrange() {
236236+// let range = PhysicalAddress::new(0)..PhysicalAddress::new(10);
237237+//
238238+// assert_eq!(
239239+// slice_range(&range, 0..1).unwrap(),
240240+// PhysicalAddress::new(0)..PhysicalAddress::new(1)
241241+// );
242242+// assert_eq!(
243243+// slice_range(&range, ..).unwrap(),
244244+// PhysicalAddress::new(0)..PhysicalAddress::new(10)
245245+// );
246246+// assert_eq!(
247247+// slice_range(&range, 0..).unwrap(),
248248+// PhysicalAddress::new(0)..PhysicalAddress::new(10)
249249+// );
250250+// assert_eq!(
251251+// slice_range(&range, ..10).unwrap(),
252252+// PhysicalAddress::new(0)..PhysicalAddress::new(10)
253253+// );
254254+// assert_eq!(
255255+// slice_range(&range, 0..10).unwrap(),
256256+// PhysicalAddress::new(0)..PhysicalAddress::new(10)
257257+// );
258258+// assert_eq!(
259259+// slice_range(&range, 0..=9).unwrap(),
260260+// PhysicalAddress::new(0)..PhysicalAddress::new(10)
261261+// );
262262+// assert_eq!(
263263+// slice_range(&range, (Excluded(0), Included(9))).unwrap(),
264264+// PhysicalAddress::new(1)..PhysicalAddress::new(10)
265265+// );
266266+// }
267267+// }