···11+# Seeds for failure cases proptest has generated in the past. It is
22+# automatically read and these particular cases re-run before any
33+# novel cases are generated.
44+#
55+# It is recommended to check this file in to source control so that
66+# everyone who runs the test benefits from these saved cases.
77+cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1
88+cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257
99+cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257
1010+cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354
1111+cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257
1212+cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20
1313+cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+69
libs/mem/src/access_rules.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
mycelium_bitfield::bitfield! {
    /// Rules that dictate how a region of virtual memory may be accessed.
    ///
    /// All fields are packed into a single `u8` by the `mycelium_bitfield` macro.
    ///
    /// # W^X
    ///
    /// In order to prevent malicious code execution as proactively as possible,
    /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
    /// through the [`WriteOrExecute`] enum field.
    #[derive(PartialEq, Eq)]
    pub struct AccessRules<u8> {
        /// If set, reading from the memory region is allowed.
        pub const READ: bool;
        /// Whether executing, or writing this memory region is allowed (or neither).
        pub const WRITE_OR_EXECUTE: WriteOrExecute;
        /// If set, requires code in the memory region to use aarch64 Branch Target Identification.
        /// Does nothing on non-aarch64 architectures.
        pub const BTI: bool;
    }
}
/// Whether executing, or writing this memory region is allowed (or neither).
///
/// This is an enum to enforce [`W^X`] at the type-level.
///
/// The explicit discriminants are the exact 2-bit encoding used when packing
/// this field into [`AccessRules`] (see the `FromBits` impl below).
///
/// [`W^X`]: AccessRules
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum WriteOrExecute {
    /// Neither writing nor execution of the memory region is allowed.
    Neither = 0b00,
    /// Writing to the memory region is allowed.
    Write = 0b01,
    /// Executing code from the memory region is allowed.
    Execute = 0b10,
}
4343+4444+// ===== impl WriteOrExecute =====
4545+4646+impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
4747+ type Error = core::convert::Infallible;
4848+4949+ /// The number of bits required to represent a value of this type.
5050+ const BITS: u32 = 2;
5151+5252+ #[inline]
5353+ fn try_from_bits(bits: u8) -> Result<Self, Self::Error> {
5454+ match bits {
5555+ b if b == Self::Neither as u8 => Ok(Self::Neither),
5656+ b if b == Self::Write as u8 => Ok(Self::Write),
5757+ b if b == Self::Execute as u8 => Ok(Self::Execute),
5858+ _ => {
5959+ // this should never happen unless the bitpacking code is broken
6060+ unreachable!("invalid memory region access rules {bits:#b}")
6161+ }
6262+ }
6363+ }
6464+6565+ #[inline]
6666+ fn into_bits(self) -> u8 {
6767+ self as u8
6868+ }
6969+}
+997
libs/mem/src/address_space.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod batch;
99+mod region;
1010+1111+use alloc::boxed::Box;
1212+use core::alloc::Layout;
1313+use core::num::NonZeroUsize;
1414+use core::ops::{Bound, ControlFlow, Range};
1515+use core::ptr::NonNull;
1616+1717+use anyhow::{Context, format_err};
1818+pub(crate) use batch::Batch;
1919+use rand::Rng;
2020+use rand::distr::Uniform;
2121+use rand_chacha::ChaCha20Rng;
2222+use region::AddressSpaceRegion;
2323+use wavltree::{CursorMut, WAVLTree};
2424+2525+use crate::access_rules::AccessRules;
2626+use crate::utils::assert_unsafe_precondition_;
2727+use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
/// Low-level (page-table level) interface to a virtual address space.
///
/// # Safety
///
/// NOTE(review): this trait is `unsafe` to implement but its implementor contract is
/// not spelled out here — presumably implementations must faithfully honor the mapping
/// semantics documented on each method. TODO confirm and document.
pub unsafe trait RawAddressSpace {
    /// The smallest addressable chunk of memory of this address space. All address argument provided
    /// to methods of this type (both virtual and physical) must be aligned to this.
    const PAGE_SIZE: usize;
    /// The number of usable bits in a virtual address of this address space.
    const VIRT_ADDR_BITS: u32;

    /// The base-2 logarithm of [`Self::PAGE_SIZE`].
    ///
    /// NOTE(review): `(PAGE_SIZE - 1).count_ones()` equals log2 only when `PAGE_SIZE`
    /// is a power of two — assumed but not asserted here; confirm.
    const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8;
    /// Mask covering the non-addressable high bits of a virtual address
    /// (all bits at and above [`Self::VIRT_ADDR_BITS`]).
    const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1);

    /// The [`Flush`] implementation for this address space.
    type Flush: Flush;

    /// Return a new, empty flush for this address space.
    fn flush(&self) -> Self::Flush;

    /// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given
    /// [`VirtualAddress`] if mapped.
    fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>;

    /// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the
    /// specified access rules.
    ///
    /// If this returns `Ok`, the mapping is added to the raw address space and all future
    /// accesses to the virtual address range will translate to accesses of the physical address
    /// range.
    ///
    /// # Safety
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the mapping cannot be established and the virtual address range
    /// remains unaltered.
    unsafe fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    ) -> crate::Result<()>;

    /// Unmap a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region will cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush);

    /// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses.
    ///
    /// After this returns all accesses to the virtual address region must follow the
    /// specified `AccessRules` or cause a fault.
    ///
    /// # Safety
    ///
    /// - `virt..virt+len` must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    unsafe fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
        flush: &mut Self::Flush,
    );
}
/// A type that can flush changes made to a [`RawAddressSpace`].
///
/// Note: [`Flush`] is purely optional; it exists so implementations MAY batch changes.
/// Note that the implementation is not required to delay materializing changes until [`Flush::flush`]
/// is called.
pub trait Flush {
    /// Flush changes made to its [`RawAddressSpace`].
    ///
    /// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across
    /// all affected threads/CPUs.
    ///
    /// # Errors
    ///
    /// If this returns `Err`, flushing the changes failed. The changes, or a subset of them, might
    /// still have taken effect across all or some of the threads/CPUs.
    fn flush(self) -> crate::Result<()>;
}
/// A high-level virtual address space managing regions on top of a [`RawAddressSpace`].
pub struct AddressSpace<R: RawAddressSpace> {
    /// The underlying raw (page-table level) address space.
    raw: R,
    /// All currently reserved regions, keyed/ordered in a WAVL tree.
    regions: WAVLTree<AddressSpaceRegion<R>>,
    /// Pending changes that still need to be flushed to `raw`.
    batch: Batch,
    /// The maximum range of addresses that regions may occupy.
    max_range: Range<VirtualAddress>,
    /// Entropy source used for randomized region placement (ASLR);
    /// `None` disables randomization (see `find_spot_for`).
    rng: Option<ChaCha20Rng>,
}
131131+132132+impl<R: RawAddressSpace> AddressSpace<R> {
    /// Creates a new, empty [`AddressSpace`] on top of the given raw address space.
    ///
    /// `rng` seeds randomized region placement (ASLR); pass `None` to always pick
    /// the lowest free spot instead.
    pub fn new(raw: R, rng: Option<ChaCha20Rng>) -> Self {
        Self {
            raw,
            regions: WAVLTree::new(),
            batch: Batch::new(),
            max_range: VirtualAddress::MIN..VirtualAddress::MAX,
            rng,
        }
    }
    /// Attempts to reserve a region of virtual memory.
    ///
    /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
    /// of `layout`. Access to this region must obey the provided `rules` or cause a hardware fault.
    ///
    /// The returned region may have a larger size than specified by `layout.size()`, and may or may
    /// not have its contents initialized.
    ///
    /// The returned region of virtual memory remains mapped as long as it is [*currently mapped*]
    /// and the address space type itself has not been dropped.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or mapping otherwise fails.
    pub fn map(
        &mut self,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::map]");

        // Raise the alignment to at least one page.
        // NOTE(review): the `unwrap` assumes `R::PAGE_SIZE` is a valid power-of-two
        // alignment and the rounded size does not overflow — confirm.
        let layout = layout.align_to(R::PAGE_SIZE).unwrap();

        let spot = self
            .find_spot_for(layout)
            .context(format_err!("cannot find free spot for layout {layout:?}"))?;

        // TODO "relaxed" frame provider
        let region = AddressSpaceRegion::new(spot, layout, access_rules);

        let region = self.regions.insert(Box::pin(region));

        // TODO OPTIONAL eagerly commit a few pages

        // Materialize any changes queued while creating the region.
        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region
    /// is zero-initialized.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or mapping otherwise fails.
    pub fn map_zeroed(
        &mut self,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::map_zeroed]");

        // Raise the alignment to at least one page.
        // NOTE(review): the `unwrap` assumes `R::PAGE_SIZE` is a valid power-of-two
        // alignment and the rounded size does not overflow — confirm.
        let layout = layout.align_to(R::PAGE_SIZE).unwrap();

        let spot = self
            .find_spot_for(layout)
            .context(format_err!("cannot find free spot for layout {layout:?}"))?;

        // TODO "zeroed" frame provider
        let region = AddressSpaceRegion::new(spot, layout, access_rules);

        let region = self.regions.insert(Box::pin(region));

        // TODO OPTIONAL eagerly commit a few pages

        // Materialize any changes queued while creating the region.
        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
    /// Unmaps the virtual memory region referenced by `ptr`.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::unmap]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        // Decommit the whole region. NOTE(review): this `unwrap` panics if decommit
        // fails — confirm that failure here is genuinely impossible.
        region.decommit(.., &mut self.batch, &mut self.raw).unwrap();
    }
    /// Attempts to extend the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may extend the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how extending a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow]");

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to do.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        // Prefer growing in-place; if that fails fall back to relocating the region.
        if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or growing otherwise fails.
    pub unsafe fn grow_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::grow_in_place]");

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to do.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() >= old_layout.size()
            }
        );

        // In-place only: no relocation fallback (unlike `grow`).
        unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Attempts to shrink the virtual memory reservation.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout.
    ///
    /// TODO describe how shrinking a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink]");

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to do.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        // Prefer shrinking in-place; if that fails fall back to relocating the region.
        if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } {
            Ok(ptr)
        } else {
            unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
        }
    }
    /// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be
    /// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, virtual memory is exhausted, or shrinking otherwise fails.
    pub unsafe fn shrink_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::shrink_in_place]");

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
                old_layout.align().is_multiple_of(page_size)
            }
        );

        assert_unsafe_precondition_!(
            "TODO",
            (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
                new_layout.align().is_multiple_of(page_size)
            }
        );

        // Identical layout: nothing to do.
        if new_layout == old_layout {
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }

        assert_unsafe_precondition_!(
            "TODO",
            (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
                new_layout.size() <= old_layout.size()
            }
        );

        // In-place only: no relocation fallback (unlike `shrink`).
        unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) }
    }
    /// Updates the access rules for the virtual memory region referenced by `ptr`.
    ///
    /// If this returns `Ok`, access to this region must obey the new `rules` or cause a hardware fault.
    ///
    /// If this method returns `Err`, the access rules of the memory region are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    pub unsafe fn update_access_rules(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
        access_rules: AccessRules,
    ) -> crate::Result<()> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::update_access_rules]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Queue the permission change, then materialize it in the raw address space.
        region.update_access_rules(access_rules, &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(())
    }
    /// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `new_layout` and is
    /// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
    /// virtual memory region.
    ///
    /// TODO describe how clearing a file-backed, or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, clearing a virtual memory region is not supported by the backing storage, or
    /// clearing otherwise fails.
    pub unsafe fn clear(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::clear]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // Queue the clear, then materialize it in the raw address space.
        region.clear(&mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
591591+592592+ pub fn assert_valid(&self, msg: &str) {
593593+ let mut regions = self.regions.iter();
594594+595595+ let Some(first_region) = regions.next() else {
596596+ assert!(
597597+ self.regions.is_empty(),
598598+ "{msg}region iterator is empty but tree is not."
599599+ );
600600+601601+ return;
602602+ };
603603+604604+ first_region.assert_valid(msg);
605605+606606+ let mut seen_range = first_region.range().clone();
607607+608608+ while let Some(region) = regions.next() {
609609+ assert!(
610610+ !region.range().is_overlapping(&seen_range),
611611+ "{msg}region cannot overlap previous region; region={region:?}"
612612+ );
613613+ assert!(
614614+ region.range().start >= self.max_range.start
615615+ && region.range().end <= self.max_range.end,
616616+ "{msg}region cannot lie outside of max address space range; region={region:?}"
617617+ );
618618+619619+ seen_range = seen_range.start..region.range().end;
620620+621621+ region.assert_valid(msg);
622622+623623+ // TODO assert validity of of VMO against phys addresses
624624+ // let (_phys, access_rules) = self
625625+ // .batched_raw
626626+ // .raw_address_space()
627627+ // .lookup(region.range().start)
628628+ // .unwrap_or_else(|| {
629629+ // panic!("{msg}region base address is not mapped in raw address space region={region:?}")
630630+ // });
631631+ //
632632+ // assert_eq!(
633633+ // access_rules,
634634+ // region.access_rules(),
635635+ // "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
636636+ // region.access_rules(),
637637+ // );
638638+ }
639639+ }
    /// Attempts to grow a virtual memory region in-place. This method is shared between
    /// [`Self::grow`] and [`Self::grow_in_place`].
    ///
    /// (The previous doc comment incorrectly referenced the shrink methods.)
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn grow_in_place_inner(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };

        // The neighbouring region's range bounds how far we can grow in-place.
        let next_range = cursor.peek_next().map(|region| region.range().clone());

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        region.grow_in_place(new_layout, next_range, &mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }
670670+671671+ /// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::grow`]
672672+ /// and [`Self::grow_in_place`].
673673+ ///
674674+ /// # Safety
675675+ ///
676676+ /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
677677+ /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
678678+ /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
679679+ /// * `new_layout.align()` must be multiple of PAGE_SIZE
680680+ unsafe fn shrink_in_place_inner(
681681+ &mut self,
682682+ ptr: NonNull<u8>,
683683+ old_layout: Layout,
684684+ new_layout: Layout,
685685+ ) -> crate::Result<NonNull<[u8]>> {
686686+ // Safety: responsibility of caller
687687+ let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
688688+689689+ // Safety: responsibility of caller
690690+ let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
691691+692692+ region.shrink(new_layout, &mut self.batch)?;
693693+694694+ self.batch.flush_changes()?;
695695+696696+ Ok(region.as_non_null())
697697+ }
    /// Reallocates a virtual address region. This will unmap and remove the old region, allocating
    /// a new region that will be backed the old regions physical memory.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///   NOTE(review): this bullet looks copy-pasted from the shrink path — this method is
    ///   also reachable from grow, where `new_layout` is larger. Confirm and fix the contract.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn reallocate_region(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
        // Safety: `get_region_containing_ptr` asserts the cursor points at a region.
        // NOTE(review): the region is removed from `self.regions` here but never re-inserted
        // after the move — confirm the caller re-links it, otherwise the region is lost from
        // the tree when it goes out of scope.
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        let spot = self.find_spot_for(new_layout).context(format_err!(
            "cannot find free spot for layout {new_layout:?}"
        ))?;

        // NOTE(review): unlike the grow/shrink in-place paths, `self.batch` is never flushed
        // here — confirm the remap is materialized somewhere, otherwise the batched
        // operations stay pending.
        region.move_to(spot, new_layout, &mut self.batch)?;

        Ok(region.as_non_null())
    }
    /// Find a spot in the address space that satisfies the given `layout` requirements.
    ///
    /// If a spot suitable for holding data described by `layout` is found, the base address of the
    /// address range is returned in `Some`. The returned address is already correctly aligned to
    /// `layout.align()`.
    ///
    /// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
    /// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
    /// be satisfied *at the moment*. Calls to this method with a different size, alignment, or at a
    /// different time might still succeed.
    fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> {
        // The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is
        // guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works
        // elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively.
        // Here is how it works:
        // 1. We set up two counters: (see the GapVisitor)
        //     - `candidate_spot_count` which we initialize to zero
        //     - `target_index` which we either set to a random value between 0..<the maximum number of
        //       possible addresses in the address space> if ASLR is requested OR to zero otherwise.
        // 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the
        //    gaps between regions. We count the number of addresses in each gap that satisfy the
        //    requested `Layout`s size and alignment and add that to the `candidate_spot_count`.
        //    IF the number of spots in the gap is greater than our chosen target index, we pick the
        //    spot at the target index and finish. ELSE we *decrement* the target index by the number
        //    of spots and continue to the next gap.
        // 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original
        //    guess for `target_index` was too big, in which case we need to retry.
        // 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know
        //    the *actual* number of possible spots in the address space since we just counted them
        //    during the first attempt. We initialize `target_index` to `0..candidate_spot_count`
        //    which is guaranteed to return us a spot.
        //    IF `candidate_spot_count` is ZERO after the first attempt, there is no point in
        //    retrying since we cannot fulfill the requested layout.
        //
        // Note that in practice, we use a binary tree to keep track of regions, and we use binary search
        // to optimize the search for a suitable gap instead of linear iteration.

        // Pad so size is a multiple of alignment; spot counting below relies on this.
        let layout = layout.pad_to_align();

        // First attempt: guess a random target index. Without an RNG (no ASLR) this defaults
        // to zero, i.e. the lowest suitable spot.
        let max_candidate_spots = self.max_range.size();

        let target_index: usize = self
            .rng
            .as_mut()
            .map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap()))
            .unwrap_or_default();

        // First attempt: visit the binary search tree to find a gap
        let mut v = GapVisitor::new(layout, target_index);
        self.visit_gaps(&mut v);

        // if we found a spot already we're done
        if let Some(chosen) = v.chosen {
            return Some(chosen);
        }

        // otherwise, Second attempt: we need to retry with the correct candidate spot count
        // but if we counted no suitable candidate spots during the first attempt, we cannot fulfill
        // the request.
        if v.candidate_spots == 0 {
            return None;
        }

        // Second attempt: pick a new target_index that's actually fulfillable
        let target_index: usize = self
            .rng
            .as_mut()
            .map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap()))
            .unwrap_or_default();

        // Second attempt: visit the binary search tree to find a gap
        let mut v = GapVisitor::new(layout, target_index);
        self.visit_gaps(&mut v);

        // NOTE(review): the message says "first attempt" but this expect guards the *second*
        // attempt, which is infallible because target_index < candidate_spots.
        let chosen = v
            .chosen
            .expect("There must be a chosen spot after the first attempt. This is a bug!");

        debug_assert!(chosen.is_canonical::<R>());

        Some(chosen)
    }
    /// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space
    /// from lowest to highest addresses.
    fn visit_gaps(&self, v: &mut GapVisitor) {
        let Some(root) = self.regions.root().get() else {
            // if the tree is empty, we treat the entire max_range as the gap
            // note that we do not care about the returned ControlFlow, as there is nothing else we
            // could try to find a spot anyway
            let _ = v.visit(self.max_range.clone());

            return;
        };

        // see if there is a suitable gap BEFORE the first address space region
        if v.visit(self.max_range.start..root.subtree_range().start)
            .is_break()
        {
            return;
        }

        // now comes the main part of the search. we start at the WAVLTree root node and do a
        // binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
        // to speed up this search. See `AddressSpaceRegion` for details on how this works.
        //
        // `already_visited` tracks the highest address whose subtree has been fully processed,
        // so that walking back up via `parent()` does not descend into subtrees twice.

        let mut maybe_current = self.regions.root().get();
        let mut already_visited = VirtualAddress::MIN;

        while let Some(current) = maybe_current {
            // If there is no suitable gap anywhere in this subtree, skip it entirely.
            if current.suitable_gap_in_subtree(v.layout()) {
                // First, look at the left subtree (lower addresses first)
                if let Some(left) = current.left_child() {
                    if left.suitable_gap_in_subtree(v.layout())
                        && left.subtree_range().end > already_visited
                    {
                        maybe_current = Some(left);
                        continue;
                    }

                    // gap between the left subtree and the current region
                    if v.visit(left.subtree_range().end..current.range().start)
                        .is_break()
                    {
                        return;
                    }
                }
                // NOTE(review): when `current` has no left child, the gap ending at
                // `current.range().start` is not visited here — confirm it is covered by the
                // parent/right-child visits, otherwise gaps can be missed.

                if let Some(right) = current.right_child() {
                    // gap between the current region and the right subtree
                    if v.visit(current.range().end..right.subtree_range().start)
                        .is_break()
                    {
                        return;
                    }

                    if right.suitable_gap_in_subtree(v.layout())
                        && right.subtree_range().end > already_visited
                    {
                        maybe_current = Some(right);
                        continue;
                    }
                }
            }

            // this subtree is exhausted; walk back up to the parent
            already_visited = current.subtree_range().end;
            maybe_current = current.parent();
        }

        // see if there is a suitable gap AFTER the last address space region
        if v.visit(root.subtree_range().end..self.max_range.end)
            .is_break()
        {
            return;
        }
    }
883883+}
884884+885885+/// # Safety
886886+///
887887+/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
888888+/// * `layout` must [*fit*] that region of memory.
889889+///
890890+/// [*currently mapped*]: #currently-mapped-memory
891891+/// [*fit*]: #memory-fitting
892892+unsafe fn get_region_containing_ptr(
893893+ regions: &mut WAVLTree<AddressSpaceRegion>,
894894+ ptr: NonNull<u8>,
895895+ layout: Layout,
896896+) -> CursorMut<'_, AddressSpaceRegion> {
897897+ let addr = VirtualAddress::from_non_null(ptr);
898898+899899+ let cursor = regions.lower_bound_mut(Bound::Included(&addr));
900900+901901+ assert_unsafe_precondition_!(
902902+ "TODO",
903903+ (cursor: &CursorMut<AddressSpaceRegion> = &cursor) => cursor.get().is_some()
904904+ );
905905+906906+ // Safety: The caller guarantees the pointer is currently mapped which means we must have
907907+ // a corresponding address space region for it
908908+ let region = unsafe { cursor.get().unwrap_unchecked() };
909909+910910+ assert_unsafe_precondition_!(
911911+ "TODO",
912912+ (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
913913+ let range = region.range();
914914+915915+ range.start.get() <= addr.get() && addr.get() < range.end.get()
916916+ }
917917+ );
918918+919919+ assert_unsafe_precondition_!(
920920+ "`layout` does not fit memory region",
921921+ (layout: Layout = layout, region: &AddressSpaceRegion = ®ion) => region.layout_fits_region(layout)
922922+ );
923923+924924+ cursor
925925+}
/// Visitor state for the gap search performed by `find_spot_for`/`visit_gaps`.
pub(crate) struct GapVisitor {
    /// The size/alignment requirements a candidate spot must satisfy.
    layout: Layout,
    /// Number of candidate spots still to be skipped before one is chosen;
    /// decremented as gaps are visited.
    target_index: usize,
    /// Running total of candidate spots seen across all visited gaps.
    candidate_spots: usize,
    /// The chosen spot, once one has been found.
    chosen: Option<VirtualAddress>,
}
933933+934934+impl GapVisitor {
935935+ fn new(layout: Layout, target_index: usize) -> Self {
936936+ Self {
937937+ layout,
938938+ target_index,
939939+ candidate_spots: 0,
940940+ chosen: None,
941941+ }
942942+ }
943943+944944+ pub fn layout(&self) -> Layout {
945945+ self.layout
946946+ }
947947+948948+ /// Returns the number of spots in the given range that satisfy the layout we require
949949+ fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
950950+ debug_assert!(
951951+ range.start.is_aligned_to(self.layout.align())
952952+ && range.end.is_aligned_to(self.layout.align())
953953+ );
954954+955955+ // ranges passed in here can become empty for a number of reasons (aligning might produce ranges
956956+ // where end > start, or the range might be empty to begin with) in either case an empty
957957+ // range means no spots are available
958958+ if range.is_empty() {
959959+ return 0;
960960+ }
961961+962962+ let range_size = range.size();
963963+ if range_size >= self.layout.size() {
964964+ ((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
965965+ } else {
966966+ 0
967967+ }
968968+ }
969969+970970+ pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
971971+ // if we have already chosen a spot, signal the caller to stop
972972+ if self.chosen.is_some() {
973973+ return ControlFlow::Break(());
974974+ }
975975+976976+ let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();
977977+978978+ let spot_count = self.spots_in_range(&aligned_gap);
979979+980980+ self.candidate_spots += spot_count;
981981+982982+ if self.target_index < spot_count {
983983+ self.chosen = Some(
984984+ aligned_gap
985985+ .start
986986+ .checked_add(self.target_index << self.layout.align().ilog2())
987987+ .unwrap(),
988988+ );
989989+990990+ ControlFlow::Break(())
991991+ } else {
992992+ self.target_index -= spot_count;
993993+994994+ ControlFlow::Continue(())
995995+ }
996996+ }
997997+}
+336
libs/mem/src/address_space/batch.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::cmp;
99+use core::num::{NonZero, NonZeroUsize};
1010+1111+use smallvec::SmallVec;
1212+1313+use crate::address_space::{Flush, RawAddressSpace};
1414+use crate::{AccessRules, PhysicalAddress, VirtualAddress};
1515+1616+/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
1717+///
1818+/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
1919+/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
2020+/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
2121+/// this number.
2222+pub struct Batch {
2323+ ops: SmallVec<[BatchOperation; 4]>,
2424+}
/// A single pending operation inside a [`Batch`].
enum BatchOperation {
    Map(MapOperation),
    Unmap(UnmapOperation),
    SetAccessRules(SetAccessRulesOperation),
}

/// Pending call to `RawAddressSpace::map`: map `len` bytes at `virt` to `phys`.
struct MapOperation {
    virt: VirtualAddress,
    phys: PhysicalAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

/// Pending call to `RawAddressSpace::unmap`: unmap `len` bytes at `virt`.
struct UnmapOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
}

/// Pending call to `RawAddressSpace::set_access_rules` for `len` bytes at `virt`.
struct SetAccessRulesOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}
4949+5050+// ===== impl Batch =====
impl Batch {
    /// Construct a new empty [`Batch`].
    pub fn new() -> Self {
        Self {
            ops: SmallVec::new(),
        }
    }

    /// Add a [`map`] operation to the set of batched operations.
    ///
    /// The new operation is merged into an already-enqueued map operation when possible,
    /// otherwise it is appended to the batch.
    ///
    /// # Safety
    ///
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `phys` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`map`]: RawAddressSpace::map
    pub unsafe fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = MapOperation {
            virt,
            phys,
            len,
            access_rules,
        };

        // only map operations are merge candidates for a map operation
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Map(op) => Some(op),
            _ => None,
        });

        // a failed merge hands the operation back through `Err` so the next candidate can try
        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::Map(new));
    }

    /// Add an [`unmap`] operation to the set of batched operations.
    ///
    /// # Safety
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`unmap`]: RawAddressSpace::unmap
    pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) {
        let mut new = UnmapOperation { virt, len };

        // only unmap operations are merge candidates for an unmap operation
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::Unmap(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::Unmap(new));
    }

    /// Add a [`set_access_rules`] operation to the set of batched operations.
    ///
    /// # Preconditions
    ///
    /// NOTE(review): these read like safety preconditions, but unlike [`Self::map`] and
    /// [`Self::unmap`] this function is not `unsafe` — confirm whether it should be.
    ///
    /// - virt..virt+len must be mapped
    /// - `virt` must be aligned to `Self::PAGE_SIZE`
    /// - `len` must be an integer multiple of `Self::PAGE_SIZE`
    ///
    /// [`set_access_rules`]: RawAddressSpace::set_access_rules
    pub fn set_access_rules(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        access_rules: AccessRules,
    ) {
        let mut new = SetAccessRulesOperation {
            virt,
            len,
            access_rules,
        };

        // only set-access-rules operations are merge candidates
        let ops = self.ops.iter_mut().filter_map(|op| match op {
            BatchOperation::SetAccessRules(op) => Some(op),
            _ => None,
        });

        for op in ops {
            match op.try_merge_with(new) {
                Ok(()) => return,
                Err(new_) => new = new_,
            }
        }

        self.ops.push(BatchOperation::SetAccessRules(new));
    }

    /// Flushes the `Batch` ensuring all changes are materialized into the raw address space.
    ///
    /// NOTE(review): on an error from `map` the `?` returns early while `drain(..)` is live,
    /// which silently discards all remaining pending operations — confirm that is intended.
    pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> {
        let mut flush = raw_aspace.flush();
        for op in self.ops.drain(..) {
            match op {
                BatchOperation::Map(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
                    }
                }
                BatchOperation::Unmap(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.unmap(op.virt, op.len, &mut flush);
                    }
                }
                BatchOperation::SetAccessRules(op) => {
                    debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
                    debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));

                    // Safety: the caller promised the correctness of the values on construction of
                    // the operation.
                    unsafe {
                        raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
                    }
                }
            };
        }
        flush.flush()
    }
}
204204+205205+// ===== impl MapOperation =====
206206+207207+impl MapOperation {
208208+ /// Returns true if this operation can be merged with `other`.
209209+ ///
210210+ /// Map operations can be merged if:
211211+ /// - their [`AccessRules`] are the same
212212+ /// - their virtual address ranges are contiguous (no gap between self and other)
213213+ /// - their physical address ranges are contiguous
214214+ /// - the resulting virtual address range still has the same size as the resulting
215215+ /// physical address range
216216+ const fn can_merge_with(&self, other: &Self) -> bool {
217217+ // the access rules need to be the same
218218+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
219219+220220+ let overlap_virt = self.virt.get() <= other.len.get()
221221+ && other.virt.get() <= self.virt.get() + self.len.get();
222222+223223+ let overlap_phys = self.phys.get() <= other.len.get()
224224+ && other.phys.get() <= self.phys.get() + self.len.get();
225225+226226+ let offset_virt = self.virt.get().wrapping_sub(other.virt.get());
227227+ let offset_phys = self.virt.get().wrapping_sub(other.virt.get());
228228+ let same_offset = offset_virt == offset_phys;
229229+230230+ same_rules && overlap_virt && overlap_phys && same_offset
231231+ }
232232+233233+ /// Attempt to merge this operation with `other`.
234234+ ///
235235+ /// If this returns `Ok`, `other` has been merged into `self`.
236236+ ///
237237+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
238238+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
239239+ if self.can_merge_with(&other) {
240240+ let offset = self.virt.get().wrapping_sub(other.virt.get());
241241+ let len = self
242242+ .len
243243+ .get()
244244+ .checked_add(other.len.get())
245245+ .unwrap()
246246+ .wrapping_add(offset);
247247+248248+ self.virt = cmp::min(self.virt, other.virt);
249249+ self.phys = cmp::min(self.phys, other.phys);
250250+ self.len = NonZero::new(len).ok_or(other)?;
251251+252252+ Ok(())
253253+ } else {
254254+ Err(other)
255255+ }
256256+ }
257257+}
258258+259259+// ===== impl UnmapOperation =====
260260+261261+impl UnmapOperation {
262262+ /// Returns true if this operation can be merged with `other`.
263263+ ///
264264+ /// Unmap operations can be merged if:
265265+ /// - their virtual address ranges are contiguous (no gap between self and other)
266266+ const fn can_merge_with(&self, other: &Self) -> bool {
267267+ self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get()
268268+ }
269269+270270+ /// Attempt to merge this operation with `other`.
271271+ ///
272272+ /// If this returns `Ok`, `other` has been merged into `self`.
273273+ ///
274274+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
275275+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
276276+ if self.can_merge_with(&other) {
277277+ let offset = self.virt.get().wrapping_sub(other.virt.get());
278278+ let len = self
279279+ .len
280280+ .get()
281281+ .checked_add(other.len.get())
282282+ .unwrap()
283283+ .wrapping_add(offset);
284284+285285+ self.virt = cmp::min(self.virt, other.virt);
286286+ self.len = NonZero::new(len).ok_or(other)?;
287287+288288+ Ok(())
289289+ } else {
290290+ Err(other)
291291+ }
292292+ }
293293+}
// ===== impl SetAccessRulesOperation =====
296296+297297+impl SetAccessRulesOperation {
298298+ /// Returns true if this operation can be merged with `other`.
299299+ ///
300300+ /// Protect operations can be merged if:
301301+ /// - their [`AccessRules`] are the same
302302+ /// - their virtual address ranges are contiguous (no gap between self and other)
303303+ const fn can_merge_with(&self, other: &Self) -> bool {
304304+ // the access rules need to be the same
305305+ let same_rules = self.access_rules.bits() == other.access_rules.bits();
306306+307307+ let overlap = self.virt.get() <= other.len.get()
308308+ && other.virt.get() <= self.virt.get() + self.len.get();
309309+310310+ same_rules && overlap
311311+ }
312312+313313+ /// Attempt to merge this operation with `other`.
314314+ ///
315315+ /// If this returns `Ok`, `other` has been merged into `self`.
316316+ ///
317317+ /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
318318+ fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
319319+ if self.can_merge_with(&other) {
320320+ let offset = self.virt.get().wrapping_sub(other.virt.get());
321321+ let len = self
322322+ .len
323323+ .get()
324324+ .checked_add(other.len.get())
325325+ .unwrap()
326326+ .wrapping_add(offset);
327327+328328+ self.virt = cmp::min(self.virt, other.virt);
329329+ self.len = NonZero::new(len).ok_or(other)?;
330330+331331+ Ok(())
332332+ } else {
333333+ Err(other)
334334+ }
335335+ }
336336+}
+548
libs/mem/src/address_space/region.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::boxed::Box;
99+use core::alloc::Layout;
1010+use core::marker::PhantomData;
1111+use core::mem::offset_of;
1212+use core::num::NonZeroUsize;
1313+use core::ops::{Bound, Range, RangeBounds};
1414+use core::pin::Pin;
1515+use core::ptr::NonNull;
1616+use core::{cmp, fmt, mem, slice};
1717+1818+use fallible_iterator::FallibleIterator;
1919+use pin_project::pin_project;
2020+2121+use crate::address_space::{Batch, RawAddressSpace};
2222+use crate::vmo::Vmo;
2323+use crate::{AccessRules, AddressRangeExt, VirtualAddress};
/// A contiguous, allocated range of virtual addresses inside an address space,
/// backed by (a slice of) a [`Vmo`] and tracked as a node in a WAVL tree.
#[pin_project]
#[derive(Debug)]
pub struct AddressSpaceRegion<R> {
    /// The virtual address range covered by this region.
    range: Range<VirtualAddress>,
    /// How this region may be accessed.
    access_rules: AccessRules,
    /// The layout this region was allocated for.
    layout: Layout,
    /// The virtual memory object supplying the physical frames backing this region.
    vmo: Vmo,
    /// Byte offset into `vmo` at which this region's backing starts.
    vmo_offset: usize,

    /// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
    subtree_range: Range<VirtualAddress>,
    /// The largest gap in this subtree, used when allocating new regions
    max_gap: usize,
    /// Links to other regions in the WAVL tree
    links: wavltree::Links<AddressSpaceRegion<R>>,

    // Ties the region to its raw address space type without storing one.
    _raw_aspace: PhantomData<R>,
}
4343+4444+impl<R: RawAddressSpace> AddressSpaceRegion<R> {
4545+ pub const fn new(
4646+ spot: VirtualAddress,
4747+ layout: Layout,
4848+ access_rules: AccessRules,
4949+ vmo: Vmo,
5050+ vmo_offset: usize,
5151+ ) -> Self {
5252+ Self {
5353+ range: spot..spot.checked_add(layout.size()).unwrap(),
5454+ access_rules,
5555+ layout,
5656+ vmo,
5757+ vmo_offset,
5858+5959+ max_gap: 0,
6060+ subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
6161+ links: wavltree::Links::new(),
6262+6363+ _raw_aspace: PhantomData,
6464+ }
6565+ }
    /// The virtual address range covered by this region.
    pub const fn range(&self) -> &Range<VirtualAddress> {
        &self.range
    }

    /// The address range covered by this region together with its WAVL tree subtree.
    pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
        &self.subtree_range
    }

    /// The access rules governing this region.
    pub const fn access_rules(&self) -> AccessRules {
        self.access_rules
    }
    /// View this region's memory as a byte slice.
    pub fn as_slice(&self) -> &[u8] {
        let ptr = self.range.start.as_ptr();
        let len = self.range.size();

        // Safety: NOTE(review) — this assumes the entire region is currently mapped and
        // readable for the lifetime of the borrow; confirm callers only use this on
        // committed regions, otherwise this is UB.
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// View this region's memory as a mutable byte slice.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.range.start.as_mut_ptr();
        let len = self.range.size();

        // Safety: NOTE(review) — same mapped-and-writable assumption as `as_slice`;
        // confirm the region's access rules actually permit writes here.
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    /// This region's memory as a raw `NonNull` byte slice (no access performed).
    pub fn as_non_null(&self) -> NonNull<[u8]> {
        let ptr = self.range.start.as_non_null().unwrap();
        NonNull::slice_from_raw_parts(ptr, self.range.size())
    }
    /// Returns `true` if `layout` "fits" this region: the region base satisfies
    /// `layout.align()` and `layout.size()` lies between the originally requested size
    /// (`self.layout.size()`) and the region's actual size.
    pub const fn layout_fits_region(&self, layout: Layout) -> bool {
        self.range.start.is_aligned_to(layout.align())
            && layout.size() >= self.layout.size()
            && layout.size() <= self.range.end.get() - self.range.start.get()
    }
    /// Find physical memory frames to back the given `range`.
    /// After this call succeeds, accesses that align with the given `access_rules` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut R,
    ) -> crate::Result<()> {
        // translate the virtual range into offsets relative to the backing VMO
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut acquired_frames = self.vmo.acquire(vmo_relative).enumerate();
        while let Some((idx, frame)) = acquired_frames.next()? {
            // NOTE(review): `idx` counts frames from the start of `vmo_relative`, but `virt`
            // is derived from `self.range.start` — if `range` does not begin at the region
            // start this looks off by the range's offset into the region. TODO confirm.
            let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();

            // Safety: `virt` and the frame address are page-aligned by construction and the
            // length is exactly one page. NOTE(review): relies on `R::PAGE_SIZE > 0`.
            unsafe {
                batch.map(
                    virt,
                    frame.addr(),
                    NonZeroUsize::new(R::PAGE_SIZE).unwrap(),
                    access_rules,
                );
            }

            if self.vmo.has_content_source() {
                // TODO add virt addr to coalescer
            }
        }

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        // initialize patched holes if necessary
        if self.vmo.has_content_source() {
            // for every region in coalescer
            // figure out content source offset
            // read from content source at offset into region
        }

        Ok(())
    }
    /// Release physical memory frames backing the given `range`.
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn decommit(
        &self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut R,
    ) -> crate::Result<()> {
        // translate the virtual range into offsets relative to the backing VMO
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut released_frames = self.vmo.release(vmo_relative).enumerate();
        while let Some((idx, _frame)) = released_frames.next()? {
            // NOTE(review): `idx` counts frames from the start of `vmo_relative`, but `virt`
            // is derived from `self.range.start` — if `range` does not begin at the region
            // start this looks off by the range's offset into the region. TODO confirm.
            let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();
            // Safety: the page at `virt` was mapped for the frame that was just released.
            unsafe { batch.unmap(virt, NonZeroUsize::new(R::PAGE_SIZE).unwrap()) };

            // if VMO has content source && frame is dirty
            // add virt addr to coalescer
        }

        // for every region in coalescer
        // figure out content source offset
        // write region to content source at offset

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        Ok(())
    }
    /// Zero out the memory in the given `range`.
    /// This MAY release physical memory frames backing the `range`.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn clear(&self, range: impl RangeBounds<VirtualAddress>) -> crate::Result<()> {
        todo!()
    }

    /// Update the access rules of this `AddressSpaceRegion`.
    pub fn update_access_rules(
        &mut self,
        access_rules: AccessRules,
        batch: &mut Batch,
    ) -> crate::Result<()> {
        todo!()
    }

    /// Fetches content in the given `range`. This operates logically equivalent to
    /// a read, write, or instruction fetch (depending on `access_rules`) so that future accesses
    /// are quicker. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn prefetch(
        &self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
    ) -> crate::Result<()> {
        todo!()
    }

    /// Attempts to grow the address space region to `new_len`.
    /// `new_len` MUST be larger than or equal to the current length.
    ///
    /// NOTE(review): the address-space code calls grow/shrink with a `Layout` argument
    /// (see the commented-out `Layout`-based variants below) while these stubs take a
    /// `usize` — reconcile the signatures before implementing.
    pub fn grow(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        todo!()
    }

    /// Attempts to shrink the address space region to `new_len`.
    /// `new_len` MUST be smaller than or equal to the current length.
    pub fn shrink(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        todo!()
    }
231231+232232+ // /// grow region to `new_len`, attempting to grow the VMO accordingly
233233+ // /// `new_layout.size()` mut be greater than or equal to `self.layout.size()`
234234+ // pub fn grow_in_place(
235235+ // &mut self,
236236+ // new_layout: Layout,
237237+ // next_range: Option<Range<VirtualAddress>>,
238238+ // batch: &mut Batch,
239239+ // ) -> crate::Result<()> {
240240+ // if new_layout.align() > self.layout.align() {
241241+ // bail!("cannot grow in-place: New alignment greater than current");
242242+ // }
243243+ //
244244+ // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
245245+ //
246246+ // if let Some(next_range) = next_range
247247+ // && next_range.is_overlapping(&new_range)
248248+ // {
249249+ // bail!("cannot grow in-place: New overlapping with next range");
250250+ // }
251251+ //
252252+ // self.vmo.resize(new_range.size(), batch)?;
253253+ //
254254+ // self.update_range(new_range);
255255+ //
256256+ // Ok(())
257257+ // }
258258+ //
259259+ // /// shrink region to the first `len` bytes, dropping the rest frames.
260260+ // /// `new_layout.size()` mut be smaller than or equal to `self.layout.size()`
261261+ // pub fn shrink(&mut self, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
262262+ // if new_layout.align() > self.layout.align() {
263263+ // bail!("cannot grow in-place: New alignment greater than current");
264264+ // }
265265+ //
266266+ // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
267267+ //
268268+ // self.vmo.resize(new_range.size(), batch)?;
269269+ //
270270+ // self.update_range(new_range);
271271+ //
272272+ // Ok(())
273273+ // }
274274+ //
275275+ // /// move the entire region to the new base address, remapping any already mapped frames
276276+ // pub fn move_to(
277277+ // &mut self,
278278+ // new_base: VirtualAddress,
279279+ // new_layout: Layout,
280280+ // batch: &mut Batch,
281281+ // ) -> crate::Result<()> {
282282+ // let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();
283283+ //
284284+ // self.vmo.resize(new_range.size(), batch)?;
285285+ // self.update_range(new_range);
286286+ //
287287+ // // - for every frame in VMO
288288+ // // - attempt to map at new offset (add maps to batch)
289289+ //
290290+ // todo!()
291291+ // }
292292+ //
293293+ // pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
294294+ // where
295295+ // R: RangeBounds<VirtualAddress>,
296296+ // {
297297+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
298298+ //
299299+ // self.vmo.commit(bounds, will_write, batch)
300300+ // }
301301+ //
302302+ // pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
303303+ // where
304304+ // R: RangeBounds<VirtualAddress>,
305305+ // {
306306+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
307307+ //
308308+ // self.vmo.decommit(bounds, batch)
309309+ // }
310310+ //
311311+ // /// updates the access rules of this region
312312+ // pub fn update_access_rules(
313313+ // &mut self,
314314+ // access_rules: AccessRules,
315315+ // batch: &mut Batch,
316316+ // ) -> crate::Result<()> {
317317+ // // TODO
318318+ // // - for every frame in VMO
319319+ // // - update access rules (add protects to batch)
320320+ // // - update self access rules
321321+ //
322322+ // todo!()
323323+ // }
324324+ //
325325+ // pub fn clear<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
326326+ // where
327327+ // R: RangeBounds<VirtualAddress>,
328328+ // {
329329+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
330330+ //
331331+ // self.vmo.clear(bounds, batch)
332332+ // }
333333+ //
334334+ // pub fn prefetch<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
335335+ // where
336336+ // R: RangeBounds<VirtualAddress>,
337337+ // {
338338+ // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
339339+ //
340340+ // self.vmo.prefetch(bounds, batch)
341341+ // }
    /// Asserts the structural invariants of this region: a non-empty range,
    /// a subtree range enclosing the region's own range, a `max_gap` bounded by the
    /// subtree range, and a range compatible with the stored layout.
    ///
    /// `msg` is prepended to every panic message so callers can identify the check site.
    pub fn assert_valid(&self, msg: &str)
    where
        R: fmt::Debug,
    {
        assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
        assert!(
            self.subtree_range.start <= self.range.start
                && self.range.end <= self.subtree_range.end,
            "{msg}region range cannot be bigger than its subtree range; region={self:?}"
        );
        // NOTE(review): strict `<` — a gap equal to the whole subtree range should be
        // impossible while the subtree contains this (non-empty) region; confirm that
        // `<=` was not intended here.
        assert!(
            self.max_gap < self.subtree_range.size(),
            "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}"
        );
        assert!(
            self.range.start.is_aligned_to(self.layout.align()),
            "{msg}region range is not aligned to its layout; region={self:?}"
        );
        assert!(
            self.range.size() >= self.layout.size(),
            "{msg}region range is smaller than its layout; region={self:?}"
        );

        self.links.assert_valid();
    }
368368+369369+ /// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used
370370+ /// during gap-searching.
371371+ pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool {
372372+ // we need the layout to be padded to alignment
373373+ debug_assert!(layout.size().is_multiple_of(layout.align()));
374374+375375+ self.max_gap >= layout.size()
376376+ }
377377+378378+ /// Returns the left child node in the search tree of regions, used during gap-searching.
379379+ pub fn left_child(&self) -> Option<&Self> {
380380+ Some(unsafe { self.links.left()?.as_ref() })
381381+ }
382382+383383+ /// Returns the right child node in the search tree of regions, used during gap-searching.
384384+ pub fn right_child(&self) -> Option<&Self> {
385385+ Some(unsafe { self.links.right()?.as_ref() })
386386+ }
387387+388388+ /// Returns the parent node in the search tree of regions, used during gap-searching.
389389+ pub fn parent(&self) -> Option<&Self> {
390390+ Some(unsafe { self.links.parent()?.as_ref() })
391391+ }
392392+393393+ #[inline]
394394+ fn bounds_to_vmo_relative(
395395+ &self,
396396+ bounds: impl RangeBounds<VirtualAddress>,
397397+ ) -> (Bound<usize>, Bound<usize>) {
398398+ let start = bounds.start_bound().map(|addr| {
399399+ (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset
400400+ });
401401+ let end = bounds.end_bound().map(|addr| {
402402+ (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset
403403+ });
404404+405405+ (start, end)
406406+ }
    /// Replaces this region's range with `new_range` and re-propagates the gap
    /// search metadata up the tree.
    fn update_range(&mut self, new_range: Range<VirtualAddress>) {
        self.range = new_range;
        // We also must propagate the information about our changed range to the rest of the tree
        // so searching for a free spot returns the correct results.
        Self::propagate_update_to_parent(Some(NonNull::from(self)));
    }
    /// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`]
    /// implementation below after each tree mutation that impacted this node or its subtree in some way
    /// (insertion, rotation, deletion).
    ///
    /// `left` and `right` are the node's current children in the tree.
    ///
    /// Returns `true` if this node's metadata changed (callers use this to decide
    /// whether propagation to the parent is still necessary).
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn update_gap_metadata(
        mut node: NonNull<Self>,
        left: Option<NonNull<Self>>,
        right: Option<NonNull<Self>>,
    ) -> bool {
        // Byte distance between two addresses, clamped to 0 when they are out of order.
        fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize {
            right_first_byte
                .checked_sub_addr(left_last_byte)
                .unwrap_or_default() // TODO use saturating_sub_addr
        }

        let node = unsafe { node.as_mut() };
        let mut left_max_gap = 0;
        let mut right_max_gap = 0;

        // recalculate the subtree_range start, remembering the old value for the change check
        let old_subtree_range_start = if let Some(left) = left {
            let left = unsafe { left.as_ref() };
            let left_gap = gap(left.subtree_range.end, node.range.start);
            left_max_gap = cmp::max(left_gap, left.max_gap);
            mem::replace(&mut node.subtree_range.start, left.subtree_range.start)
        } else {
            mem::replace(&mut node.subtree_range.start, node.range.start)
        };

        // recalculate the subtree range end
        let old_subtree_range_end = if let Some(right) = right {
            let right = unsafe { right.as_ref() };
            let right_gap = gap(node.range.end, right.subtree_range.start);
            right_max_gap = cmp::max(right_gap, right.max_gap);
            mem::replace(&mut node.subtree_range.end, right.subtree_range.end)
        } else {
            mem::replace(&mut node.subtree_range.end, node.range.end)
        };

        // recalculate the max_gap
        let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap));

        old_max_gap != node.max_gap
            || old_subtree_range_start != node.subtree_range.start
            || old_subtree_range_end != node.subtree_range.end
    }
    // Propagate metadata updates from `maybe_node` upwards through its ancestors in
    // the search tree. If a node's metadata had to change, its parent must update
    // its metadata too; propagation stops as soon as a node's metadata is unchanged.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) {
        while let Some(node) = maybe_node {
            let links = unsafe { &node.as_ref().links };
            let changed = Self::update_gap_metadata(node, links.left(), links.right());

            // if the metadata didn't actually change, we don't need to recalculate parents
            if !changed {
                return;
            }

            maybe_node = links.parent();
        }
    }
480480+}
// Safety: `Handle` is `Pin<Box<Self>>`, so elements are heap-allocated and never
// move while linked; `links()` derives the links pointer without creating an
// intermediate reference.
unsafe impl<A: RawAddressSpace> wavltree::Linked for AddressSpaceRegion<A> {
    /// Any heap-allocated type that owns an element may be used.
    ///
    /// An element *must not* move while part of an intrusive data
    /// structure. In many cases, `Pin` may be used to enforce this.
    type Handle = Pin<Box<Self>>; // TODO better handle type

    /// Regions are keyed by the start address of their range.
    type Key = VirtualAddress;

    /// Convert an owned `Handle` into a raw pointer
    fn into_ptr(handle: Self::Handle) -> NonNull<Self> {
        // Safety: wavltree treats the ptr as pinned
        unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
    }

    /// Convert a raw pointer back into an owned `Handle`.
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        // Safety: `NonNull` *must* be constructed from a pinned reference
        // which the tree implementation upholds.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) }
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> {
        // Offset the raw address to the `links` field rather than going through a
        // reference to the whole node.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }

    fn get_key(&self) -> &Self::Key {
        &self.range.start
    }

    /// Freshly inserted nodes are leaves: their subtree metadata must equal their
    /// own range with no gap, then ancestors are updated.
    fn after_insert(self: Pin<&mut Self>) {
        debug_assert_eq!(self.subtree_range.start, self.range.start);
        debug_assert_eq!(self.subtree_range.end, self.range.end);
        debug_assert_eq!(self.max_gap, 0);
        Self::propagate_update_to_parent(self.links.parent());
    }

    /// After removal only the former parent chain needs its metadata recomputed.
    fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) {
        Self::propagate_update_to_parent(parent);
    }

    /// After a rotation `self` has taken `parent`'s place in the tree, so it
    /// inherits the parent's subtree metadata, and the (demoted) parent is
    /// recomputed from its new children.
    fn after_rotate(
        self: Pin<&mut Self>,
        parent: NonNull<Self>,
        sibling: Option<NonNull<Self>>,
        lr_child: Option<NonNull<Self>>,
        side: wavltree::Side,
    ) {
        let this = self.project();
        // Safety: caller ensures ptr is valid
        let _parent = unsafe { parent.as_ref() };

        this.subtree_range.start = _parent.subtree_range.start;
        this.subtree_range.end = _parent.subtree_range.end;
        *this.max_gap = _parent.max_gap;

        if side == wavltree::Side::Left {
            Self::update_gap_metadata(parent, sibling, lr_child);
        } else {
            Self::update_gap_metadata(parent, lr_child, sibling);
        }
    }
}
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::cmp::PartialEq;
1010+use core::fmt;
1111+use core::fmt::Debug;
1212+use core::mem::offset_of;
1313+use core::ops::Deref;
1414+use core::ptr::NonNull;
1515+use core::sync::atomic;
1616+use core::sync::atomic::{AtomicUsize, Ordering};
1717+1818+use cordyceps::{Linked, list};
1919+use pin_project::pin_project;
2020+2121+use crate::PhysicalAddress;
2222+use crate::frame_alloc::FrameAllocator;
/// Soft limit on the amount of references that may be made to a `Frame`.
///
/// Mirrors `Arc`'s behavior: exceeding `isize::MAX` references is treated as a
/// refcount overflow and aborts via the assertion in `Clone for FrameRef`.
const MAX_REFCOUNT: usize = isize::MAX as usize;
/// A reference-counted handle to a [`Frame`].
///
/// Cloning increments the frame's refcount (see the `Clone` impl); dropping the
/// last handle returns the frame to its allocator (see the `Drop` impl).
pub struct FrameRef {
    // Pointer to the shared bookkeeping entry for the physical frame.
    frame: NonNull<Frame>,
    // Allocator the frame came from; used to free it when the last handle drops.
    alloc: &'static dyn FrameAllocator,
}
/// Bookkeeping entry for a single physical memory frame.
#[pin_project(!Unpin)]
#[derive(Debug)]
pub struct Frame {
    /// Physical address of the frame this entry describes.
    addr: PhysicalAddress,
    /// Number of live [`FrameRef`] handles to this frame.
    refcount: AtomicUsize,
    /// Intrusive list links; pinned because other list nodes hold pointers to us.
    #[pin]
    links: list::Links<Self>,
}
4040+4141+// ===== impl FrameRef =====
4242+4343+impl Clone for FrameRef {
4444+ /// Makes a clone of the `Frame`.
4545+ ///
4646+ /// This creates reference to the same `FrameInfo`, increasing the reference count by one.
4747+ fn clone(&self) -> Self {
4848+ // Increase the reference count by one. Using relaxed ordering, as knowledge of the
4949+ // original reference prevents other threads from erroneously deleting
5050+ // the object.
5151+ //
5252+ // Again, restating what the `Arc` implementation quotes from the
5353+ // [Boost documentation][1]:
5454+ //
5555+ // > Increasing the reference counter can always be done with memory_order_relaxed: New
5656+ // > references to an object can only be formed from an existing
5757+ // > reference, and passing an existing reference from one thread to
5858+ // > another must already provide any required synchronization.
5959+ //
6060+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
6161+ let old_size = self.refcount.fetch_add(1, Ordering::Relaxed);
6262+ debug_assert_ne!(old_size, 0);
6363+6464+ // Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
6565+ // `Frame`s somewhere (which we really shouldn't but just in case). Overflowing the refcount
6666+ // would *really* bad as it would treat the frame as free and potentially cause a use-after-free
6767+ // scenario. Realistically this branch should never be taken.
6868+ //
6969+ // Also worth noting: Just like `Arc`, the refcount could still overflow when in between
7070+ // the load above and this check some other cpu increased the refcount from `isize::MAX` to
7171+ // `usize::MAX` but that seems unlikely. The other option, doing the comparison and update in
7272+ // one conditional atomic operation produces much worse code, so if its good enough for the
7373+ // standard library, it is good enough for us.
7474+ assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");
7575+7676+ unsafe { Self::from_raw_parts(self.frame, self.alloc.clone()) }
7777+ }
7878+}
impl Drop for FrameRef {
    /// Drops this `FrameRef`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        // Fast path: other handles still exist, only the count changes.
        if self.refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of the `Frame` happen before freeing it.
        // Because it is marked `Release`, the decreasing of the reference count synchronizes
        // with this `Acquire` fence. This means that use of the `Frame` happens before decreasing
        // the reference count, which happens before this fence, which happens before freeing it.
        //
        // This section of the [Boost documentation][1] as quoted in Rust's `Arc` implementation
        // may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        self.drop_slow();
    }
}
111111+112112+impl Debug for FrameRef {
113113+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
114114+ f.debug_struct("FrameRef")
115115+ .field("ptr", &self.frame)
116116+ .finish_non_exhaustive()
117117+ }
118118+}
impl Deref for FrameRef {
    type Target = Frame;

    fn deref(&self) -> &Self::Target {
        // Safety: `self.frame` points to a live `Frame`; the refcount held by this
        // handle keeps it alive at least as long as `self` (see the `Drop` impl).
        unsafe { self.frame.as_ref() }
    }
}
127127+128128+impl FrameRef {
129129+ unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self {
130130+ Self { frame, alloc }
131131+ }
132132+133133+ #[inline(never)]
134134+ fn drop_slow(&mut self) {
135135+ let layout = unsafe {
136136+ Layout::from_size_align_unchecked(self.alloc.page_size(), self.alloc.page_size())
137137+ };
138138+ unsafe {
139139+ self.alloc.deallocate(self.frame, layout);
140140+ }
141141+ }
142142+}
143143+144144+// ===== impl Frame =====
// Safety: NOTE(review) — the original comment cites an `assert_impl_all!` that is
// not present in this file. `Frame` holds plain data (`PhysicalAddress`), an
// `AtomicUsize` refcount, and intrusive `list::Links`; confirm the links are only
// mutated while the owning list is exclusively borrowed before relying on this.
unsafe impl Send for Frame {}

// Safety: see the `Send` impl above — shared access goes through the atomic
// refcount only (same NOTE(review) applies).
unsafe impl Sync for Frame {}
impl PartialEq<Frame> for &Frame {
    // NOTE(review): equality includes the *current* refcount (a relaxed atomic
    // load), so two entries for the same address compare unequal while their
    // refcounts differ, and the result can race with concurrent clones/drops.
    // Confirm this is intentional (it looks test-oriented).
    fn eq(&self, other: &Frame) -> bool {
        self.refcount() == other.refcount() && self.addr == other.addr
    }
}
157157+158158+impl Frame {
159159+ pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
160160+ Self {
161161+ addr,
162162+ refcount: AtomicUsize::new(initial_refcount),
163163+ links: list::Links::new(),
164164+ }
165165+ }
166166+167167+ pub fn refcount(&self) -> usize {
168168+ self.refcount.load(Ordering::Relaxed)
169169+ }
170170+171171+ pub fn addr(&self) -> PhysicalAddress {
172172+ self.addr
173173+ }
174174+}
// Safety: `Frame`s live in the allocator's static frame table and never move;
// `links()` derives the links pointer by raw address arithmetic.
unsafe impl Linked<list::Links<Self>> for Frame {
    /// Frames are passed around by raw pointer; the frame table owns the storage.
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        // Offset the raw address to the `links` field rather than going through a
        // reference to the whole `Frame`.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+137
libs/mem/src/frame_alloc.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+mod area;
99+mod area_selection;
1010+1111+use core::alloc::Layout;
1212+use core::cell::RefCell;
1313+use core::cmp;
1414+use core::ops::Range;
1515+use core::ptr::NonNull;
1616+use core::sync::atomic::{AtomicUsize, Ordering};
1717+1818+use cordyceps::List;
1919+use cpu_local::collection::CpuLocal;
2020+use fallible_iterator::FallibleIterator;
2121+use lock_api::Mutex;
2222+use smallvec::SmallVec;
2323+2424+use crate::address_space::RawAddressSpace;
2525+use crate::frame_alloc::area::Area;
2626+use crate::frame_alloc::area_selection::select_areas;
2727+use crate::{Frame, PhysicalAddress};
/// Error returned when a frame allocation request cannot be satisfied.
#[derive(Debug)]
pub struct AllocError;
/// Allocator interface for physical memory frames.
///
/// # Safety
///
/// NOTE(review): the trait is declared `unsafe` without a documented contract.
/// Implementations presumably must hand out frames that are valid, unique, and
/// `page_size()`-aligned — confirm and document the exact invariant.
pub unsafe trait FrameAllocator: Send + Sync + 'static {
    /// Allocates a block of frames satisfying `layout`.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
    /// Deallocates a block previously returned by [`Self::allocate`] with the same `layout`.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
    /// The base page size, in bytes, this allocator works with.
    fn page_size(&self) -> usize;
}

/// Maximum number of frames kept in each CPU-local free cache.
const MAX_FRAMES_IN_CACHE: usize = 256;
/// Physical frame allocator: a set of per-area buddy allocators fronted by
/// CPU-local single-frame caches.
pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> {
    /// Buddy-allocator areas covering the allocatable physical ranges.
    areas: Mutex<L, SmallVec<[Area<A>; 4]>>,
    /// Per-CPU stash of single frames, used to skip the `areas` lock.
    cpu_local_cache: CpuLocal<RefCell<List<Frame>>>,
    /// Largest alignment any area can currently serve (see `Area::max_alignment_hint`).
    max_alignment_hint: AtomicUsize,
}
impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> {
    /// Builds a frame allocator from the given allocatable physical regions.
    ///
    /// Each selected region becomes a buddy-allocator [`Area`]; the largest
    /// alignment any area can serve seeds the alignment hint.
    ///
    /// # Errors
    ///
    /// Propagates any error produced while selecting areas.
    pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> {
        let mut max_alignment_hint = 0;
        let mut areas = SmallVec::new();

        let mut selections = select_areas::<A>(allocatable_regions);
        while let Some(selection) = selections.next()? {
            let area = Area::new(selection.area, selection.bookkeeping);
            max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint());
            areas.push(area);
        }

        Ok(Self {
            areas: Mutex::new(areas),
            cpu_local_cache: CpuLocal::new(),
            max_alignment_hint: AtomicUsize::new(max_alignment_hint),
        })
    }

    /// Largest alignment this allocator can currently serve (relaxed read; a hint only).
    pub fn max_alignment_hint(&self) -> usize {
        self.max_alignment_hint.load(Ordering::Relaxed)
    }

    /// Tries to satisfy a single-page, page-aligned request from the CPU-local cache.
    ///
    /// Returns `None` for any other layout or when the cache is empty.
    fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();
            cache.pop_back()
        } else {
            None
        }
    }

    /// Tries to stash a freed single-page block in the CPU-local cache.
    ///
    /// Returns `true` if the frame was cached (the caller must not free it again);
    /// `false` if the layout doesn't qualify or the cache is full.
    fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();

            if cache.len() < MAX_FRAMES_IN_CACHE {
                cache.push_back(block);
                return true;
            }
        }

        false
    }
}
9191+9292+unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
9393+ for &'static FrameAlloc<L, A>
9494+{
9595+ fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
9696+ // attempt to allocate from the CPU-local cache first
9797+ if let Some(frame) = self.allocate_local(layout) {
9898+ return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1));
9999+ }
100100+101101+ let mut areas = self.areas.lock();
102102+ for area in areas.iter_mut() {
103103+ if let Ok(frames) = area.allocate(layout) {
104104+ return Ok(frames);
105105+ }
106106+ }
107107+108108+ Err(AllocError)
109109+ }
110110+111111+ unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) {
112112+ // attempt to place the frame into the CPU-local cache first
113113+ if self.deallocate_local(block, layout) {
114114+ return;
115115+ }
116116+117117+ let mut areas = self.areas.lock();
118118+ for area in areas.iter_mut() {
119119+ let block_ = unsafe { block.as_ref() };
120120+121121+ if area.contains_frame(block_.addr()) {
122122+ unsafe { area.deallocate(block, layout) };
123123+124124+ self.max_alignment_hint
125125+ .fetch_max(area.max_alignment_hint(), Ordering::Relaxed);
126126+127127+ return;
128128+ }
129129+ }
130130+131131+ unreachable!();
132132+ }
133133+134134+ fn page_size(&self) -> usize {
135135+ A::PAGE_SIZE
136136+ }
137137+}
+444
libs/mem/src/frame_alloc/area.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use core::alloc::Layout;
99+use core::marker::PhantomData;
1010+use core::mem::MaybeUninit;
1111+use core::ops::Range;
1212+use core::ptr::NonNull;
1313+use core::{cmp, fmt};
1414+1515+use cordyceps::List;
1616+1717+use crate::address_space::RawAddressSpace;
1818+use crate::frame_alloc::AllocError;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
/// Number of buddy size classes: orders `0..MAX_ORDER`, i.e. block sizes from one
/// page up to `PAGE_SIZE << (MAX_ORDER - 1)`.
const MAX_ORDER: usize = 11;
/// A contiguous range of physical memory managed by a binary-buddy allocator.
pub struct Area<A: RawAddressSpace> {
    /// The physical range this area manages.
    area: Range<PhysicalAddress>,
    /// One bookkeeping slot per page in `area`; entries are initialized lazily.
    frames: &'static mut [MaybeUninit<Frame>],

    /// One free list per size class; order `k` holds blocks of `1 << k` pages.
    free_lists: [List<Frame>; MAX_ORDER],

    /// Largest order observed on a free list; used as an alignment hint only.
    max_order: usize,
    /// Total number of pages covered by `area`.
    total_frames: usize,
    /// Pages currently handed out.
    used_frames: usize,

    _aspace: PhantomData<A>,
}
// Safety: NOTE(review) — `Area` holds raw intrusive lists and a `&'static mut`
// frame table; in this crate it is only mutated behind the `Mutex` in `FrameAlloc`.
// Confirm no aliasing of the frame table escapes before relying on these impls.
unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {}
// Safety: see the `Send` impl above; `&Area` exposes no interior mutability here.
unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {}
3838+3939+impl<A: RawAddressSpace> fmt::Debug for Area<A> {
4040+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4141+ f.debug_struct("Area")
4242+ .field("area", &self.area)
4343+ .field(
4444+ "frames",
4545+ &format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len()),
4646+ )
4747+ .field("free_lists", &self.free_lists)
4848+ .field("max_order", &self.max_order)
4949+ .field("total_frames", &self.total_frames)
5050+ .field("used_frames", &self.used_frames)
5151+ .finish()
5252+ }
5353+}
5454+5555+impl<A: RawAddressSpace> Area<A> {
5656+ pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self {
5757+ let mut free_lists = [const { List::new() }; MAX_ORDER];
5858+ let mut total_frames = 0;
5959+ let mut max_order = 0;
6060+6161+ let mut remaining_bytes = area.size();
6262+ let mut addr = area.start;
6363+6464+ // This is the main area initialization loop. We loop through the `area` "chopping off" the
6565+ // largest possible min_block_size-aligned block from the area and add that to its corresponding
6666+ // free list.
6767+ //
6868+ // Note: Remember that for buddy allocators `size == align`. That means we both need to check
6969+ // the alignment and size of our remaining area and can only chop off whatever is smaller.
7070+ while remaining_bytes > 0 {
7171+ // println!("processing next chunk. remaining_bytes={remaining_bytes};addr={addr:?}");
7272+7373+ // the largest size we can chop off given the alignment of the remaining area
7474+ let max_align = if addr == PhysicalAddress::ZERO {
7575+ // if area happens to start exactly at address 0x0 our calculation below doesn't work.
7676+ // address 0x0 actually supports *any* alignment so we special-case it and return `MAX`
7777+ usize::MAX
7878+ } else {
7979+ // otherwise mask out the least significant bit of the address to figure out its alignment
8080+ addr.get() & (!addr.get() + 1)
8181+ };
8282+ // the largest size we can chop off given the size of the remaining area
8383+ // which is the next smaller power of two
8484+ let max_size = 1 << remaining_bytes.ilog2();
8585+8686+ // our chosen size will be the smallest of
8787+ // - the maximum size by remaining areas alignment
8888+ // - the maximum size by remaining areas size
8989+ // - the maximum block size supported by this allocator
9090+ let size = cmp::min(
9191+ cmp::min(max_align, max_size),
9292+ A::PAGE_SIZE << (MAX_ORDER - 1),
9393+ );
9494+ debug_assert!(size.is_multiple_of(A::PAGE_SIZE));
9595+9696+ let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize;
9797+9898+ {
9999+ let frame = frames[total_frames].write(Frame::new(addr, 0));
100100+101101+ free_lists[order].push_back(NonNull::from(frame));
102102+ }
103103+104104+ total_frames += 1 << order;
105105+ max_order = cmp::max(max_order, order);
106106+ addr = addr.checked_add(size).unwrap();
107107+ remaining_bytes -= size;
108108+ }
109109+110110+ // Make sure we've accounted for all frames
111111+ debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE);
112112+113113+ Self {
114114+ area,
115115+ frames,
116116+117117+ free_lists,
118118+119119+ max_order,
120120+ total_frames,
121121+ used_frames: 0,
122122+123123+ _aspace: PhantomData,
124124+ }
125125+ }
126126+127127+ pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
128128+ #[cfg(debug_assertions)]
129129+ self.assert_valid();
130130+131131+ let min_order = self.allocation_order(layout)?;
132132+133133+ // Starting at the smallest sufficient size class, search for a free block. If we find one in
134134+ // a free list, return it and its order.
135135+ let (block, block_order) = self.free_lists[min_order..]
136136+ .iter_mut()
137137+ .enumerate()
138138+ .find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order)))
139139+ .ok_or(AllocError)?;
140140+141141+ // if the block we found is larger than the `min_order` we need, we repeatedly split off
142142+ // the upper half (of decreasing size) until we reach the desired size. The split off blocks
143143+ // are returned to their appropriate free lists.
144144+ for order in (min_order..block_order).rev() {
145145+ let block = unsafe { block.as_ref() };
146146+ let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap();
147147+ let buddy = self.frame_for_addr(buddy_addr).unwrap();
148148+149149+ let buddy = buddy.write(Frame::new(buddy_addr, 0));
150150+ let buddy = NonNull::from(buddy);
151151+152152+ self.free_lists[order].push_back(buddy);
153153+ }
154154+155155+ let alloc_size_frames = 1 << min_order;
156156+157157+ // lazily initialize all frames
158158+ for idx in 0..alloc_size_frames {
159159+ let block = unsafe { block.as_ref() };
160160+ let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap();
161161+162162+ let frame = self.frame_for_addr(addr).unwrap();
163163+ frame.write(Frame::new(addr, 1));
164164+ }
165165+166166+ self.used_frames += alloc_size_frames;
167167+168168+ #[cfg(debug_assertions)]
169169+ self.assert_valid();
170170+171171+ Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames))
172172+ }
173173+174174+ pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) {
175175+ #[cfg(debug_assertions)]
176176+ self.assert_valid();
177177+178178+ let initial_order = self.allocation_order(layout).unwrap();
179179+ let mut order = initial_order;
180180+181181+ while order < self.free_lists.len() - 1 {
182182+ let block_ = unsafe { block.as_ref() };
183183+ if let Some(buddy) = self.buddy_addr(order, block_.addr())
184184+ && cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1))
185185+ && self.remove_from_free_list(order, buddy)
186186+ {
187187+ let buddy: NonNull<Frame> =
188188+ NonNull::from(self.frame_for_addr(buddy).unwrap()).cast();
189189+ block = cmp::min(buddy, block);
190190+ order += 1;
191191+ } else {
192192+ break;
193193+ }
194194+ }
195195+196196+ self.free_lists[order].push_back(block);
197197+ self.used_frames -= 1 << initial_order;
198198+ self.max_order = cmp::max(self.max_order, order);
199199+200200+ #[cfg(debug_assertions)]
201201+ self.assert_valid();
202202+ }
203203+204204+ pub fn max_alignment_hint(&self) -> usize {
205205+ self.order_size(self.max_order)
206206+ }
207207+208208+ fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> {
209209+ let relative = addr.checked_sub_addr(self.area.start).unwrap();
210210+ let idx = relative >> A::PAGE_SIZE_LOG_2;
211211+ Some(&mut self.frames[idx])
212212+ }
213213+214214+ pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool {
215215+ self.area.contains(&addr)
216216+ }
217217+218218+ fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> {
219219+ assert!(block >= self.area.start);
220220+ assert!(block.is_aligned_to(A::PAGE_SIZE << order));
221221+222222+ let relative = block.checked_sub_addr(self.area.start).unwrap();
223223+ let size = self.order_size(order);
224224+ if size >= self.area.size() {
225225+ // MAX_ORDER blocks do not have buddies
226226+ None
227227+ } else {
228228+ // Fun: We can find our buddy by xoring the right bit in our
229229+ // offset from the base of the heap.
230230+ Some(self.area.start.checked_add(relative ^ size).unwrap())
231231+ }
232232+ }
233233+234234+ fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool {
235235+ let mut c = self.free_lists[order].cursor_front_mut();
236236+237237+ while let Some(candidate) = c.current() {
238238+ if candidate.addr() == to_remove {
239239+ c.remove_current().unwrap();
240240+ return true;
241241+ }
242242+243243+ c.move_next();
244244+ }
245245+246246+ false
247247+ }
248248+249249+ // The size of the blocks we allocate for a given order.
250250+ const fn order_size(&self, order: usize) -> usize {
251251+ 1 << (A::PAGE_SIZE_LOG_2 as usize + order)
252252+ }
253253+254254+ const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> {
255255+ // We can only allocate blocks that are at least one page
256256+ if !layout.size().is_multiple_of(A::PAGE_SIZE) {
257257+ return Err(AllocError);
258258+ }
259259+260260+ // We can only allocate blocks that are at least page aligned
261261+ if !layout.align().is_multiple_of(A::PAGE_SIZE) {
262262+ return Err(AllocError);
263263+ }
264264+265265+ let size = layout.size().next_power_of_two();
266266+267267+ // We cannot allocate blocks larger than our largest size class
268268+ if size > self.order_size(self.free_lists.len()) {
269269+ return Err(AllocError);
270270+ }
271271+272272+ Ok(size)
273273+ }
274274+275275+ const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> {
276276+ if let Ok(size) = self.allocation_size(layout) {
277277+ Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize)
278278+ } else {
279279+ Err(AllocError)
280280+ }
281281+ }
    /// Debug/test helper: checks the internal invariants of this area.
    ///
    /// # Panics
    ///
    /// Panics if any free list is internally inconsistent, if a free block is
    /// not aligned to its order's block size, or if free + used frames do not
    /// add up to the total frame count.
    fn assert_valid(&self) {
        for (order, l) in self.free_lists.iter().enumerate() {
            l.assert_valid();

            // Every block on the order-`order` list must be aligned to that
            // order's block size.
            for f in l {
                assert!(
                    f.addr().is_aligned_to(A::PAGE_SIZE << order),
                    "frame {f:?} is not aligned to order {order}"
                );
            }
        }

        // Conservation: free frames plus used frames must equal all frames.
        assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames);
    }
297297+}
298298+299299+fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize {
300300+ let mut frames = 0;
301301+ for (order, l) in area.free_lists.iter().enumerate() {
302302+ frames += l.len() << order;
303303+ }
304304+ frames
305305+}
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest};

    use super::*;
    use crate::test_utils::TestAddressSpace;

    const PAGE_SIZE: usize = 4096;

    prop_compose! {
        // Generate arbitrary frame counts up to the desired maximum, then
        // multiply them by PAGE_SIZE, thus producing only page-aligned
        // byte offsets in the desired range.
        fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE }
    }

    proptest! {
        // An area based at address 0 must split itself into power-of-two
        // chunks largest-to-smallest; we replay that greedy split and pop a
        // matching block from the free lists at each step.
        #[test]
        fn new_fixed_base(num_frames in 0..50_000usize) {
            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE),
                {
                    // Leak the bookkeeping so it satisfies the 'static bound.
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // let's check whether the area correctly initialized itself
            //
            // since we start on an aligned base address (0x0) we expect it have split off chunks
            // largest-to-smallest. We replicate the process here, but take a block from its free list.
            let mut frames_remaining = num_frames;
            while frames_remaining > 0 {
                // clamp the order we calculate at the max possible order
                let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1);

                let chunk = area.free_lists[chunk_order].pop_back();
                prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}");

                frames_remaining -= 1 << chunk_order;
            }
            // At the end of this process we expect all free lists to be empty
            prop_assert!(area.free_lists.iter().all(|list| list.is_empty()));
        }

        // Construction from an arbitrary (page-aligned) base must at least
        // uphold the area invariants.
        #[test]
        fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) {

            let area = {
                // Discard inputs whose end address would overflow.
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // TODO figure out if we can test the free lists in a sensible way
        }

        // NOTE(review): despite the name, this currently only verifies that a
        // freshly constructed area tracks exactly `num_frames` free frames —
        // it never allocates to exhaustion.
        #[test]
        fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            debug_assert_eq!(frames_in_area(&mut area), num_frames);
        }

        // Allocating and immediately deallocating a block must restore the
        // free lists to the exact state of an untouched twin area.
        #[test]
        fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            // Pristine reference copy that is never allocated from.
            let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area.clone(),
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area1.assert_valid();

            let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area2.assert_valid();

            // we can only allocate contiguous blocks of the largest order available
            prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE));

            let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap();

            let block = area2.allocate(layout).unwrap();
            // NOTE(review): `block.len()` is a byte length while
            // `alloc_frames` counts frames — presumably this should compare
            // against `alloc_frames * PAGE_SIZE`; confirm.
            prop_assert!(block.len() >= alloc_frames);

            unsafe { area2.deallocate(block.cast(), layout); }

            // NOTE(review): plain `assert_eq!` aborts the whole proptest run
            // instead of reporting a minimized failure like `prop_assert_eq!`.
            assert_eq!(frames_in_area(&area2), num_frames);

            // The used and untouched areas must end up with identical free lists.
            for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() {
                prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len());
            }
        }
    }
}
+133
libs/mem/src/frame_alloc/area_selection.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::slice;
99+use core::fmt::Formatter;
1010+use core::marker::PhantomData;
1111+use core::mem;
1212+use core::mem::MaybeUninit;
1313+use core::ops::Range;
1414+1515+use fallible_iterator::FallibleIterator;
1616+use smallvec::SmallVec;
1717+1818+use crate::address_space::RawAddressSpace;
1919+use crate::{AddressRangeExt, Frame, PhysicalAddress};
/// Upper bound on the bookkeeping bytes we are willing to "waste" on the
/// holes between coalesced regions before an area stops growing.
const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB

/// One selected frame-allocator area, produced by [`ArenaSelections`].
#[derive(Debug)]
pub struct AreaSelection {
    /// The usable physical range of the area (bookkeeping space excluded).
    pub area: Range<PhysicalAddress>,
    /// Per-frame bookkeeping entries, carved out of the area's own tail.
    pub bookkeeping: &'static mut [MaybeUninit<Frame>],
    /// Bookkeeping bytes spent on holes swallowed while building this area.
    pub wasted_bytes: usize,
}

/// Error returned when a candidate range cannot host a usable area.
#[derive(Debug)]
pub struct SelectionError {
    /// The physical range that could not be turned into an area.
    pub range: Range<PhysicalAddress>,
}

/// Fallible iterator over [`AreaSelection`]s; created by [`select_areas`].
pub struct ArenaSelections<A: RawAddressSpace> {
    /// Regions not yet consumed; popped from the back as areas are built.
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
    /// Running tally of hole bookkeeping bytes for the area being built.
    wasted_bytes: usize,

    _aspace: PhantomData<A>,
}
4141+4242+pub fn select_areas<A: RawAddressSpace>(
4343+ allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
4444+) -> ArenaSelections<A> {
4545+ ArenaSelections {
4646+ allocatable_regions,
4747+ wasted_bytes: 0,
4848+4949+ _aspace: PhantomData,
5050+ }
5151+}
impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> {
    type Item = AreaSelection;
    type Error = SelectionError;

    /// Produces the next area: starts from the last remaining region and
    /// greedily merges neighbouring regions (swallowing the holes between
    /// them) while the bookkeeping cost of those holes stays within
    /// [`MAX_WASTED_AREA_BYTES`], then carves the area's own bookkeeping
    /// slice out of its tail.
    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        // No regions left: iteration is complete.
        let Some(mut area) = self.allocatable_regions.pop() else {
            return Ok(None);
        };

        while let Some(region) = self.allocatable_regions.pop() {
            debug_assert!(!area.is_overlapping(&region));

            // Number of page frames in the gap between `area` and `region`
            // (the regions are disjoint, so one lies entirely above the other).
            let pages_in_hole = if area.end <= region.start {
                // the region is higher than the current area
                region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE
            } else {
                debug_assert!(region.end <= area.start);
                // the region is lower than the current area
                area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE
            };

            // Swallowing the hole costs one bookkeeping `Frame` per page in it.
            let waste_from_hole = size_of::<Frame>() * pages_in_hole;

            if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES {
                // Merging would be too wasteful: put the region back for the
                // next area and finish this one.
                self.allocatable_regions.push(region);
                break;
            } else {
                self.wasted_bytes += waste_from_hole;

                // Extend the area across the hole so it covers `region` too.
                if area.end <= region.start {
                    area.end = region.end;
                } else {
                    area.start = region.start;
                }
            }
        }

        let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap();
        // We can't use empty areas anyway
        if aligned.is_empty() {
            return Err(SelectionError { range: aligned });
        }

        // One bookkeeping entry per page in the aligned area.
        // NOTE(review): computed before the bookkeeping slice is trimmed off
        // below, so it also covers the pages that end up holding the
        // bookkeeping itself — presumably intentional slack; confirm.
        let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE;

        // Place the bookkeeping at the page-aligned tail of the area.
        let bookkeeping_start = aligned
            .end
            .checked_sub(bookkeeping_size_frames * size_of::<Frame>())
            .unwrap()
            .align_down(A::PAGE_SIZE);

        // The area has no space to hold its own bookkeeping
        if bookkeeping_start < aligned.start {
            return Err(SelectionError { range: aligned });
        }

        // Safety: `bookkeeping_start..aligned.end` lies inside the selected
        // area and is large enough for `bookkeeping_size_frames` entries.
        // This assumes the physical range is directly accessible through
        // `as_mut_ptr` at this point — TODO confirm against the caller's
        // mapping setup.
        let bookkeeping = unsafe {
            slice::from_raw_parts_mut(
                bookkeeping_start.as_mut_ptr().cast(),
                bookkeeping_size_frames,
            )
        };
        // Shrink the usable area so it no longer overlaps its bookkeeping.
        aligned.end = bookkeeping_start;

        Ok(Some(AreaSelection {
            area: aligned,
            bookkeeping,
            wasted_bytes: mem::take(&mut self.wasted_bytes),
        }))
    }
}
124124+125125+// ===== impl SelectionError =====
126126+127127+impl core::fmt::Display for SelectionError {
128128+ fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
129129+ todo!()
130130+ }
131131+}
132132+133133+impl core::error::Error for SelectionError {}
+18
libs/mem/src/lib.rs
#![cfg_attr(not(test), no_std)]
extern crate alloc;

mod access_rules;
pub mod address_space;
mod addresses;
mod frame;
pub mod frame_alloc;
#[cfg(test)]
mod test_utils;
mod utils;
mod vmo;

/// Crate-wide result type; errors are reported through `anyhow`.
pub type Result<T> = anyhow::Result<T>;

pub use access_rules::{AccessRules, WriteOrExecute};
pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
pub use frame::{Frame, FrameRef};
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
/// Debug-build-only check of an `unsafe` function's preconditions.
///
/// Expands to a local `const fn precondition_check(...)` wrapping the given
/// boolean expression, and calls it with the given arguments — but only
/// under `cfg(debug_assertions)`, so release builds pay nothing.
macro_rules! assert_unsafe_precondition_ {
    ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => {
        {
            // This check is inlineable, but not by the MIR inliner.
            // The reason for this is that the MIR inliner is in an exceptionally bad position
            // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`,
            // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and
            // would be bad for compile times.
            //
            // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without
            // inlining the check. If it's `true`, it can inline it and get significantly better performance.
            #[inline]
            const fn precondition_check($($name:$ty),*) {
                assert!($e, concat!("unsafe precondition(s) violated: ", $message,
                    "\n\nThis indicates a bug in the program. \
                    This Undefined Behavior check is optional, and cannot be relied on for safety."))
            }

            // Only evaluate the arguments and run the check in debug builds.
            #[cfg(debug_assertions)]
            precondition_check($($arg,)*);
        }
    };
}
pub(crate) use assert_unsafe_precondition_;
+418
libs/mem/src/vmo.rs
···11+// Copyright 2025. Jonas Kruckenberg
22+//
33+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
44+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
55+// http://opensource.org/licenses/MIT>, at your option. This file may not be
66+// copied, modified, or distributed except according to those terms.
77+88+use alloc::sync::Arc;
99+use core::ops::{Bound, Range, RangeBounds};
1010+use core::{fmt, ptr};
1111+1212+use anyhow::ensure;
1313+use fallible_iterator::FallibleIterator;
1414+use lock_api::RwLock;
1515+use smallvec::SmallVec;
1616+1717+use crate::frame_list::FrameList;
1818+use crate::{FrameRef, PhysicalAddress};
/// A type-erased virtual memory object: an opaque data pointer plus a vtable
/// of operations on it.
pub struct Vmo {
    // User-supplied debug name; see [`Vmo::named`].
    name: &'static str,
    // The type-erased implementation (data pointer + vtable).
    vmo: RawVmo,
}

/// The raw, type-erased representation backing a [`Vmo`].
#[derive(Debug)]
struct RawVmo {
    // Implementation-defined state pointer, passed as the first argument to
    // every vtable function.
    data: *const (),
    vtable: &'static RawVmoVTable,
}

/// The table of operations that defines a [`Vmo`] implementation.
///
/// Every function receives the owning [`RawVmo`]'s `data` pointer as its
/// first argument.
#[derive(Copy, Clone, Debug)]
struct RawVmoVTable {
    // Duplicates the VMO handle (e.g. bumps a refcount).
    clone: unsafe fn(*const ()) -> RawVmo,
    // Makes the frame at `index` present; see [`Vmo::acquire`].
    acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
    // Releases the frame at `index`; see [`Vmo::release`].
    release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
    // Clears the frame at `index`; see [`Vmo::clear`].
    clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
    // Reports the current length of the VMO in frames.
    len: unsafe fn(*const ()) -> usize,
    // Resizes the VMO to `new_len` frames.
    resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
    // Destroys the implementation state behind the data pointer.
    drop: unsafe fn(*const ()),
}

// ===== impl Vmo =====
// `Vmo` is only a pointer and a vtable reference — freely movable.
impl Unpin for Vmo {}

// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Send
// therefore Vmo is Send too
unsafe impl Send for Vmo {}
// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Sync
// therefore Vmo is Sync too
unsafe impl Sync for Vmo {}
impl Clone for Vmo {
    /// Duplicates the handle by delegating to the implementation's `clone`
    /// vtable entry; the debug name is carried over.
    #[inline]
    fn clone(&self) -> Self {
        Self {
            // Safety: `data` is the pointer this vtable was created with
            // (upheld by the contract of `Vmo::new`).
            vmo: unsafe { (self.vmo.vtable.clone)(self.vmo.data) },
            name: self.name,
        }
    }
}
impl Drop for Vmo {
    /// Hands the data pointer back to the implementation's `drop` vtable
    /// entry so it can release its state.
    #[inline]
    fn drop(&mut self) {
        // Safety: `data` is the pointer this vtable was created with.
        unsafe { (self.vmo.vtable.drop)(self.vmo.data) }
    }
}
6969+7070+impl fmt::Debug for Vmo {
7171+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
7272+ let vtable_ptr = self.vmo.vtable as *const RawVmoVTable;
7373+ f.debug_struct("Vmo")
7474+ .field("name", &self.name)
7575+ .field("data", &self.vmo.data)
7676+ .field("vtable", &vtable_ptr)
7777+ .finish()
7878+ }
7979+}
8080+8181+impl Vmo {
8282+ /// Creates a new `Vmo` from the provided `len`, `data` pointer and `vtable`.
8383+ ///
8484+ /// TODO
8585+ ///
8686+ /// The `data` pointer can be used to store arbitrary data as required by the vmo implementation.
8787+ /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
8888+ /// The value of this pointer will get passed to all functions that are part
8989+ /// of the `vtable` as the first parameter.
9090+ ///
9191+ /// It is important to consider that the `data` pointer must point to a
9292+ /// thread safe type such as an `Arc`.
9393+ ///
9494+ /// The `vtable` customizes the behavior of a `Cmo`. For each operation
9595+ /// on the `Clock`, the associated function in the `vtable` will be called.
9696+ ///
9797+ /// # Safety
9898+ ///
9999+ /// The behavior of the returned `Vmo` is undefined if the contract defined
100100+ /// in [`RawVmoVTable`]'s documentation is not upheld.
101101+ #[inline]
102102+ #[must_use]
103103+ pub const unsafe fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
104104+ // Safety: ensured by caller
105105+ unsafe { Self::from_raw(RawVmo { data, vtable }) }
106106+ }
107107+108108+ /// Creates a new `Vmo` from a [`RawVmo`].
109109+ ///
110110+ /// # Safety
111111+ ///
112112+ /// The behavior of the returned `Vmo` is undefined if the contract defined
113113+ /// in [`RawVmo`]'s and [`RawVmoVTable`]'s documentation is not upheld.
114114+ #[inline]
115115+ #[must_use]
116116+ pub const unsafe fn from_raw(vmo: RawVmo) -> Self {
117117+ Self {
118118+ vmo,
119119+ name: "<unnamed mystery VMO>",
120120+ }
121121+ }
122122+123123+ /// Add an arbitrary user-defined name to this `Vmo`.
124124+ pub fn named(mut self, name: &'static str) -> Self {
125125+ self.name = name;
126126+ self
127127+ }
128128+129129+ /// Returns this `Vmo`'s name, if it was given one using the [`Vmo::named`]
130130+ /// method.
131131+ pub fn name(&self) -> &'static str {
132132+ self.name
133133+ }
134134+135135+ pub fn len(&self) -> usize {
136136+ unsafe { (self.vmo.vtable.len)(self.vmo.data) }
137137+ }
138138+139139+ pub fn has_content_source(&self) -> bool {
140140+ self.content_source().is_some()
141141+ }
142142+143143+ pub fn content_source(&self) -> Option<()> {
144144+ todo!()
145145+ }
146146+147147+ /// Gets the `data` pointer used to create this `Vmo`.
148148+ #[inline]
149149+ #[must_use]
150150+ pub fn data(&self) -> *const () {
151151+ self.vmo.data
152152+ }
153153+154154+ /// Gets the `vtable` pointer used to create this `Vmo`.
155155+ #[inline]
156156+ #[must_use]
157157+ pub fn vtable(&self) -> &'static RawVmoVTable {
158158+ self.vmo.vtable
159159+ }
160160+161161+ // Release the frame at the given `index`. After this call succeeds, all accessed following the
162162+ // given `access_rules` MUST NOT fault.
163163+ // UNIT: frames
164164+ pub fn acquire<R>(
165165+ &self,
166166+ range: R,
167167+ ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
168168+ where
169169+ R: RangeBounds<usize>,
170170+ {
171171+ let range = self.bound_check(range);
172172+173173+ let i = range
174174+ .into_iter()
175175+ .flat_map(|r| r)
176176+ .filter_map(|idx| unsafe { (self.vmo.vtable.acquire)(self.vmo.data, idx).transpose() });
177177+178178+ fallible_iterator::convert(i)
179179+ }
180180+181181+ // Release the frame at the given `index`. After this call succeeds, all accessed to the frame
182182+ // MUST fault. Returns the base physical address of the release frame.
183183+ // UNIT: frames
184184+ pub fn release<R>(
185185+ &self,
186186+ range: R,
187187+ ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
188188+ where
189189+ R: RangeBounds<usize>,
190190+ {
191191+ let range = self.bound_check(range);
192192+193193+ let i = range
194194+ .into_iter()
195195+ .flat_map(|r| r)
196196+ .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
197197+198198+ fallible_iterator::convert(i)
199199+ }
200200+201201+ // Release the frame at the given `index`. After this call succeeds, all accessed to the frame
202202+ // MUST fault. Returns the base physical address of the release frame.
203203+ // UNIT: frames
204204+ pub fn clear<R>(
205205+ &self,
206206+ range: R,
207207+ ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
208208+ where
209209+ R: RangeBounds<usize>,
210210+ {
211211+ let range = self.bound_check(range);
212212+213213+ let i = range
214214+ .into_iter()
215215+ .flat_map(|r| r)
216216+ .filter_map(|idx| unsafe { (self.vmo.vtable.clear)(self.vmo.data, idx).transpose() });
217217+218218+ fallible_iterator::convert(i)
219219+ }
220220+221221+ // Grow the VMO to `new_size` (guaranteed to be larger than or equal to the current size).
222222+ fn grow(&self, new_len: usize) -> crate::Result<()> {
223223+ debug_assert!(new_len >= self.len());
224224+225225+ unsafe { (self.vmo.vtable.resize)(self.vmo.data, new_len)? };
226226+227227+ Ok(())
228228+ }
229229+230230+ // Shrink the VMO to `new_size` (guaranteed to be smaller than or equal to the current size).
231231+ // After this call succeeds, all accesses outside the new range MUST fault.
232232+ // UNIT: frames
233233+ pub fn shrink(
234234+ &self,
235235+ new_len: usize,
236236+ ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> {
237237+ debug_assert!(new_len <= self.len());
238238+239239+ let old_len = self.len();
240240+241241+ unsafe {
242242+ (self.vmo.vtable.resize)(self.vmo.data, new_len)?;
243243+ };
244244+245245+ let i = (new_len..old_len)
246246+ .into_iter()
247247+ .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
248248+249249+ fallible_iterator::convert(i)
250250+ }
251251+252252+ #[inline]
253253+ fn bound_check<R>(&self, range: R) -> crate::Result<Range<usize>>
254254+ where
255255+ R: RangeBounds<usize>,
256256+ {
257257+ let start = match range.start_bound() {
258258+ Bound::Included(b) => *b,
259259+ Bound::Excluded(b) => *b + 1,
260260+ Bound::Unbounded => 0,
261261+ };
262262+ let end = match range.end_bound() {
263263+ Bound::Included(b) => *b + 1,
264264+ Bound::Excluded(b) => *b,
265265+ Bound::Unbounded => self.len(),
266266+ };
267267+268268+ ensure!(end <= self.len());
269269+270270+ Ok(start..end)
271271+ }
272272+}
273273+274274+// ===== impl RawVmo =====
275275+276276+impl RawVmo {
277277+ /// Creates a new `RawVmo` from the provided `data` pointer and `vtable`.
278278+ ///
279279+ /// The `data` pointer can be used to store arbitrary data as required by the VMO implementation.
280280+ /// his could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
281281+ /// The value of this pointer will get passed to all functions that are part
282282+ /// of the `vtable` as the first parameter.
283283+ ///
284284+ /// It is important to consider that the `data` pointer must point to a
285285+ /// thread safe type such as an `Arc`.
286286+ ///
287287+ /// The `vtable` customizes the behavior of a `Vmo`. For each operation
288288+ /// on the `Vmo`, the associated function in the `vtable` will be called.
289289+ #[inline]
290290+ #[must_use]
291291+ pub const fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
292292+ Self { data, vtable }
293293+ }
294294+}
295295+296296+// ===== impl RawVmoVTable =====
297297+298298+impl RawVmoVTable {
299299+ pub const fn new(
300300+ clone: unsafe fn(*const ()) -> RawVmo,
301301+ acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
302302+ release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
303303+ clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
304304+ len: unsafe fn(*const ()) -> usize,
305305+ resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
306306+ drop: unsafe fn(*const ()),
307307+ ) -> Self {
308308+ Self {
309309+ clone,
310310+ acquire,
311311+ release,
312312+ clear,
313313+ len,
314314+ resize,
315315+ drop,
316316+ }
317317+ }
318318+}
/// Returns a placeholder [`Vmo`] whose operations must never be called.
///
/// The `data` pointer is always null, and every vtable entry except `clone`
/// and `drop` is `unreachable!()`.
pub fn stub_vmo() -> Vmo {
    // NOTE(review): the const is named WIRED_VMO_VTABLE while the function is
    // `stub_vmo` — possibly a leftover from a copy; confirm the intended name.
    const WIRED_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        stub_clone,
        stub_acquire,
        stub_release,
        stub_clear,
        stub_len,
        stub_resize,
        stub_drop,
    );

    // Cloning a stub simply hands out another null-data handle.
    unsafe fn stub_clone(ptr: *const ()) -> RawVmo {
        debug_assert!(ptr.is_null());
        RawVmo::new(ptr, &WIRED_VMO_VTABLE)
    }

    // All remaining operations are invalid on a stub and abort if reached.
    unsafe fn stub_acquire(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_release(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_clear(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_len(ptr: *const ()) -> usize {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_resize(ptr: *const (), _new_len: usize) -> crate::Result<()> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    // Dropping a stub is a no-op: there is no state behind the null pointer.
    unsafe fn stub_drop(ptr: *const ()) {
        debug_assert!(ptr.is_null());
    }

    unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) }
}
/// A VMO that appears intended to populate its frames on demand (see the
/// `todo!("allocate frame")` in `acquire`).
///
/// NOTE(review): work in progress — most operations are `todo!()` and
/// `acquire` does not compile as written (see below).
struct PagedVmo<R: lock_api::RawRwLock> {
    // Frames owned by this VMO, indexed by frame number.
    list: RwLock<R, SmallVec<[FrameRef; 64]>>,
}

impl<R: lock_api::RawRwLock> PagedVmo<R> {
    // NOTE(review): `phys` is unused; construction is not implemented yet.
    pub const fn new(phys: Range<PhysicalAddress>) -> Self {
        todo!()
    }

    // Vtable wiring this type's associated functions into the type-erased
    // `Vmo` interface.
    const VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        Self::clone,
        Self::acquire,
        Self::release,
        Self::clear,
        Self::len,
        Self::resize,
        Self::drop,
    );

    // Safety: `ptr` must be a raw `Arc<Self>` pointer with a live strong
    // count; one additional strong count is minted for the new handle.
    unsafe fn clone(ptr: *const ()) -> RawVmo {
        unsafe {
            Arc::increment_strong_count(ptr.cast::<Self>());
        }
        RawVmo::new(ptr, &Self::VMO_VTABLE)
    }

    // Safety: consumes one strong count of the `Arc<Self>` behind `ptr`.
    unsafe fn drop(ptr: *const ()) {
        drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) });
    }

    unsafe fn acquire(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        let me = ptr.cast::<Self>().as_ref().unwrap();

        let mut list = me.list.write();

        // NOTE(review): this does not compile — `SmallVec` has no `entry`
        // API (that is `HashMap`'s), and the function falls off the end
        // without returning a value (the intended return is still commented
        // out below).
        list.entry(index).or_insert_with(|| todo!("allocate frame"));

        // list
    }

    unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        todo!()
    }

    unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        todo!()
    }

    unsafe fn len(ptr: *const ()) -> usize {
        todo!()
    }

    unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> {
        todo!()
    }
}