Next Generation WASM Microkernel Operating System

refactor: separate memory subsystem into own crate

+4122 -7
+163 -4
Cargo.lock
··· 47 47 checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" 48 48 49 49 [[package]] 50 + name = "allocator-api2" 51 + version = "0.3.0" 52 + source = "registry+https://github.com/rust-lang/crates.io-index" 53 + checksum = "78200ac3468a57d333cd0ea5dd398e25111194dcacd49208afca95c629a6311d" 54 + 55 + [[package]] 50 56 name = "anes" 51 57 version = "0.1.6" 52 58 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 136 142 ] 137 143 138 144 [[package]] 145 + name = "bit-set" 146 + version = "0.8.0" 147 + source = "registry+https://github.com/rust-lang/crates.io-index" 148 + checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" 149 + dependencies = [ 150 + "bit-vec", 151 + ] 152 + 153 + [[package]] 154 + name = "bit-vec" 155 + version = "0.8.0" 156 + source = "registry+https://github.com/rust-lang/crates.io-index" 157 + checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" 158 + 159 + [[package]] 139 160 name = "bitflags" 140 161 version = "2.9.2" 141 162 source = "registry+https://github.com/rust-lang/crates.io-index" 142 163 checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29" 143 164 144 165 [[package]] 166 + name = "brie-tree" 167 + version = "0.1.2" 168 + source = "registry+https://github.com/rust-lang/crates.io-index" 169 + checksum = "fc07bcb47a1eaa44b6eb9ae3dd5e895cbf222f7f378ecbe014e1dae4bba30a66" 170 + dependencies = [ 171 + "allocator-api2 0.3.0", 172 + "cfg-if", 173 + "nonmax", 174 + ] 175 + 176 + [[package]] 145 177 name = "bumpalo" 146 178 version = "3.19.0" 147 179 source = "registry+https://github.com/rust-lang/crates.io-index" 148 180 checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" 149 181 dependencies = [ 150 - "allocator-api2", 182 + "allocator-api2 0.2.21", 151 183 ] 152 184 153 185 [[package]] ··· 563 595 checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" 564 596 565 597 
[[package]] 598 + name = "errno" 599 + version = "0.3.13" 600 + source = "registry+https://github.com/rust-lang/crates.io-index" 601 + checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" 602 + dependencies = [ 603 + "libc", 604 + "windows-sys 0.59.0", 605 + ] 606 + 607 + [[package]] 566 608 name = "escape8259" 567 609 version = "0.5.3" 568 610 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 590 632 dependencies = [ 591 633 "criterion", 592 634 ] 635 + 636 + [[package]] 637 + name = "fastrand" 638 + version = "2.3.0" 639 + source = "registry+https://github.com/rust-lang/crates.io-index" 640 + checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" 593 641 594 642 [[package]] 595 643 name = "fdt" ··· 952 1000 "cordyceps", 953 1001 "cpu-local", 954 1002 "criterion", 955 - "fastrand", 1003 + "fastrand 0.1.0", 956 1004 "futures", 957 1005 "lazy_static", 958 1006 "loom", ··· 995 1043 "cranelift-entity", 996 1044 "cranelift-frontend", 997 1045 "fallible-iterator", 998 - "fastrand", 1046 + "fastrand 0.1.0", 999 1047 "fdt", 1000 1048 "futures", 1001 1049 "gimli", ··· 1120 1168 ] 1121 1169 1122 1170 [[package]] 1171 + name = "linux-raw-sys" 1172 + version = "0.9.4" 1173 + source = "registry+https://github.com/rust-lang/crates.io-index" 1174 + checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" 1175 + 1176 + [[package]] 1123 1177 name = "litemap" 1124 1178 version = "0.8.0" 1125 1179 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1196 1250 ] 1197 1251 1198 1252 [[package]] 1253 + name = "mem" 1254 + version = "0.1.0" 1255 + dependencies = [ 1256 + "anyhow", 1257 + "brie-tree", 1258 + "cordyceps", 1259 + "cpu-local", 1260 + "fallible-iterator", 1261 + "kasync", 1262 + "lock_api", 1263 + "mycelium-bitfield", 1264 + "pin-project", 1265 + "proptest", 1266 + "rand", 1267 + "rand_chacha", 1268 + "smallvec", 1269 + "wavltree", 1270 + ] 1271 + 1272 + [[package]] 1199 
1273 name = "memchr" 1200 1274 version = "2.7.5" 1201 1275 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1217 1291 checksum = "24e0cc5e2c585acbd15c5ce911dff71e1f4d5313f43345873311c4f5efd741cc" 1218 1292 1219 1293 [[package]] 1294 + name = "nonmax" 1295 + version = "0.5.5" 1296 + source = "registry+https://github.com/rust-lang/crates.io-index" 1297 + checksum = "610a5acd306ec67f907abe5567859a3c693fb9886eb1f012ab8f2a47bef3db51" 1298 + 1299 + [[package]] 1220 1300 name = "nu-ansi-term" 1221 1301 version = "0.46.0" 1222 1302 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1468 1548 ] 1469 1549 1470 1550 [[package]] 1551 + name = "proptest" 1552 + version = "1.7.0" 1553 + source = "registry+https://github.com/rust-lang/crates.io-index" 1554 + checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" 1555 + dependencies = [ 1556 + "bit-set", 1557 + "bit-vec", 1558 + "bitflags", 1559 + "lazy_static", 1560 + "num-traits", 1561 + "rand", 1562 + "rand_chacha", 1563 + "rand_xorshift", 1564 + "regex-syntax 0.8.5", 1565 + "rusty-fork", 1566 + "tempfile", 1567 + "unarray", 1568 + ] 1569 + 1570 + [[package]] 1571 + name = "quick-error" 1572 + version = "1.2.3" 1573 + source = "registry+https://github.com/rust-lang/crates.io-index" 1574 + checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" 1575 + 1576 + [[package]] 1471 1577 name = "quote" 1472 1578 version = "1.0.40" 1473 1579 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1512 1618 ] 1513 1619 1514 1620 [[package]] 1621 + name = "rand_xorshift" 1622 + version = "0.4.0" 1623 + source = "registry+https://github.com/rust-lang/crates.io-index" 1624 + checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" 1625 + dependencies = [ 1626 + "rand_core", 1627 + ] 1628 + 1629 + [[package]] 1515 1630 name = "rayon" 1516 1631 version = "1.10.0" 1517 1632 source = 
"registry+https://github.com/rust-lang/crates.io-index" ··· 1536 1651 version = "0.11.1" 1537 1652 source = "git+https://github.com/JonasKruckenberg/regalloc2?branch=jonas%2Frefactor%2Fstatic-machine-env#305811667665047d750521973be4b3b7a6a7d312" 1538 1653 dependencies = [ 1539 - "allocator-api2", 1654 + "allocator-api2 0.2.21", 1540 1655 "bumpalo", 1541 1656 "hashbrown", 1542 1657 "log", ··· 1620 1735 ] 1621 1736 1622 1737 [[package]] 1738 + name = "rustix" 1739 + version = "1.0.8" 1740 + source = "registry+https://github.com/rust-lang/crates.io-index" 1741 + checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" 1742 + dependencies = [ 1743 + "bitflags", 1744 + "errno", 1745 + "libc", 1746 + "linux-raw-sys", 1747 + "windows-sys 0.59.0", 1748 + ] 1749 + 1750 + [[package]] 1623 1751 name = "rustversion" 1624 1752 version = "1.0.21" 1625 1753 source = "registry+https://github.com/rust-lang/crates.io-index" 1626 1754 checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" 1755 + 1756 + [[package]] 1757 + name = "rusty-fork" 1758 + version = "0.3.0" 1759 + source = "registry+https://github.com/rust-lang/crates.io-index" 1760 + checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" 1761 + dependencies = [ 1762 + "fnv", 1763 + "quick-error", 1764 + "tempfile", 1765 + "wait-timeout", 1766 + ] 1627 1767 1628 1768 [[package]] 1629 1769 name = "ryu" ··· 1787 1927 version = "0.13.2" 1788 1928 source = "registry+https://github.com/rust-lang/crates.io-index" 1789 1929 checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" 1930 + 1931 + [[package]] 1932 + name = "tempfile" 1933 + version = "3.20.0" 1934 + source = "registry+https://github.com/rust-lang/crates.io-index" 1935 + checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" 1936 + dependencies = [ 1937 + "fastrand 2.3.0", 1938 + "getrandom", 1939 + "once_cell", 1940 + "rustix", 1941 + "windows-sys 0.59.0", 1942 
+ ] 1790 1943 1791 1944 [[package]] 1792 1945 name = "thiserror" ··· 2092 2245 "bitflags", 2093 2246 "spin", 2094 2247 ] 2248 + 2249 + [[package]] 2250 + name = "unarray" 2251 + version = "0.1.4" 2252 + source = "registry+https://github.com/rust-lang/crates.io-index" 2253 + checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" 2095 2254 2096 2255 [[package]] 2097 2256 name = "unicode-ident"
+29
libs/mem/Cargo.toml
··· 1 + [package] 2 + name = "mem" 3 + version.workspace = true 4 + edition.workspace = true 5 + authors.workspace = true 6 + license.workspace = true 7 + 8 + [dependencies] 9 + cpu-local.workspace = true 10 + kasync.workspace = true 11 + 12 + # 3rd-party dependencies 13 + mycelium-bitfield.workspace = true 14 + anyhow.workspace = true 15 + cordyceps.workspace = true 16 + pin-project.workspace = true 17 + lock_api.workspace = true 18 + fallible-iterator.workspace = true 19 + smallvec.workspace = true 20 + wavltree.workspace = true 21 + rand_chacha.workspace = true 22 + rand.workspace = true 23 + brie-tree = "0.1.2" 24 + 25 + [dev-dependencies] 26 + proptest = "1.7.0" 27 + 28 + [lints] 29 + workspace = true
+13
libs/mem/proptest-regressions/frame.txt
··· 1 + # Seeds for failure cases proptest has generated in the past. It is 2 + # automatically read and these particular cases re-run before any 3 + # novel cases are generated. 4 + # 5 + # It is recommended to check this file in to source control so that 6 + # everyone who runs the test benefits from these saved cases. 7 + cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1 8 + cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257 9 + cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257 10 + cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354 11 + cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257 12 + cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20 13 + cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+69
libs/mem/src/access_rules.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + mycelium_bitfield::bitfield! { 9 + /// Rules that dictate how a region of virtual memory may be accessed. 10 + /// 11 + /// # W^X 12 + /// 13 + /// In order to prevent malicious code execution as proactively as possible, 14 + /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced 15 + /// through the [`WriteOrExecute`] enum field. 16 + #[derive(PartialEq, Eq)] 17 + pub struct AccessRules<u8> { 18 + /// If set, reading from the memory region is allowed. 19 + pub const READ: bool; 20 + /// Whether executing, or writing this memory region is allowed (or neither). 21 + pub const WRITE_OR_EXECUTE: WriteOrExecute; 22 + /// If set, requires code in the memory region to use aarch64 Branch Target Identification. 23 + /// Does nothing on non-aarch64 architectures. 24 + pub const BTI: bool; 25 + } 26 + } 27 + 28 + /// Whether executing, or writing this memory region is allowed (or neither). 29 + /// 30 + /// This is an enum to enforce [`W^X`] at the type-level. 31 + /// 32 + /// [`W^X`]: AccessRules 33 + #[derive(Copy, Clone, Debug, Eq, PartialEq)] 34 + #[repr(u8)] 35 + pub enum WriteOrExecute { 36 + /// Neither writing nor execution of the memory region is allowed. 37 + Neither = 0b00, 38 + /// Writing to the memory region is allowed. 39 + Write = 0b01, 40 + /// Executing code from the memory region is allowed. 41 + Execute = 0b10, 42 + } 43 + 44 + // ===== impl WriteOrExecute ===== 45 + 46 + impl mycelium_bitfield::FromBits<u8> for WriteOrExecute { 47 + type Error = core::convert::Infallible; 48 + 49 + /// The number of bits required to represent a value of this type. 
50 + const BITS: u32 = 2; 51 + 52 + #[inline] 53 + fn try_from_bits(bits: u8) -> Result<Self, Self::Error> { 54 + match bits { 55 + b if b == Self::Neither as u8 => Ok(Self::Neither), 56 + b if b == Self::Write as u8 => Ok(Self::Write), 57 + b if b == Self::Execute as u8 => Ok(Self::Execute), 58 + _ => { 59 + // this should never happen unless the bitpacking code is broken 60 + unreachable!("invalid memory region access rules {bits:#b}") 61 + } 62 + } 63 + } 64 + 65 + #[inline] 66 + fn into_bits(self) -> u8 { 67 + self as u8 68 + } 69 + }
+997
libs/mem/src/address_space.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + mod batch; 9 + mod region; 10 + 11 + use alloc::boxed::Box; 12 + use core::alloc::Layout; 13 + use core::num::NonZeroUsize; 14 + use core::ops::{Bound, ControlFlow, Range}; 15 + use core::ptr::NonNull; 16 + 17 + use anyhow::{Context, format_err}; 18 + pub(crate) use batch::Batch; 19 + use rand::Rng; 20 + use rand::distr::Uniform; 21 + use rand_chacha::ChaCha20Rng; 22 + use region::AddressSpaceRegion; 23 + use wavltree::{CursorMut, WAVLTree}; 24 + 25 + use crate::access_rules::AccessRules; 26 + use crate::utils::assert_unsafe_precondition_; 27 + use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 28 + 29 + pub unsafe trait RawAddressSpace { 30 + /// The smallest addressable chunk of memory of this address space. All address argument provided 31 + /// to methods of this type (both virtual and physical) must be aligned to this. 32 + const PAGE_SIZE: usize; 33 + const VIRT_ADDR_BITS: u32; 34 + 35 + const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8; 36 + const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1); 37 + 38 + /// The [`Flush`] implementation for this address space. 39 + type Flush: Flush; 40 + 41 + /// Return a new, empty flush for this address space. 42 + fn flush(&self) -> Self::Flush; 43 + 44 + /// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given 45 + /// [`VirtualAddress`] if mapped. 46 + fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>; 47 + 48 + /// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the 49 + /// specified access rules. 
50 + /// 51 + /// If this returns `Ok`, the mapping is added to the raw address space and all future 52 + /// accesses to the virtual address range will translate to accesses of the physical address 53 + /// range. 54 + /// 55 + /// # Safety 56 + /// 57 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 58 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 59 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 60 + /// 61 + /// # Errors 62 + /// 63 + /// Returning `Err` indicates the mapping cannot be established and the virtual address range 64 + /// remains unaltered. 65 + unsafe fn map( 66 + &mut self, 67 + virt: VirtualAddress, 68 + phys: PhysicalAddress, 69 + len: NonZeroUsize, 70 + access_rules: AccessRules, 71 + flush: &mut Self::Flush, 72 + ) -> crate::Result<()>; 73 + 74 + /// Unmap a contiguous range of `len` virtual addresses. 75 + /// 76 + /// After this returns all accesses to the virtual address region will cause a fault. 77 + /// 78 + /// # Safety 79 + /// 80 + /// - `virt..virt+len` must be mapped 81 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 82 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 83 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 84 + unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush); 85 + 86 + /// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses. 87 + /// 88 + /// After this returns all accesses to the virtual address region must follow the 89 + /// specified `AccessRules` or cause a fault. 
90 + /// 91 + /// # Safety 92 + /// 93 + /// - `virt..virt+len` must be mapped 94 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 95 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 96 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 97 + unsafe fn set_access_rules( 98 + &mut self, 99 + virt: VirtualAddress, 100 + len: NonZeroUsize, 101 + access_rules: AccessRules, 102 + flush: &mut Self::Flush, 103 + ); 104 + } 105 + 106 + /// A type that can flush changes made to a [`RawAddressSpace`]. 107 + /// 108 + /// Note: [`Flush`] is purely optional, it exists so implementation MAY batch 109 + /// Note that the implementation is not required to delay materializing changes until [`Flush::flush`] 110 + /// is called. 111 + pub trait Flush { 112 + /// Flush changes made to its [`RawAddressSpace`]. 113 + /// 114 + /// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across 115 + /// all affected threads/CPUs. 116 + /// 117 + /// # Errors 118 + /// 119 + /// If this returns `Err`, if flushing the changes failed. The changes, or a subset of them, might 120 + /// still have taken effect across all or some of the threads/CPUs. 121 + fn flush(self) -> crate::Result<()>; 122 + } 123 + 124 + pub struct AddressSpace<R: RawAddressSpace> { 125 + raw: R, 126 + regions: WAVLTree<AddressSpaceRegion<R>>, 127 + batch: Batch, 128 + max_range: Range<VirtualAddress>, 129 + rng: Option<ChaCha20Rng>, 130 + } 131 + 132 + impl<R: RawAddressSpace> AddressSpace<R> { 133 + pub fn new(raw: R, rng: Option<ChaCha20Rng>) -> Self { 134 + Self { 135 + raw, 136 + regions: WAVLTree::new(), 137 + batch: Batch::new(), 138 + max_range: VirtualAddress::MIN..VirtualAddress::MAX, 139 + rng, 140 + } 141 + } 142 + 143 + /// Attempts to reserve a region of virtual memory. 144 + /// 145 + /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees 146 + /// of `layout`. 
Access to this region must obey the provided `rules` or cause a hardware fault. 147 + /// 148 + /// The returned region may have a larger size than specified by `layout.size()`, and may or may 149 + /// not have its contents initialized. 150 + /// 151 + /// The returned region of virtual memory remains mapped as long as it is [*currently mapped*] 152 + /// and the address space type itself has not been dropped. 153 + /// 154 + /// [*currently mapped*]: #currently-mapped-memory 155 + /// 156 + /// # Errors 157 + /// 158 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 159 + /// constraints, virtual memory is exhausted, or mapping otherwise fails. 160 + pub fn map( 161 + &mut self, 162 + layout: Layout, 163 + access_rules: AccessRules, 164 + ) -> crate::Result<NonNull<[u8]>> { 165 + #[cfg(debug_assertions)] 166 + self.assert_valid("[AddressSpace::map]"); 167 + 168 + let layout = layout.align_to(R::PAGE_SIZE).unwrap(); 169 + 170 + let spot = self 171 + .find_spot_for(layout) 172 + .context(format_err!("cannot find free spot for layout {layout:?}"))?; 173 + 174 + // TODO "relaxed" frame provider 175 + let region = AddressSpaceRegion::new(spot, layout, access_rules); 176 + 177 + let region = self.regions.insert(Box::pin(region)); 178 + 179 + // TODO OPTIONAL eagerly commit a few pages 180 + 181 + self.batch.flush_changes(&mut self.raw)?; 182 + 183 + Ok(region.as_non_null()) 184 + } 185 + 186 + /// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region 187 + /// is zero-initialized. 188 + /// 189 + /// # Errors 190 + /// 191 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 192 + /// constraints, virtual memory is exhausted, or mapping otherwise fails. 
193 + pub fn map_zeroed( 194 + &mut self, 195 + layout: Layout, 196 + access_rules: AccessRules, 197 + ) -> crate::Result<NonNull<[u8]>> { 198 + #[cfg(debug_assertions)] 199 + self.assert_valid("[AddressSpace::map_zeroed]"); 200 + 201 + let layout = layout.align_to(R::PAGE_SIZE).unwrap(); 202 + 203 + let spot = self 204 + .find_spot_for(layout) 205 + .context(format_err!("cannot find free spot for layout {layout:?}"))?; 206 + 207 + // TODO "zeroed" frame provider 208 + let region = AddressSpaceRegion::new(spot, layout, access_rules); 209 + 210 + let region = self.regions.insert(Box::pin(region)); 211 + 212 + // TODO OPTIONAL eagerly commit a few pages 213 + 214 + self.batch.flush_changes(&mut self.raw)?; 215 + 216 + Ok(region.as_non_null()) 217 + } 218 + 219 + /// Unmaps the virtual memory region referenced by `ptr`. 220 + /// 221 + /// # Safety 222 + /// 223 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and 224 + /// * `layout` must [*fit*] that region of memory. 225 + /// 226 + /// [*currently mapped*]: #currently-mapped-memory 227 + /// [*fit*]: #memory-fitting 228 + pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) { 229 + #[cfg(debug_assertions)] 230 + self.assert_valid("[AddressSpace::unmap]"); 231 + 232 + // Safety: responsibility of caller 233 + let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) }; 234 + 235 + // Safety: responsibility of caller 236 + let mut region = unsafe { cursor.remove().unwrap_unchecked() }; 237 + 238 + region.decommit(.., &mut self.batch, &mut self.raw).unwrap(); 239 + } 240 + 241 + /// Attempts to extend the virtual memory reservation. 242 + /// 243 + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the 244 + /// mapped region. The pointer is suitable for holding data described by `new_layout`. 
To accomplish 245 + /// this, the address space may extend the mapping referenced by `ptr` to fit the new layout. 246 + /// 247 + /// TODO describe how extending a file-backed, of DMA-backed mapping works 248 + /// 249 + /// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones. 250 + /// 251 + /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been 252 + /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*], 253 + /// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer 254 + /// for accessing this region now. 255 + /// 256 + /// If this method returns `Err`, then ownership of the memory region has not been transferred to 257 + /// this address space, and the contents of the region are unaltered. 258 + /// 259 + /// [*Undefined Behavior*] 260 + /// 261 + /// # Safety 262 + /// 263 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space. 264 + /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.). 265 + /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. 266 + /// 267 + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 268 + /// 269 + /// [*currently mapped*]: #currently-mapped-memory 270 + /// [*fit*]: #memory-fitting 271 + /// 272 + /// # Errors 273 + /// 274 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 275 + /// constraints, virtual memory is exhausted, or growing otherwise fails. 
276 + pub unsafe fn grow( 277 + &mut self, 278 + ptr: NonNull<u8>, 279 + old_layout: Layout, 280 + new_layout: Layout, 281 + ) -> crate::Result<NonNull<[u8]>> { 282 + #[cfg(debug_assertions)] 283 + self.assert_valid("[AddressSpace::grow]"); 284 + 285 + assert_unsafe_precondition_!( 286 + "TODO", 287 + (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 288 + old_layout.align().is_multiple_of(page_size) 289 + } 290 + ); 291 + 292 + assert_unsafe_precondition_!( 293 + "TODO", 294 + (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 295 + new_layout.align().is_multiple_of(page_size) 296 + } 297 + ); 298 + 299 + if new_layout == old_layout { 300 + return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size())); 301 + } 302 + 303 + assert_unsafe_precondition_!( 304 + "TODO", 305 + (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => { 306 + new_layout.size() >= old_layout.size() 307 + } 308 + ); 309 + 310 + if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } { 311 + Ok(ptr) 312 + } else { 313 + unsafe { self.reallocate_region(ptr, old_layout, new_layout) } 314 + } 315 + } 316 + 317 + /// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place. 318 + /// 319 + /// # Safety 320 + /// 321 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space. 322 + /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.). 323 + /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. 324 + /// 325 + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 
326 + /// 327 + /// [*currently mapped*]: #currently-mapped-memory 328 + /// [*fit*]: #memory-fitting 329 + /// 330 + /// # Errors 331 + /// 332 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 333 + /// constraints, virtual memory is exhausted, or growing otherwise fails. 334 + pub unsafe fn grow_in_place( 335 + &mut self, 336 + ptr: NonNull<u8>, 337 + old_layout: Layout, 338 + new_layout: Layout, 339 + ) -> crate::Result<NonNull<[u8]>> { 340 + #[cfg(debug_assertions)] 341 + self.assert_valid("[AddressSpace::grow_in_place]"); 342 + 343 + assert_unsafe_precondition_!( 344 + "TODO", 345 + (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 346 + old_layout.align().is_multiple_of(page_size) 347 + } 348 + ); 349 + 350 + assert_unsafe_precondition_!( 351 + "TODO", 352 + (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 353 + new_layout.align().is_multiple_of(page_size) 354 + } 355 + ); 356 + 357 + if new_layout == old_layout { 358 + return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size())); 359 + } 360 + 361 + assert_unsafe_precondition_!( 362 + "TODO", 363 + (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => { 364 + new_layout.size() >= old_layout.size() 365 + } 366 + ); 367 + 368 + unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } 369 + } 370 + 371 + /// Attempts to shrink the virtual memory reservation. 372 + /// 373 + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the 374 + /// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish 375 + /// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout. 376 + /// 377 + /// TODO describe how shrinking a file-backed, of DMA-backed mapping works 378 + /// 379 + /// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones. 
380 + /// 381 + /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been 382 + /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*], 383 + /// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer 384 + /// for accessing this region now. 385 + /// 386 + /// If this method returns `Err`, then ownership of the memory region has not been transferred to 387 + /// this address space, and the contents of the region are unaltered. 388 + /// 389 + /// [*Undefined Behavior*] 390 + /// 391 + /// # Safety 392 + /// 393 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space. 394 + /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.). 395 + /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. 396 + /// 397 + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 398 + /// 399 + /// [*currently mapped*]: #currently-mapped-memory 400 + /// [*fit*]: #memory-fitting 401 + /// 402 + /// # Errors 403 + /// 404 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 405 + /// constraints, virtual memory is exhausted, or shrinking otherwise fails. 
406 + pub unsafe fn shrink( 407 + &mut self, 408 + ptr: NonNull<u8>, 409 + old_layout: Layout, 410 + new_layout: Layout, 411 + ) -> crate::Result<NonNull<[u8]>> { 412 + #[cfg(debug_assertions)] 413 + self.assert_valid("[AddressSpace::shrink]"); 414 + 415 + assert_unsafe_precondition_!( 416 + "TODO", 417 + (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 418 + old_layout.align().is_multiple_of(page_size) 419 + } 420 + ); 421 + 422 + assert_unsafe_precondition_!( 423 + "TODO", 424 + (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 425 + new_layout.align().is_multiple_of(page_size) 426 + } 427 + ); 428 + 429 + if new_layout == old_layout { 430 + return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size())); 431 + } 432 + 433 + assert_unsafe_precondition_!( 434 + "TODO", 435 + (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => { 436 + new_layout.size() <= old_layout.size() 437 + } 438 + ); 439 + 440 + if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } { 441 + Ok(ptr) 442 + } else { 443 + unsafe { self.reallocate_region(ptr, old_layout, new_layout) } 444 + } 445 + } 446 + 447 + /// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be 448 + /// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned. 449 + /// 450 + /// # Safety 451 + /// 452 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space. 453 + /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.). 454 + /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. 455 + /// 456 + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 
457 + /// 458 + /// [*currently mapped*]: #currently-mapped-memory 459 + /// [*fit*]: #memory-fitting 460 + /// 461 + /// # Errors 462 + /// 463 + /// Returning `Err` indicates the layout does not meet the address space's size or alignment 464 + /// constraints, virtual memory is exhausted, or growing otherwise fails. 465 + pub unsafe fn shrink_in_place( 466 + &mut self, 467 + ptr: NonNull<u8>, 468 + old_layout: Layout, 469 + new_layout: Layout, 470 + ) -> crate::Result<NonNull<[u8]>> { 471 + #[cfg(debug_assertions)] 472 + self.assert_valid("[AddressSpace::shrink_in_place]"); 473 + 474 + assert_unsafe_precondition_!( 475 + "TODO", 476 + (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 477 + old_layout.align().is_multiple_of(page_size) 478 + } 479 + ); 480 + 481 + assert_unsafe_precondition_!( 482 + "TODO", 483 + (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 484 + new_layout.align().is_multiple_of(page_size) 485 + } 486 + ); 487 + 488 + if new_layout == old_layout { 489 + return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size())); 490 + } 491 + 492 + assert_unsafe_precondition_!( 493 + "TODO", 494 + (old_layout: Layout = old_layout, new_layout: Layout = new_layout) => { 495 + new_layout.size() <= old_layout.size() 496 + } 497 + ); 498 + 499 + unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } 500 + } 501 + 502 + /// Updates the access rules for the virtual memory region referenced by `ptr`. 503 + /// 504 + /// If this returns `Ok`, access to this region must obey the new `rules` or cause a hardware fault. 505 + /// 506 + /// If this method returns `Err`, the access rules of the memory region are unaltered. 507 + /// 508 + /// # Safety 509 + /// 510 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and 511 + /// * `layout` must [*fit*] that region of memory. 
512 + /// 513 + /// [*currently mapped*]: #currently-mapped-memory 514 + /// [*fit*]: #memory-fitting 515 + pub unsafe fn update_access_rules( 516 + &mut self, 517 + ptr: NonNull<u8>, 518 + layout: Layout, 519 + access_rules: AccessRules, 520 + ) -> crate::Result<()> { 521 + #[cfg(debug_assertions)] 522 + self.assert_valid("[AddressSpace::update_access_rules]"); 523 + 524 + // Safety: responsibility of caller 525 + let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) }; 526 + 527 + // Safety: responsibility of caller 528 + let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 529 + 530 + region.update_access_rules(access_rules, &mut self.batch)?; 531 + 532 + self.batch.flush_changes(&mut self.raw)?; 533 + 534 + Ok(()) 535 + } 536 + 537 + /// Attempts to fill the virtual memory region referenced by `ptr` with zeroes. 538 + /// 539 + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the 540 + /// mapped region. The pointer is suitable for holding data described by `new_layout` and is 541 + /// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the 542 + /// virtual memory region. 543 + /// 544 + /// TODO describe how clearing a file-backed, of DMA-backed mapping works 545 + /// 546 + /// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones. 547 + /// 548 + /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been 549 + /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*], 550 + /// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer 551 + /// for accessing this region now. 552 + /// 553 + /// If this method returns `Err`, then ownership of the memory region has not been transferred to 554 + /// this address space, and the contents of the region are unaltered. 
    /// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
    /// mapped region. The pointer is suitable for holding data described by `layout` and is
    /// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
    /// virtual memory region.
    ///
    /// TODO describe how clearing a file-backed or DMA-backed mapping works
    ///
    /// The [`AccessRules`] of the new virtual memory region are *the same* as the old ones.
    ///
    /// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
    /// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
    /// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
    /// for accessing this region now.
    ///
    /// If this method returns `Err`, then ownership of the memory region has not been transferred to
    /// this address space, and the contents of the region are unaltered.
    ///
    /// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
    /// * `layout` must [*fit*] that region of memory.
    ///
    /// [*currently mapped*]: #currently-mapped-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates the layout does not meet the address space's size or alignment
    /// constraints, clearing a virtual memory region is not supported by the backing storage, or
    /// clearing otherwise fails.
    pub unsafe fn clear(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        #[cfg(debug_assertions)]
        self.assert_valid("[AddressSpace::clear]");

        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };

        // Safety: responsibility of caller
        let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };

        // NOTE(review): `AddressSpaceRegion::clear` in region.rs takes a
        // `range: impl RangeBounds<VirtualAddress>` argument, not a `&mut Batch` —
        // confirm which signature is intended; one of the two sides looks stale.
        region.clear(&mut self.batch)?;

        self.batch.flush_changes(&mut self.raw)?;

        Ok(region.as_non_null())
    }

    /// Checks internal invariants of this address space: regions must be sorted, non-overlapping,
    /// and lie inside `max_range`. Panics (with `msg` as a prefix) on the first violation.
    pub fn assert_valid(&self, msg: &str) {
        let mut regions = self.regions.iter();

        // An empty iterator is only valid if the backing tree is empty as well.
        let Some(first_region) = regions.next() else {
            assert!(
                self.regions.is_empty(),
                "{msg}region iterator is empty but tree is not."
            );

            return;
        };

        first_region.assert_valid(msg);

        // NOTE(review): the first region's range is never checked against
        // `self.max_range` (only subsequent regions are, below) — confirm whether that
        // check is intentionally skipped or missing.
        let mut seen_range = first_region.range().clone();

        // Iteration order is sorted ascending, so extending `seen_range`'s end to the
        // current region's end keeps it covering everything visited so far.
        while let Some(region) = regions.next() {
            assert!(
                !region.range().is_overlapping(&seen_range),
                "{msg}region cannot overlap previous region; region={region:?}"
            );
            assert!(
                region.range().start >= self.max_range.start
                    && region.range().end <= self.max_range.end,
                "{msg}region cannot lie outside of max address space range; region={region:?}"
            );

            seen_range = seen_range.start..region.range().end;

            region.assert_valid(msg);

            // TODO assert validity of VMO against phys addresses
            // let (_phys, access_rules) = self
            //     .batched_raw
            //     .raw_address_space()
            //     .lookup(region.range().start)
            //     .unwrap_or_else(|| {
            //         panic!("{msg}region base address is not mapped in raw address space region={region:?}")
            //     });
            //
            // assert_eq!(
            //     access_rules,
            //     region.access_rules(),
            //     "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
            //     region.access_rules(),
            // );
        }
    }

    /// Attempts to grow a virtual memory region in-place. This method is shared between [`Self::grow`]
    /// and [`Self::grow_in_place`].
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
649 + /// * `new_layout.align()` must be multiple of PAGE_SIZE 650 + unsafe fn grow_in_place_inner( 651 + &mut self, 652 + ptr: NonNull<u8>, 653 + old_layout: Layout, 654 + new_layout: Layout, 655 + ) -> crate::Result<NonNull<[u8]>> { 656 + // Safety: responsibility of caller 657 + let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) }; 658 + 659 + let next_range = cursor.peek_next().map(|region| region.range().clone()); 660 + 661 + // Safety: responsibility of caller 662 + let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 663 + 664 + region.grow_in_place(new_layout, next_range, &mut self.batch)?; 665 + 666 + self.batch.flush_changes(&mut self.raw)?; 667 + 668 + Ok(region.as_non_null()) 669 + } 670 + 671 + /// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::grow`] 672 + /// and [`Self::grow_in_place`]. 673 + /// 674 + /// # Safety 675 + /// 676 + /// * `ptr` must denote a region of memory [*currently mapped*] in this address space. 677 + /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.). 678 + /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. 679 + /// * `new_layout.align()` must be multiple of PAGE_SIZE 680 + unsafe fn shrink_in_place_inner( 681 + &mut self, 682 + ptr: NonNull<u8>, 683 + old_layout: Layout, 684 + new_layout: Layout, 685 + ) -> crate::Result<NonNull<[u8]>> { 686 + // Safety: responsibility of caller 687 + let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) }; 688 + 689 + // Safety: responsibility of caller 690 + let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 691 + 692 + region.shrink(new_layout, &mut self.batch)?; 693 + 694 + self.batch.flush_changes()?; 695 + 696 + Ok(region.as_non_null()) 697 + } 698 + 699 + /// Reallocates a virtual address region. 
    /// This will unmap and remove the old region, allocating
    /// a new region that will be backed by the old region's physical memory.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
    /// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    /// * `new_layout.align()` must be multiple of PAGE_SIZE
    unsafe fn reallocate_region(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> crate::Result<NonNull<[u8]>> {
        // Safety: responsibility of caller
        let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
        // NOTE(review): the region is removed from the tree here but never re-inserted
        // after `move_to` below, so the address space would lose track of it —
        // confirm whether `move_to` re-links the region or an explicit re-insert is missing.
        let mut region = unsafe { cursor.remove().unwrap_unchecked() };

        let spot = self.find_spot_for(new_layout).context(format_err!(
            "cannot find free spot for layout {new_layout:?}"
        ))?;

        region.move_to(spot, new_layout, &mut self.batch)?;

        // NOTE(review): unlike the other mutation paths in this file, `self.batch` is not
        // flushed into `self.raw` here — confirm `move_to` flushes internally, or whether a
        // `self.batch.flush_changes(&mut self.raw)?` call is missing.
        Ok(region.as_non_null())
    }

    /// Find a spot in the address space that satisfies the given `layout` requirements.
    ///
    /// If a spot suitable for holding data described by `layout` is found, the base address of the
    /// address range is returned in `Some`. The returned address is already correctly aligned to
    /// `layout.align()`.
    ///
    /// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
    /// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
    /// be satisfied *at the moment*. Calls to this method with a different size, alignment, or at a
    /// different time might still succeed.
737 + fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> { 738 + // The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is 739 + // guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works 740 + // elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively. 741 + // Here is how it works: 742 + // 1. We set up two counters: (see the GapVisitor) 743 + // - `candidate_spot_count` which we initialize to zero 744 + // - `target_index` which we either set to a random value between 0..<the maximum number of 745 + // possible addresses in the address space> if ASLR is requested OR to zero otherwise. 746 + // 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the 747 + // gaps between regions. We count the number of addresses in each gap that satisfy the 748 + // requested `Layout`s size and alignment and add that to the `candidate_spot_count`. 749 + // IF the number of spots in the gap is greater than our chosen target index, we pick the 750 + // spot at the target index and finish. ELSE we *decrement* the target index by the number 751 + // of spots and continue to the next gap. 752 + // 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original 753 + // guess for `target_index` was too big, in which case we need to retry. 754 + // 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know 755 + // the *actual* number of possible spots in the address space since we just counted them 756 + // during the first attempt. We initialize `target_index` to `0..candidate_spot_count` 757 + // which is guaranteed to return us a spot. 758 + // IF `candidate_spot_count` is ZERO after the first attempt, there is no point in 759 + // retrying since we cannot fulfill the requested layout. 
760 + // 761 + // Note that in practice, we use a binary tree to keep track of regions, and we use binary search 762 + // to optimize the search for a suitable gap instead of linear iteration. 763 + 764 + let layout = layout.pad_to_align(); 765 + 766 + // First attempt: guess a random target index 767 + let max_candidate_spots = self.max_range.size(); 768 + 769 + let target_index: usize = self 770 + .rng 771 + .as_mut() 772 + .map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap())) 773 + .unwrap_or_default(); 774 + 775 + // First attempt: visit the binary search tree to find a gap 776 + let mut v = GapVisitor::new(layout, target_index); 777 + self.visit_gaps(&mut v); 778 + 779 + // if we found a spot already we're done 780 + if let Some(chosen) = v.chosen { 781 + return Some(chosen); 782 + } 783 + 784 + // otherwise, Second attempt: we need to retry with the correct candidate spot count 785 + // but if we counted no suitable candidate spots during the first attempt, we cannot fulfill 786 + // the request. 787 + if v.candidate_spots == 0 { 788 + return None; 789 + } 790 + 791 + // Second attempt: pick a new target_index that's actually fulfillable 792 + let target_index: usize = self 793 + .rng 794 + .as_mut() 795 + .map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap())) 796 + .unwrap_or_default(); 797 + 798 + // Second attempt: visit the binary search tree to find a gap 799 + let mut v = GapVisitor::new(layout, target_index); 800 + self.visit_gaps(&mut v); 801 + 802 + let chosen = v 803 + .chosen 804 + .expect("There must be a chosen spot after the first attempt. This is a bug!"); 805 + 806 + debug_assert!(chosen.is_canonical::<R>()); 807 + 808 + Some(chosen) 809 + } 810 + 811 + /// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space 812 + /// from lowest to highest addresses. 
    fn visit_gaps(&self, v: &mut GapVisitor) {
        let Some(root) = self.regions.root().get() else {
            // if the tree is empty, we treat the entire max_range as the gap
            // note that we do not care about the returned ControlFlow, as there is nothing else we
            // could try to find a spot anyway
            let _ = v.visit(self.max_range.clone());

            return;
        };

        // see if there is a suitable gap BEFORE the first address space region
        if v.visit(self.max_range.start..root.subtree_range().start)
            .is_break()
        {
            return;
        }

        // now comes the main part of the search. we start at the WAVLTree root node and do a
        // binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
        // to speed up this search. See `AddressSpaceRegion` for details on how this works.

        let mut maybe_current = self.regions.root().get();
        // Highest address already covered by a visited subtree; prevents re-descending
        // into subtrees we have fully processed when walking back up to the parent.
        let mut already_visited = VirtualAddress::MIN;

        while let Some(current) = maybe_current {
            // Only bother descending into this subtree if its metadata says it contains a
            // gap that can satisfy the requested layout at all.
            if current.suitable_gap_in_subtree(v.layout()) {
                // First, look at the left subtree
                if let Some(left) = current.left_child() {
                    if left.suitable_gap_in_subtree(v.layout())
                        && left.subtree_range().end > already_visited
                    {
                        maybe_current = Some(left);
                        continue;
                    }

                    // Gap between the left subtree's highest region and this region.
                    if v.visit(left.subtree_range().end..current.range().start)
                        .is_break()
                    {
                        return;
                    }
                }

                if let Some(right) = current.right_child() {
                    // Gap between this region and the right subtree's lowest region.
                    if v.visit(current.range().end..right.subtree_range().start)
                        .is_break()
                    {
                        return;
                    }

                    if right.suitable_gap_in_subtree(v.layout())
                        && right.subtree_range().end > already_visited
                    {
                        maybe_current = Some(right);
                        continue;
                    }
                }
            }

            // Subtree exhausted: mark it visited and climb back up to the parent.
            already_visited = current.subtree_range().end;
            maybe_current = current.parent();
        }

        // see if there is a suitable gap AFTER the last address space region
        if v.visit(root.subtree_range().end..self.max_range.end)
            .is_break()
        {
            return;
        }
    }
}

/// # Safety
///
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
/// * `layout` must [*fit*] that region of memory.
///
/// [*currently mapped*]: #currently-mapped-memory
/// [*fit*]: #memory-fitting
unsafe fn get_region_containing_ptr(
    regions: &mut WAVLTree<AddressSpaceRegion>,
    ptr: NonNull<u8>,
    layout: Layout,
) -> CursorMut<'_, AddressSpaceRegion> {
    let addr = VirtualAddress::from_non_null(ptr);

    // Find the first region whose key is >= addr; per the caller's contract this must
    // be the region containing `addr`.
    let cursor = regions.lower_bound_mut(Bound::Included(&addr));

    assert_unsafe_precondition_!(
        "TODO",
        (cursor: &CursorMut<AddressSpaceRegion> = &cursor) => cursor.get().is_some()
    );

    // Safety: The caller guarantees the pointer is currently mapped which means we must have
    // a corresponding address space region for it
    let region = unsafe { cursor.get().unwrap_unchecked() };

    assert_unsafe_precondition_!(
        "TODO",
        (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
            let range = region.range();

            range.start.get() <= addr.get() && addr.get() < range.end.get()
        }
    );

    assert_unsafe_precondition_!(
        "`layout` does not fit memory region",
        (layout: Layout = layout, region: &AddressSpaceRegion = &region) => region.layout_fits_region(layout)
    );

    cursor
}

/// Visitor state for [`AddressSpace::visit_gaps`]: counts candidate spots and picks the
/// spot at `target_index` (see the algorithm description in `find_spot_for`).
pub(crate) struct GapVisitor {
    // Layout (size + alignment) a candidate spot must satisfy.
    layout: Layout,
    // Remaining number of candidate spots to skip before choosing one.
    target_index: usize,
    // Total number of suitable spots seen so far across all visited gaps.
    candidate_spots: usize,
    // The chosen base address, once found.
    chosen: Option<VirtualAddress>,
}

impl GapVisitor {
    fn new(layout: Layout, target_index: usize) -> Self {
        Self {
            layout,
            target_index,
            candidate_spots: 0,
            chosen: None,
        }
    }

    pub fn layout(&self) -> Layout {
        self.layout
    }

    /// Returns the number of spots in the given range that satisfy the layout we require
    fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
        debug_assert!(
            range.start.is_aligned_to(self.layout.align())
                && range.end.is_aligned_to(self.layout.align())
        );

        // ranges passed in here can become empty for a number of reasons (aligning might produce ranges
        // where end > start, or the range might be empty to begin with) in either case an empty
        // range means no spots are available
        if range.is_empty() {
            return 0;
        }

        // One spot per `align` step the layout can slide within the gap.
        let range_size = range.size();
        if range_size >= self.layout.size() {
            ((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
        } else {
            0
        }
    }

    pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
        // if we have already chosen a spot, signal the caller to stop
        if self.chosen.is_some() {
            return ControlFlow::Break(());
        }

        // NOTE(review): `checked_align_in(...).unwrap()` — presumably aligning inward can
        // never fail for gaps produced by `visit_gaps`; confirm, otherwise this can panic.
        let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();

        let spot_count = self.spots_in_range(&aligned_gap);

        self.candidate_spots += spot_count;

        if self.target_index < spot_count {
            // The target falls inside this gap: pick the spot `target_index` align-steps in.
            self.chosen = Some(
                aligned_gap
                    .start
                    .checked_add(self.target_index << self.layout.align().ilog2())
                    .unwrap(),
            );

            ControlFlow::Break(())
        } else {
            // Skip this gap's spots and keep searching.
            self.target_index -= spot_count;

            ControlFlow::Continue(())
        }
    }
}
+336
libs/mem/src/address_space/batch.rs
// Copyright 2025. Jonas Kruckenberg
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cmp;
use core::num::{NonZero, NonZeroUsize};

use smallvec::SmallVec;

use crate::address_space::{Flush, RawAddressSpace};
use crate::{AccessRules, PhysicalAddress, VirtualAddress};

/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
///
/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
/// this number.
pub struct Batch {
    // Most flushes involve only a handful of operations, so keep up to 4 inline
    // before spilling to the heap.
    ops: SmallVec<[BatchOperation; 4]>,
}

/// A single pending operation against the raw address space.
enum BatchOperation {
    Map(MapOperation),
    Unmap(UnmapOperation),
    SetAccessRules(SetAccessRulesOperation),
}

/// Pending `map` of `len` bytes at `virt` onto physical memory starting at `phys`.
struct MapOperation {
    virt: VirtualAddress,
    phys: PhysicalAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

/// Pending `unmap` of `len` bytes starting at `virt`.
struct UnmapOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
}

/// Pending `set_access_rules` over `len` bytes starting at `virt`.
struct SetAccessRulesOperation {
    virt: VirtualAddress,
    len: NonZeroUsize,
    access_rules: AccessRules,
}

// ===== impl Batch =====

impl Batch {
    /// Construct a new empty [`Batch`].
    pub fn new() -> Self {
        Self {
            ops: SmallVec::new(),
        }
    }

    /// Add a [`map`] operation to the set of batched operations.
61 + /// 62 + /// # Safety 63 + /// 64 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 65 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 66 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 67 + /// 68 + /// [`map`]: RawAddressSpace::map 69 + pub unsafe fn map( 70 + &mut self, 71 + virt: VirtualAddress, 72 + phys: PhysicalAddress, 73 + len: NonZeroUsize, 74 + access_rules: AccessRules, 75 + ) { 76 + let mut new = MapOperation { 77 + virt, 78 + phys, 79 + len, 80 + access_rules, 81 + }; 82 + 83 + let ops = self.ops.iter_mut().filter_map(|op| match op { 84 + BatchOperation::Map(op) => Some(op), 85 + _ => None, 86 + }); 87 + 88 + for op in ops { 89 + match op.try_merge_with(new) { 90 + Ok(()) => return, 91 + Err(new_) => new = new_, 92 + } 93 + } 94 + 95 + self.ops.push(BatchOperation::Map(new)); 96 + } 97 + 98 + /// Add an [`unmap`] operation to the set of batched operations. 99 + /// 100 + /// # Safety 101 + /// 102 + /// - virt..virt+len must be mapped 103 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 104 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 105 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 106 + /// 107 + /// [`unmap`]: RawAddressSpace::unmap 108 + pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) { 109 + let mut new = UnmapOperation { virt, len }; 110 + 111 + let ops = self.ops.iter_mut().filter_map(|op| match op { 112 + BatchOperation::Unmap(op) => Some(op), 113 + _ => None, 114 + }); 115 + 116 + for op in ops { 117 + match op.try_merge_with(new) { 118 + Ok(()) => return, 119 + Err(new_) => new = new_, 120 + } 121 + } 122 + 123 + self.ops.push(BatchOperation::Unmap(new)); 124 + } 125 + 126 + /// Add a [`set_access_rules`] operation to the set of batched operations. 
127 + /// 128 + /// # Safety 129 + /// 130 + /// - virt..virt+len must be mapped 131 + /// - `virt` must be aligned to `Self::PAGE_SIZE` 132 + /// - `phys` must be aligned to `Self::PAGE_SIZE` 133 + /// - `len` must an integer multiple of `Self::PAGE_SIZE` 134 + /// 135 + /// [`set_access_rules`]: RawAddressSpace::set_access_rules 136 + pub fn set_access_rules( 137 + &mut self, 138 + virt: VirtualAddress, 139 + len: NonZeroUsize, 140 + access_rules: AccessRules, 141 + ) { 142 + let mut new = SetAccessRulesOperation { 143 + virt, 144 + len, 145 + access_rules, 146 + }; 147 + 148 + let ops = self.ops.iter_mut().filter_map(|op| match op { 149 + BatchOperation::SetAccessRules(op) => Some(op), 150 + _ => None, 151 + }); 152 + 153 + for op in ops { 154 + match op.try_merge_with(new) { 155 + Ok(()) => return, 156 + Err(new_) => new = new_, 157 + } 158 + } 159 + 160 + self.ops.push(BatchOperation::SetAccessRules(new)); 161 + } 162 + 163 + /// Flushes the `Batch` ensuring all changes are materialized into the raw address space. 164 + pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> { 165 + let mut flush = raw_aspace.flush(); 166 + for op in self.ops.drain(..) { 167 + match op { 168 + BatchOperation::Map(op) => { 169 + debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE)); 170 + debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE)); 171 + debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE)); 172 + 173 + // Safety: the caller promised the correctness of the values on construction of 174 + // the operation. 175 + unsafe { 176 + raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?; 177 + } 178 + } 179 + BatchOperation::Unmap(op) => { 180 + debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE)); 181 + debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE)); 182 + 183 + // Safety: the caller promised the correctness of the values on construction of 184 + // the operation. 
185 + unsafe { 186 + raw_aspace.unmap(op.virt, op.len, &mut flush); 187 + } 188 + } 189 + BatchOperation::SetAccessRules(op) => { 190 + debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE)); 191 + debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE)); 192 + 193 + // Safety: the caller promised the correctness of the values on construction of 194 + // the operation. 195 + unsafe { 196 + raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush); 197 + } 198 + } 199 + }; 200 + } 201 + flush.flush() 202 + } 203 + } 204 + 205 + // ===== impl MapOperation ===== 206 + 207 + impl MapOperation { 208 + /// Returns true if this operation can be merged with `other`. 209 + /// 210 + /// Map operations can be merged if: 211 + /// - their [`AccessRules`] are the same 212 + /// - their virtual address ranges are contiguous (no gap between self and other) 213 + /// - their physical address ranges are contiguous 214 + /// - the resulting virtual address range still has the same size as the resulting 215 + /// physical address range 216 + const fn can_merge_with(&self, other: &Self) -> bool { 217 + // the access rules need to be the same 218 + let same_rules = self.access_rules.bits() == other.access_rules.bits(); 219 + 220 + let overlap_virt = self.virt.get() <= other.len.get() 221 + && other.virt.get() <= self.virt.get() + self.len.get(); 222 + 223 + let overlap_phys = self.phys.get() <= other.len.get() 224 + && other.phys.get() <= self.phys.get() + self.len.get(); 225 + 226 + let offset_virt = self.virt.get().wrapping_sub(other.virt.get()); 227 + let offset_phys = self.virt.get().wrapping_sub(other.virt.get()); 228 + let same_offset = offset_virt == offset_phys; 229 + 230 + same_rules && overlap_virt && overlap_phys && same_offset 231 + } 232 + 233 + /// Attempt to merge this operation with `other`. 234 + /// 235 + /// If this returns `Ok`, `other` has been merged into `self`. 
236 + /// 237 + /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant. 238 + fn try_merge_with(&mut self, other: Self) -> Result<(), Self> { 239 + if self.can_merge_with(&other) { 240 + let offset = self.virt.get().wrapping_sub(other.virt.get()); 241 + let len = self 242 + .len 243 + .get() 244 + .checked_add(other.len.get()) 245 + .unwrap() 246 + .wrapping_add(offset); 247 + 248 + self.virt = cmp::min(self.virt, other.virt); 249 + self.phys = cmp::min(self.phys, other.phys); 250 + self.len = NonZero::new(len).ok_or(other)?; 251 + 252 + Ok(()) 253 + } else { 254 + Err(other) 255 + } 256 + } 257 + } 258 + 259 + // ===== impl UnmapOperation ===== 260 + 261 + impl UnmapOperation { 262 + /// Returns true if this operation can be merged with `other`. 263 + /// 264 + /// Unmap operations can be merged if: 265 + /// - their virtual address ranges are contiguous (no gap between self and other) 266 + const fn can_merge_with(&self, other: &Self) -> bool { 267 + self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get() 268 + } 269 + 270 + /// Attempt to merge this operation with `other`. 271 + /// 272 + /// If this returns `Ok`, `other` has been merged into `self`. 273 + /// 274 + /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant. 275 + fn try_merge_with(&mut self, other: Self) -> Result<(), Self> { 276 + if self.can_merge_with(&other) { 277 + let offset = self.virt.get().wrapping_sub(other.virt.get()); 278 + let len = self 279 + .len 280 + .get() 281 + .checked_add(other.len.get()) 282 + .unwrap() 283 + .wrapping_add(offset); 284 + 285 + self.virt = cmp::min(self.virt, other.virt); 286 + self.len = NonZero::new(len).ok_or(other)?; 287 + 288 + Ok(()) 289 + } else { 290 + Err(other) 291 + } 292 + } 293 + } 294 + 295 + // ===== impl ProtectOperation ===== 296 + 297 + impl SetAccessRulesOperation { 298 + /// Returns true if this operation can be merged with `other`. 
299 + /// 300 + /// Protect operations can be merged if: 301 + /// - their [`AccessRules`] are the same 302 + /// - their virtual address ranges are contiguous (no gap between self and other) 303 + const fn can_merge_with(&self, other: &Self) -> bool { 304 + // the access rules need to be the same 305 + let same_rules = self.access_rules.bits() == other.access_rules.bits(); 306 + 307 + let overlap = self.virt.get() <= other.len.get() 308 + && other.virt.get() <= self.virt.get() + self.len.get(); 309 + 310 + same_rules && overlap 311 + } 312 + 313 + /// Attempt to merge this operation with `other`. 314 + /// 315 + /// If this returns `Ok`, `other` has been merged into `self`. 316 + /// 317 + /// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant. 318 + fn try_merge_with(&mut self, other: Self) -> Result<(), Self> { 319 + if self.can_merge_with(&other) { 320 + let offset = self.virt.get().wrapping_sub(other.virt.get()); 321 + let len = self 322 + .len 323 + .get() 324 + .checked_add(other.len.get()) 325 + .unwrap() 326 + .wrapping_add(offset); 327 + 328 + self.virt = cmp::min(self.virt, other.virt); 329 + self.len = NonZero::new(len).ok_or(other)?; 330 + 331 + Ok(()) 332 + } else { 333 + Err(other) 334 + } 335 + } 336 + }
+548
libs/mem/src/address_space/region.rs
// Copyright 2025. Jonas Kruckenberg
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use alloc::boxed::Box;
use core::alloc::Layout;
use core::marker::PhantomData;
use core::mem::offset_of;
use core::num::NonZeroUsize;
use core::ops::{Bound, Range, RangeBounds};
use core::pin::Pin;
use core::ptr::NonNull;
use core::{cmp, fmt, mem, slice};

use fallible_iterator::FallibleIterator;
use pin_project::pin_project;

use crate::address_space::{Batch, RawAddressSpace};
use crate::vmo::Vmo;
use crate::{AccessRules, AddressRangeExt, VirtualAddress};

/// A contiguous, VMO-backed region of virtual memory tracked by an `AddressSpace`.
#[pin_project]
#[derive(Debug)]
pub struct AddressSpaceRegion<R> {
    // The virtual address range covered by this region.
    range: Range<VirtualAddress>,
    // Access rules (read/write/execute, ...) in force for the whole region.
    access_rules: AccessRules,
    // The layout this region was allocated for.
    layout: Layout,
    // The virtual memory object providing the physical frames backing this region.
    vmo: Vmo,
    // Byte offset into `vmo` at which this region's backing starts.
    vmo_offset: usize,

    /// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
    subtree_range: Range<VirtualAddress>,
    /// The largest gap in this subtree, used when allocating new regions
    max_gap: usize,
    /// Links to other regions in the WAVL tree
    links: wavltree::Links<AddressSpaceRegion<R>>,

    // Ties the region to its raw address space type without storing one.
    _raw_aspace: PhantomData<R>,
}

impl<R: RawAddressSpace> AddressSpaceRegion<R> {
    /// Creates a region covering `layout.size()` bytes starting at `spot`.
    pub const fn new(
        spot: VirtualAddress,
        layout: Layout,
        access_rules: AccessRules,
        vmo: Vmo,
        vmo_offset: usize,
    ) -> Self {
        Self {
            range: spot..spot.checked_add(layout.size()).unwrap(),
            access_rules,
            layout,
            vmo,
            vmo_offset,

            // A lone region's subtree is just itself; the tree maintains these on insert.
            max_gap: 0,
            subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
            links: wavltree::Links::new(),

            _raw_aspace: PhantomData,
        }
    }

    pub const fn range(&self) -> &Range<VirtualAddress> {
        &self.range
    }

    pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
        &self.subtree_range
    }

    pub const fn access_rules(&self) -> AccessRules {
        self.access_rules
    }

    // NOTE(review): assumes the whole region is mapped and committed — reading an
    // uncommitted page through this slice would fault; confirm callers guarantee this.
    pub fn as_slice(&self) -> &[u8] {
        let ptr = self.range.start.as_ptr();
        let len = self.range.size();

        unsafe { slice::from_raw_parts(ptr, len) }
    }

    // NOTE(review): same commitment caveat as `as_slice`, plus the region must be writable.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.range.start.as_mut_ptr();
        let len = self.range.size();

        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    /// Returns the region as a fat pointer (base address + length in bytes).
    pub fn as_non_null(&self) -> NonNull<[u8]> {
        let ptr = self.range.start.as_non_null().unwrap();
        NonNull::slice_from_raw_parts(ptr, self.range.size())
    }

    /// Returns whether `layout` "fits" this region: the base is sufficiently aligned and
    /// the requested size lies between the originally allocated size and the region size.
    pub const fn layout_fits_region(&self, layout: Layout) -> bool {
        self.range.start.is_aligned_to(layout.align())
            && layout.size() >= self.layout.size()
            && layout.size() <= self.range.end.get() - self.range.start.get()
    }

    /// Find physical memory frames to back the given `range`.
    /// After this call succeeds, accesses that align with the given `access` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut R,
    ) -> crate::Result<()> {
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut acquired_frames = self.vmo.acquire(vmo_relative).enumerate();
        while let Some((idx, frame)) = acquired_frames.next()? {
            // NOTE(review): `virt` is derived from `self.range.start`, not from the start of
            // the requested sub-`range` — if a sub-range is committed this would map the
            // acquired frames at the region base instead; confirm against `Vmo::acquire`.
            let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();

            unsafe {
                batch.map(
                    virt,
                    frame.addr(),
                    NonZeroUsize::new(R::PAGE_SIZE).unwrap(),
                    access_rules,
                );
            }

            if self.vmo.has_content_source() {
                // TODO add virt addr to coalescer
            }
        }

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        // initialize patched holes if necessary
        if self.vmo.has_content_source() {
            // for every region in coalescer
            //     figure out content source offset
            //     read from content source at offset into region
        }

        Ok(())
    }

    /// Release physical memory frames backing the given `range`.
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn decommit(
        &self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut R,
    ) -> crate::Result<()> {
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut released_frames = self.vmo.release(vmo_relative).enumerate();
        while let Some((idx, _frame)) = released_frames.next()? {
            // NOTE(review): same `self.range.start + idx` caveat as in `commit` — confirm.
            let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();
            unsafe { batch.unmap(virt, NonZeroUsize::new(R::PAGE_SIZE).unwrap()) };

            // if VMO has content source && frame is dirty
            //     add virt addr to coalescer
        }

        // for every region in coalescer
        //     figure out content source offset
        //     write region to content source at offset

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        Ok(())
    }

    /// Zero out the memory in the given `range`.
    /// This MAY release physical memory frames backing the `range`.
187 + /// 188 + /// # Errors 189 + /// 190 + /// - `range` is out of bounds 191 + pub fn clear(&self, range: impl RangeBounds<VirtualAddress>) -> crate::Result<()> { 192 + todo!() 193 + } 194 + 195 + /// Update the access rules of this `AddressSpaceRegion`. 196 + pub fn update_access_rules( 197 + &mut self, 198 + access_rules: AccessRules, 199 + batch: &mut Batch, 200 + ) -> crate::Result<()> { 201 + todo!() 202 + } 203 + 204 + /// Fetches content in the given `range`. This operates logically equivalent to 205 + /// a read, write, or instruction fetch (depending on `access_rules`) so that future accesses 206 + /// are quicker. The provided `access_rules` MUST be a subset or equal to this regions access rules. 207 + /// 208 + /// # Errors 209 + /// 210 + /// - `range` is out of bounds 211 + /// - `access_rules` is NOT a subset of self.access_rules 212 + pub fn prefetch( 213 + &self, 214 + range: impl RangeBounds<VirtualAddress>, 215 + access_rules: AccessRules, 216 + ) -> crate::Result<()> { 217 + todo!() 218 + } 219 + 220 + /// Attempts to grow the address space region to `new_len`. 221 + /// `new_len` MUST be larger than or equal to the current length. 222 + pub fn grow(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 223 + todo!() 224 + } 225 + 226 + /// Attempts to shrink the address space region to `new_len`. 227 + /// `new_len` MUST be smaller than or equal to the current length. 
228 + pub fn shrink(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 229 + todo!() 230 + } 231 + 232 + // /// grow region to `new_len`, attempting to grow the VMO accordingly 233 + // /// `new_layout.size()` mut be greater than or equal to `self.layout.size()` 234 + // pub fn grow_in_place( 235 + // &mut self, 236 + // new_layout: Layout, 237 + // next_range: Option<Range<VirtualAddress>>, 238 + // batch: &mut Batch, 239 + // ) -> crate::Result<()> { 240 + // if new_layout.align() > self.layout.align() { 241 + // bail!("cannot grow in-place: New alignment greater than current"); 242 + // } 243 + // 244 + // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap(); 245 + // 246 + // if let Some(next_range) = next_range 247 + // && next_range.is_overlapping(&new_range) 248 + // { 249 + // bail!("cannot grow in-place: New overlapping with next range"); 250 + // } 251 + // 252 + // self.vmo.resize(new_range.size(), batch)?; 253 + // 254 + // self.update_range(new_range); 255 + // 256 + // Ok(()) 257 + // } 258 + // 259 + // /// shrink region to the first `len` bytes, dropping the rest frames. 
260 + // /// `new_layout.size()` mut be smaller than or equal to `self.layout.size()` 261 + // pub fn shrink(&mut self, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> { 262 + // if new_layout.align() > self.layout.align() { 263 + // bail!("cannot grow in-place: New alignment greater than current"); 264 + // } 265 + // 266 + // let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap(); 267 + // 268 + // self.vmo.resize(new_range.size(), batch)?; 269 + // 270 + // self.update_range(new_range); 271 + // 272 + // Ok(()) 273 + // } 274 + // 275 + // /// move the entire region to the new base address, remapping any already mapped frames 276 + // pub fn move_to( 277 + // &mut self, 278 + // new_base: VirtualAddress, 279 + // new_layout: Layout, 280 + // batch: &mut Batch, 281 + // ) -> crate::Result<()> { 282 + // let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap(); 283 + // 284 + // self.vmo.resize(new_range.size(), batch)?; 285 + // self.update_range(new_range); 286 + // 287 + // // - for every frame in VMO 288 + // // - attempt to map at new offset (add maps to batch) 289 + // 290 + // todo!() 291 + // } 292 + // 293 + // pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()> 294 + // where 295 + // R: RangeBounds<VirtualAddress>, 296 + // { 297 + // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound()); 298 + // 299 + // self.vmo.commit(bounds, will_write, batch) 300 + // } 301 + // 302 + // pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()> 303 + // where 304 + // R: RangeBounds<VirtualAddress>, 305 + // { 306 + // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound()); 307 + // 308 + // self.vmo.decommit(bounds, batch) 309 + // } 310 + // 311 + // /// updates the access rules of this region 312 + // pub fn update_access_rules( 313 + // &mut self, 314 + // access_rules: 
AccessRules, 315 + // batch: &mut Batch, 316 + // ) -> crate::Result<()> { 317 + // // TODO 318 + // // - for every frame in VMO 319 + // // - update access rules (add protects to batch) 320 + // // - update self access rules 321 + // 322 + // todo!() 323 + // } 324 + // 325 + // pub fn clear<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()> 326 + // where 327 + // R: RangeBounds<VirtualAddress>, 328 + // { 329 + // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound()); 330 + // 331 + // self.vmo.clear(bounds, batch) 332 + // } 333 + // 334 + // pub fn prefetch<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()> 335 + // where 336 + // R: RangeBounds<VirtualAddress>, 337 + // { 338 + // let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound()); 339 + // 340 + // self.vmo.prefetch(bounds, batch) 341 + // } 342 + 343 + pub fn assert_valid(&self, msg: &str) 344 + where 345 + R: fmt::Debug, 346 + { 347 + assert!(!self.range.is_empty(), "{msg}region range cannot be empty"); 348 + assert!( 349 + self.subtree_range.start <= self.range.start 350 + && self.range.end <= self.subtree_range.end, 351 + "{msg}region range cannot be bigger than its subtree range; region={self:?}" 352 + ); 353 + assert!( 354 + self.max_gap < self.subtree_range.size(), 355 + "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}" 356 + ); 357 + assert!( 358 + self.range.start.is_aligned_to(self.layout.align()), 359 + "{msg}region range is not aligned to its layout; region={self:?}" 360 + ); 361 + assert!( 362 + self.range.size() >= self.layout.size(), 363 + "{msg}region range is smaller than its layout; region={self:?}" 364 + ); 365 + 366 + self.links.assert_valid(); 367 + } 368 + 369 + /// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used 370 + /// during gap-searching. 
371 + pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool { 372 + // we need the layout to be padded to alignment 373 + debug_assert!(layout.size().is_multiple_of(layout.align())); 374 + 375 + self.max_gap >= layout.size() 376 + } 377 + 378 + /// Returns the left child node in the search tree of regions, used during gap-searching. 379 + pub fn left_child(&self) -> Option<&Self> { 380 + Some(unsafe { self.links.left()?.as_ref() }) 381 + } 382 + 383 + /// Returns the right child node in the search tree of regions, used during gap-searching. 384 + pub fn right_child(&self) -> Option<&Self> { 385 + Some(unsafe { self.links.right()?.as_ref() }) 386 + } 387 + 388 + /// Returns the parent node in the search tree of regions, used during gap-searching. 389 + pub fn parent(&self) -> Option<&Self> { 390 + Some(unsafe { self.links.parent()?.as_ref() }) 391 + } 392 + 393 + #[inline] 394 + fn bounds_to_vmo_relative( 395 + &self, 396 + bounds: impl RangeBounds<VirtualAddress>, 397 + ) -> (Bound<usize>, Bound<usize>) { 398 + let start = bounds.start_bound().map(|addr| { 399 + (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset 400 + }); 401 + let end = bounds.end_bound().map(|addr| { 402 + (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset 403 + }); 404 + 405 + (start, end) 406 + } 407 + 408 + fn update_range(&mut self, new_range: Range<VirtualAddress>) { 409 + self.range = new_range; 410 + // We also must propagate the information about our changed range to the rest of the tree 411 + // so searching for a free spot returns the correct results. 412 + Self::propagate_update_to_parent(Some(NonNull::from(self))); 413 + } 414 + 415 + /// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`] 416 + /// implementation below after each tree mutation that impacted this node or its subtree in some way 417 + /// (insertion, rotation, deletion). 
418 + /// 419 + /// Returns `true` if this nodes metadata changed. 420 + #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")] 421 + fn update_gap_metadata( 422 + mut node: NonNull<Self>, 423 + left: Option<NonNull<Self>>, 424 + right: Option<NonNull<Self>>, 425 + ) -> bool { 426 + fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize { 427 + right_first_byte 428 + .checked_sub_addr(left_last_byte) 429 + .unwrap_or_default() // TODO use saturating_sub_addr 430 + } 431 + 432 + let node = unsafe { node.as_mut() }; 433 + let mut left_max_gap = 0; 434 + let mut right_max_gap = 0; 435 + 436 + // recalculate the subtree_range start 437 + let old_subtree_range_start = if let Some(left) = left { 438 + let left = unsafe { left.as_ref() }; 439 + let left_gap = gap(left.subtree_range.end, node.range.start); 440 + left_max_gap = cmp::max(left_gap, left.max_gap); 441 + mem::replace(&mut node.subtree_range.start, left.subtree_range.start) 442 + } else { 443 + mem::replace(&mut node.subtree_range.start, node.range.start) 444 + }; 445 + 446 + // recalculate the subtree range end 447 + let old_subtree_range_end = if let Some(right) = right { 448 + let right = unsafe { right.as_ref() }; 449 + let right_gap = gap(node.range.end, right.subtree_range.start); 450 + right_max_gap = cmp::max(right_gap, right.max_gap); 451 + mem::replace(&mut node.subtree_range.end, right.subtree_range.end) 452 + } else { 453 + mem::replace(&mut node.subtree_range.end, node.range.end) 454 + }; 455 + 456 + // recalculate the map_gap 457 + let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap)); 458 + 459 + old_max_gap != node.max_gap 460 + || old_subtree_range_start != node.subtree_range.start 461 + || old_subtree_range_end != node.subtree_range.end 462 + } 463 + 464 + // Propagate metadata updates to this regions parent in the search tree. If we had to update 465 + // our metadata the parent must update its metadata too. 
466 + #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")] 467 + fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) { 468 + while let Some(node) = maybe_node { 469 + let links = unsafe { &node.as_ref().links }; 470 + let changed = Self::update_gap_metadata(node, links.left(), links.right()); 471 + 472 + // if the metadata didn't actually change, we don't need to recalculate parents 473 + if !changed { 474 + return; 475 + } 476 + 477 + maybe_node = links.parent(); 478 + } 479 + } 480 + } 481 + 482 + unsafe impl<A: RawAddressSpace> wavltree::Linked for AddressSpaceRegion<A> { 483 + /// Any heap-allocated type that owns an element may be used. 484 + /// 485 + /// An element *must not* move while part of an intrusive data 486 + /// structure. In many cases, `Pin` may be used to enforce this. 487 + type Handle = Pin<Box<Self>>; // TODO better handle type 488 + 489 + type Key = VirtualAddress; 490 + 491 + /// Convert an owned `Handle` into a raw pointer 492 + fn into_ptr(handle: Self::Handle) -> NonNull<Self> { 493 + // Safety: wavltree treats the ptr as pinned 494 + unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) } 495 + } 496 + 497 + /// Convert a raw pointer back into an owned `Handle`. 498 + unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { 499 + // Safety: `NonNull` *must* be constructed from a pinned reference 500 + // which the tree implementation upholds. 
501 + unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) } 502 + } 503 + 504 + unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> { 505 + ptr.map_addr(|addr| { 506 + let offset = offset_of!(Self, links); 507 + addr.checked_add(offset).unwrap() 508 + }) 509 + .cast() 510 + } 511 + 512 + fn get_key(&self) -> &Self::Key { 513 + &self.range.start 514 + } 515 + 516 + fn after_insert(self: Pin<&mut Self>) { 517 + debug_assert_eq!(self.subtree_range.start, self.range.start); 518 + debug_assert_eq!(self.subtree_range.end, self.range.end); 519 + debug_assert_eq!(self.max_gap, 0); 520 + Self::propagate_update_to_parent(self.links.parent()); 521 + } 522 + 523 + fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) { 524 + Self::propagate_update_to_parent(parent); 525 + } 526 + 527 + fn after_rotate( 528 + self: Pin<&mut Self>, 529 + parent: NonNull<Self>, 530 + sibling: Option<NonNull<Self>>, 531 + lr_child: Option<NonNull<Self>>, 532 + side: wavltree::Side, 533 + ) { 534 + let this = self.project(); 535 + // Safety: caller ensures ptr is valid 536 + let _parent = unsafe { parent.as_ref() }; 537 + 538 + this.subtree_range.start = _parent.subtree_range.start; 539 + this.subtree_range.end = _parent.subtree_range.end; 540 + *this.max_gap = _parent.max_gap; 541 + 542 + if side == wavltree::Side::Left { 543 + Self::update_gap_metadata(parent, sibling, lr_child); 544 + } else { 545 + Self::update_gap_metadata(parent, lr_child, sibling); 546 + } 547 + } 548 + }
+414
libs/mem/src/addresses.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use core::alloc::{Layout, LayoutError}; 9 + use core::ops::Range; 10 + 11 + use crate::address_space::RawAddressSpace; 12 + 13 + macro_rules! impl_address { 14 + ($address_ty:ident) => { 15 + impl $address_ty { 16 + pub const MAX: Self = Self(usize::MAX); 17 + pub const MIN: Self = Self(0); 18 + pub const ZERO: Self = Self(0); 19 + pub const BITS: u32 = usize::BITS; 20 + 21 + #[inline] 22 + pub const fn get(&self) -> usize { 23 + self.0 24 + } 25 + 26 + #[must_use] 27 + #[inline] 28 + pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self { 29 + Self(ptr.expose_provenance()) 30 + } 31 + 32 + #[must_use] 33 + #[inline] 34 + pub fn from_mut_ptr<T: ?Sized>(ptr: *mut T) -> Self { 35 + Self(ptr.expose_provenance()) 36 + } 37 + 38 + #[must_use] 39 + #[inline] 40 + pub fn from_non_null<T: ?Sized>(ptr: ::core::ptr::NonNull<T>) -> Self { 41 + Self(ptr.addr().get()) 42 + } 43 + 44 + #[inline] 45 + pub fn as_ptr(self) -> *const u8 { 46 + ::core::ptr::with_exposed_provenance(self.0) 47 + } 48 + 49 + #[inline] 50 + pub fn as_mut_ptr(self) -> *mut u8 { 51 + ::core::ptr::with_exposed_provenance_mut(self.0) 52 + } 53 + 54 + #[inline] 55 + pub fn as_non_null(self) -> Option<::core::ptr::NonNull<u8>> { 56 + ::core::num::NonZeroUsize::new(self.0) 57 + .map(::core::ptr::NonNull::with_exposed_provenance) 58 + } 59 + 60 + #[must_use] 61 + #[inline] 62 + pub const fn checked_add(self, rhs: usize) -> Option<Self> { 63 + if let Some(out) = self.0.checked_add(rhs) { 64 + Some(Self(out)) 65 + } else { 66 + None 67 + } 68 + } 69 + 70 + #[must_use] 71 + #[inline] 72 + pub const fn checked_add_signed(self, rhs: isize) -> Option<Self> { 
73 + if let Some(out) = self.0.checked_add_signed(rhs) { 74 + Some(Self(out)) 75 + } else { 76 + None 77 + } 78 + } 79 + 80 + #[must_use] 81 + #[inline] 82 + pub const fn checked_sub(self, rhs: usize) -> Option<Self> { 83 + if let Some(out) = self.0.checked_sub(rhs) { 84 + Some(Self(out)) 85 + } else { 86 + None 87 + } 88 + } 89 + #[must_use] 90 + #[inline] 91 + pub const fn checked_div(self, rhs: usize) -> Option<Self> { 92 + if let Some(out) = self.0.checked_div(rhs) { 93 + Some(Self(out)) 94 + } else { 95 + None 96 + } 97 + } 98 + #[must_use] 99 + #[inline] 100 + pub const fn checked_mul(self, rhs: usize) -> Option<Self> { 101 + if let Some(out) = self.0.checked_mul(rhs) { 102 + Some(Self(out)) 103 + } else { 104 + None 105 + } 106 + } 107 + #[must_use] 108 + #[inline] 109 + pub const fn checked_shl(self, rhs: u32) -> Option<Self> { 110 + if let Some(out) = self.0.checked_shl(rhs) { 111 + Some(Self(out)) 112 + } else { 113 + None 114 + } 115 + } 116 + #[must_use] 117 + #[inline] 118 + pub const fn checked_shr(self, rhs: u32) -> Option<Self> { 119 + if let Some(out) = self.0.checked_shr(rhs) { 120 + Some(Self(out)) 121 + } else { 122 + None 123 + } 124 + } 125 + // #[must_use] 126 + // #[inline] 127 + // pub const fn saturating_add(self, rhs: usize) -> Self { 128 + // Self(self.0.saturating_add(rhs)) 129 + // } 130 + // #[must_use] 131 + // #[inline] 132 + // pub const fn saturating_add_signed(self, rhs: isize) -> Self { 133 + // Self(self.0.saturating_add_signed(rhs)) 134 + // } 135 + // #[must_use] 136 + // #[inline] 137 + // pub const fn saturating_div(self, rhs: usize) -> Self { 138 + // Self(self.0.saturating_div(rhs)) 139 + // } 140 + // #[must_use] 141 + // #[inline] 142 + // pub const fn saturating_sub(self, rhs: usize) -> Self { 143 + // Self(self.0.saturating_sub(rhs)) 144 + // } 145 + // #[must_use] 146 + // #[inline] 147 + // pub const fn saturating_mul(self, rhs: usize) -> Self { 148 + // Self(self.0.saturating_mul(rhs)) 149 + // } 150 + #[must_use] 
151 + #[inline] 152 + pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { 153 + let (a, b) = self.0.overflowing_shl(rhs); 154 + (Self(a), b) 155 + } 156 + #[must_use] 157 + #[inline] 158 + pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { 159 + let (a, b) = self.0.overflowing_shr(rhs); 160 + (Self(a), b) 161 + } 162 + 163 + #[must_use] 164 + #[inline] 165 + pub const fn checked_sub_addr(self, rhs: Self) -> Option<usize> { 166 + self.0.checked_sub(rhs.0) 167 + } 168 + 169 + // #[must_use] 170 + // #[inline] 171 + // pub const fn saturating_sub_addr(self, rhs: Self) -> usize { 172 + // self.0.saturating_sub(rhs.0) 173 + // } 174 + 175 + #[must_use] 176 + #[inline] 177 + pub const fn is_aligned_to(&self, align: usize) -> bool { 178 + assert!( 179 + align.is_power_of_two(), 180 + "is_aligned_to: align is not a power-of-two" 181 + ); 182 + 183 + self.0 & (align - 1) == 0 184 + } 185 + 186 + #[must_use] 187 + #[inline] 188 + pub const fn checked_align_up(self, align: usize) -> Option<Self> { 189 + if !align.is_power_of_two() { 190 + panic!("checked_align_up: align is not a power-of-two"); 191 + } 192 + 193 + // SAFETY: `align` has been checked to be a power of 2 above 194 + let align_minus_one = unsafe { align.unchecked_sub(1) }; 195 + 196 + // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align) 197 + if let Some(addr_plus_align) = self.0.checked_add(align_minus_one) { 198 + let aligned = Self(addr_plus_align & 0usize.wrapping_sub(align)); 199 + debug_assert!(aligned.is_aligned_to(align)); 200 + debug_assert!(aligned.0 >= self.0); 201 + Some(aligned) 202 + } else { 203 + None 204 + } 205 + } 206 + 207 + // #[must_use] 208 + // #[inline] 209 + // pub const fn wrapping_align_up(self, align: usize) -> Self { 210 + // if !align.is_power_of_two() { 211 + // panic!("checked_align_up: align is not a power-of-two"); 212 + // } 213 + // 214 + // // SAFETY: `align` has been checked to be a power of 2 above 215 + // let align_minus_one = unsafe { 
align.unchecked_sub(1) }; 216 + // 217 + // // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align) 218 + // let out = addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align); 219 + // debug_assert!(out.is_aligned_to(align)); 220 + // out 221 + // } 222 + 223 + #[inline] 224 + pub const fn alignment(&self) -> usize { 225 + self.0 & (!self.0 + 1) 226 + } 227 + 228 + #[must_use] 229 + #[inline] 230 + pub const fn align_down(self, align: usize) -> Self { 231 + if !align.is_power_of_two() { 232 + panic!("checked_align_up: align is not a power-of-two"); 233 + } 234 + 235 + let aligned = Self(self.0 & 0usize.wrapping_sub(align)); 236 + debug_assert!(aligned.is_aligned_to(align)); 237 + debug_assert!(aligned.0 <= self.0); 238 + aligned 239 + } 240 + } 241 + 242 + impl ::core::fmt::Display for $address_ty { 243 + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { 244 + f.write_fmt(format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x 245 + } 246 + } 247 + 248 + impl ::core::fmt::Debug for $address_ty { 249 + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { 250 + f.debug_tuple(stringify!($address_ty)) 251 + .field(&format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x 252 + .finish() 253 + } 254 + } 255 + }; 256 + } 257 + 258 + #[repr(transparent)] 259 + #[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] 260 + pub struct VirtualAddress(usize); 261 + impl_address!(VirtualAddress); 262 + 263 + impl VirtualAddress { 264 + #[must_use] 265 + pub const fn new(n: usize) -> Self { 266 + Self(n) 267 + } 268 + 269 + pub const fn is_canonical<A: RawAddressSpace>(self) -> bool { 270 + (self.0 & A::CANONICAL_ADDRESS_MASK).wrapping_sub(1) >= A::CANONICAL_ADDRESS_MASK - 1 271 + } 272 + 273 + #[inline] 274 + pub const fn is_user_accessible<A: RawAddressSpace>(self) -> bool { 275 + // This address refers to userspace if it is in the lower half of the 276 + // 
canonical addresses. IOW - if all of the bits in the canonical address 277 + // mask are zero. 278 + (self.0 & A::CANONICAL_ADDRESS_MASK) == 0 279 + } 280 + } 281 + 282 + #[repr(transparent)] 283 + #[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] 284 + pub struct PhysicalAddress(usize); 285 + impl_address!(PhysicalAddress); 286 + 287 + impl PhysicalAddress { 288 + pub const fn new(n: usize) -> Self { 289 + Self(n) 290 + } 291 + } 292 + 293 + macro_rules! address_range_impl { 294 + () => { 295 + fn size(&self) -> usize { 296 + debug_assert!(self.start <= self.end); 297 + let is = self.end.checked_sub_addr(self.start).unwrap_or_default(); 298 + let should = if self.is_empty() { 299 + 0 300 + } else { 301 + self.end.get() - self.start.get() 302 + }; 303 + debug_assert_eq!(is, should); 304 + is 305 + } 306 + fn checked_add(self, offset: usize) -> Option<Self> { 307 + Some(Range::from( 308 + self.start.checked_add(offset)?..self.end.checked_add(offset)?, 309 + )) 310 + } 311 + fn as_ptr_range(&self) -> Range<*const u8> { 312 + Range::from(self.start.as_ptr()..self.end.as_ptr()) 313 + } 314 + fn as_mut_ptr_range(&self) -> Range<*mut u8> { 315 + Range::from(self.start.as_mut_ptr()..self.end.as_mut_ptr()) 316 + } 317 + fn checked_align_in(self, align: usize) -> Option<Self> 318 + where 319 + Self: Sized, 320 + { 321 + let res = Range::from(self.start.checked_align_up(align)?..self.end.align_down(align)); 322 + Some(res) 323 + } 324 + fn checked_align_out(self, align: usize) -> Option<Self> 325 + where 326 + Self: Sized, 327 + { 328 + let res = Range::from(self.start.align_down(align)..self.end.checked_align_up(align)?); 329 + // aligning outwards can only increase the size 330 + debug_assert!(res.start.0 <= res.end.0); 331 + Some(res) 332 + } 333 + // fn saturating_align_in(self, align: usize) -> Self { 334 + // self.start.saturating_align_up(align)..self.end.saturating_align_down(align) 335 + // } 336 + // fn saturating_align_out(self, align: usize) 
-> Self { 337 + // self.start.saturating_align_down(align)..self.end.saturating_align_up(align) 338 + // } 339 + 340 + // TODO test 341 + fn alignment(&self) -> usize { 342 + self.start.alignment() 343 + } 344 + fn into_layout(self) -> core::result::Result<Layout, core::alloc::LayoutError> { 345 + Layout::from_size_align(self.size(), self.alignment()) 346 + } 347 + fn is_overlapping(&self, other: &Self) -> bool { 348 + (self.start < other.end) & (other.start < self.end) 349 + } 350 + fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) { 351 + debug_assert!(self.is_overlapping(&other)); 352 + let a = Range::from(self.start..other.start); 353 + let b = Range::from(other.end..self.end); 354 + ((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b)) 355 + } 356 + fn clamp(&self, range: Self) -> Self { 357 + Range::from(self.start.max(range.start)..self.end.min(range.end)) 358 + } 359 + }; 360 + } 361 + 362 + pub trait AddressRangeExt { 363 + fn size(&self) -> usize; 364 + #[must_use] 365 + fn checked_add(self, offset: usize) -> Option<Self> 366 + where 367 + Self: Sized; 368 + #[must_use] 369 + fn as_ptr_range(&self) -> Range<*const u8>; 370 + #[must_use] 371 + fn as_mut_ptr_range(&self) -> Range<*mut u8>; 372 + #[must_use] 373 + fn checked_align_in(self, align: usize) -> Option<Self> 374 + where 375 + Self: Sized; 376 + #[must_use] 377 + fn checked_align_out(self, align: usize) -> Option<Self> 378 + where 379 + Self: Sized; 380 + // #[must_use] 381 + // fn saturating_align_in(self, align: usize) -> Self; 382 + // #[must_use] 383 + // fn saturating_align_out(self, align: usize) -> Self; 384 + fn alignment(&self) -> usize; 385 + fn into_layout(self) -> Result<Layout, LayoutError>; 386 + fn is_overlapping(&self, other: &Self) -> bool; 387 + fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) 388 + where 389 + Self: Sized; 390 + fn clamp(&self, range: Self) -> Self; 391 + fn is_user_accessible<A: RawAddressSpace>(&self) -> bool; 392 + } 393 
+ 394 + impl AddressRangeExt for Range<PhysicalAddress> { 395 + address_range_impl!(); 396 + fn is_user_accessible<A: RawAddressSpace>(&self) -> bool { 397 + unimplemented!("PhysicalAddress is never user accessible") 398 + } 399 + } 400 + 401 + impl AddressRangeExt for Range<VirtualAddress> { 402 + address_range_impl!(); 403 + 404 + fn is_user_accessible<A: RawAddressSpace>(&self) -> bool { 405 + if self.is_empty() { 406 + return false; 407 + } 408 + let Some(end_minus_one) = self.end.checked_sub(1) else { 409 + return false; 410 + }; 411 + 412 + self.start.is_user_accessible::<A>() && end_minus_one.is_user_accessible::<A>() 413 + } 414 + }
+194
libs/mem/src/frame.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use core::alloc::Layout; 9 + use core::cmp::PartialEq; 10 + use core::fmt; 11 + use core::fmt::Debug; 12 + use core::mem::offset_of; 13 + use core::ops::Deref; 14 + use core::ptr::NonNull; 15 + use core::sync::atomic; 16 + use core::sync::atomic::{AtomicUsize, Ordering}; 17 + 18 + use cordyceps::{Linked, list}; 19 + use pin_project::pin_project; 20 + 21 + use crate::PhysicalAddress; 22 + use crate::frame_alloc::FrameAllocator; 23 + 24 + /// Soft limit on the amount of references that may be made to a `Frame`. 25 + const MAX_REFCOUNT: usize = isize::MAX as usize; 26 + 27 + pub struct FrameRef { 28 + frame: NonNull<Frame>, 29 + alloc: &'static dyn FrameAllocator, 30 + } 31 + 32 + #[pin_project(!Unpin)] 33 + #[derive(Debug)] 34 + pub struct Frame { 35 + addr: PhysicalAddress, 36 + refcount: AtomicUsize, 37 + #[pin] 38 + links: list::Links<Self>, 39 + } 40 + 41 + // ===== impl FrameRef ===== 42 + 43 + impl Clone for FrameRef { 44 + /// Makes a clone of the `Frame`. 45 + /// 46 + /// This creates reference to the same `FrameInfo`, increasing the reference count by one. 47 + fn clone(&self) -> Self { 48 + // Increase the reference count by one. Using relaxed ordering, as knowledge of the 49 + // original reference prevents other threads from erroneously deleting 50 + // the object. 
        //
        // Again, restating what the `Arc` implementation quotes from the
        // [Boost documentation][1]:
        //
        // > Increasing the reference counter can always be done with memory_order_relaxed: New
        // > references to an object can only be formed from an existing
        // > reference, and passing an existing reference from one thread to
        // > another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.refcount.fetch_add(1, Ordering::Relaxed);
        // Cloning a live `FrameRef` implies the count was at least 1.
        debug_assert_ne!(old_size, 0);

        // Just like with `Arc` we want to prevent excessive refcounts in the case that we are
        // leaking `Frame`s somewhere (which we really shouldn't but just in case). Overflowing
        // the refcount would be *really* bad as it would treat the frame as free and potentially
        // cause a use-after-free scenario. Realistically this branch should never be taken.
        //
        // Also worth noting: Just like `Arc`, the refcount could still overflow when in between
        // the load above and this check some other cpu increased the refcount from `isize::MAX`
        // to `usize::MAX`, but that seems unlikely. The other option, doing the comparison and
        // update in one conditional atomic operation, produces much worse code, so if it is good
        // enough for the standard library, it is good enough for us.
        assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");

        // Safety: `self` existing proves `frame` and `alloc` are live and valid,
        // and we just accounted for the new reference above.
        unsafe { Self::from_raw_parts(self.frame, self.alloc.clone()) }
    }
}

impl Drop for FrameRef {
    /// Drops this reference to the `Frame`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        if self.refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of `FrameInfo` happen before freeing it.
        // Because the decrement above is `Release`, it synchronizes with this `Acquire`
        // fence. This means that any use of `FrameInfo` happens before decreasing
        // the reference count, which happens before this fence, which happens before
        // freeing `FrameInfo` in `drop_slow`.
        //
        // This section of the [Boost documentation][1], as quoted in Rust's `Arc`
        // implementation, may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        self.drop_slow();
    }
}

impl Debug for FrameRef {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("FrameRef")
            .field("ptr", &self.frame)
            .finish_non_exhaustive()
    }
}

impl Deref for FrameRef {
    type Target = Frame;

    fn deref(&self) -> &Self::Target {
        // Safety: this `FrameRef`'s refcount contribution keeps the `Frame` alive
        // for at least as long as `self`.
        unsafe { self.frame.as_ref() }
    }
}

impl FrameRef {
    /// Assembles a `FrameRef` from its raw parts without touching the refcount.
    ///
    /// # Safety
    ///
    /// `frame` must point at a live `Frame` owned by `alloc`, and the caller must
    /// have already accounted for this reference in the frame's refcount.
    unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self {
        Self { frame, alloc }
    }

    /// Cold path of `drop`: returns the frame to its allocator.
    ///
    /// Kept out-of-line so the common (non-final) drop stays small and inlineable.
    #[inline(never)]
    fn drop_slow(&mut self) {
        // NOTE(review): assumes the allocator's page size is a nonzero power of
        // two so that `size == align == page_size` forms a valid `Layout` —
        // confirm `FrameAllocator` implementors guarantee this.
        let layout = unsafe {
            Layout::from_size_align_unchecked(self.alloc.page_size(), self.alloc.page_size())
        };
        // Safety: the refcount reached zero, so no other reference to this
        // frame exists; the layout matches the single-frame allocation.
        unsafe {
            self.alloc.deallocate(self.frame, layout);
        }
    }
}

// ===== impl Frame =====

// Safety: assert_impl_all! above ensures that `FrameInfo` is `Send`
unsafe impl Send for Frame {}

// Safety: assert_impl_all! above ensures that `FrameInfo` is `Sync`
unsafe impl Sync for Frame {}

impl PartialEq<Frame> for &Frame {
    fn eq(&self, other: &Frame) -> bool {
        // NOTE(review): equality includes the *current* refcount (a relaxed
        // snapshot), so two views of the same physical frame can compare unequal
        // across time — confirm this is intentional.
        self.refcount() == other.refcount() && self.addr == other.addr
    }
}

impl Frame {
    /// Creates a new frame descriptor for the page at `addr` with the given
    /// initial reference count.
    pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
        Self {
            addr,
            refcount: AtomicUsize::new(initial_refcount),
            links: list::Links::new(),
        }
    }

    /// The current reference count. Relaxed load: the value may be stale by the
    /// time the caller inspects it.
    pub fn refcount(&self) -> usize {
        self.refcount.load(Ordering::Relaxed)
    }

    /// The physical address of this frame.
    pub fn addr(&self) -> PhysicalAddress {
        self.addr
    }
}

unsafe impl Linked<list::Links<Self>> for Frame {
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        // Compute a pointer to the `links` field without materializing a `&Frame`.
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+137
libs/mem/src/frame_alloc.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + mod area; 9 + mod area_selection; 10 + 11 + use core::alloc::Layout; 12 + use core::cell::RefCell; 13 + use core::cmp; 14 + use core::ops::Range; 15 + use core::ptr::NonNull; 16 + use core::sync::atomic::{AtomicUsize, Ordering}; 17 + 18 + use cordyceps::List; 19 + use cpu_local::collection::CpuLocal; 20 + use fallible_iterator::FallibleIterator; 21 + use lock_api::Mutex; 22 + use smallvec::SmallVec; 23 + 24 + use crate::address_space::RawAddressSpace; 25 + use crate::frame_alloc::area::Area; 26 + use crate::frame_alloc::area_selection::select_areas; 27 + use crate::{Frame, PhysicalAddress}; 28 + 29 + #[derive(Debug)] 30 + pub struct AllocError; 31 + 32 + pub unsafe trait FrameAllocator: Send + Sync + 'static { 33 + fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>; 34 + unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout); 35 + fn page_size(&self) -> usize; 36 + } 37 + 38 + const MAX_FRAMES_IN_CACHE: usize = 256; 39 + 40 + pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> { 41 + areas: Mutex<L, SmallVec<[Area<A>; 4]>>, 42 + cpu_local_cache: CpuLocal<RefCell<List<Frame>>>, 43 + max_alignment_hint: AtomicUsize, 44 + } 45 + 46 + impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> { 47 + pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> { 48 + let mut max_alignment_hint = 0; 49 + let mut areas = SmallVec::new(); 50 + 51 + let mut selections = select_areas::<A>(allocatable_regions); 52 + while let Some(selection) = selections.next()? 
{ 53 + let area = Area::new(selection.area, selection.bookkeeping); 54 + max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint()); 55 + areas.push(area); 56 + } 57 + 58 + Ok(Self { 59 + areas: Mutex::new(areas), 60 + cpu_local_cache: CpuLocal::new(), 61 + max_alignment_hint: AtomicUsize::new(max_alignment_hint), 62 + }) 63 + } 64 + 65 + pub fn max_alignment_hint(&self) -> usize { 66 + self.max_alignment_hint.load(Ordering::Relaxed) 67 + } 68 + 69 + fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> { 70 + if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE { 71 + let mut cache = self.cpu_local_cache.get_or_default().borrow_mut(); 72 + cache.pop_back() 73 + } else { 74 + None 75 + } 76 + } 77 + 78 + fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool { 79 + if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE { 80 + let mut cache = self.cpu_local_cache.get_or_default().borrow_mut(); 81 + 82 + if cache.len() < MAX_FRAMES_IN_CACHE { 83 + cache.push_back(block); 84 + return true; 85 + } 86 + } 87 + 88 + false 89 + } 90 + } 91 + 92 + unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator 93 + for &'static FrameAlloc<L, A> 94 + { 95 + fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> { 96 + // attempt to allocate from the CPU-local cache first 97 + if let Some(frame) = self.allocate_local(layout) { 98 + return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1)); 99 + } 100 + 101 + let mut areas = self.areas.lock(); 102 + for area in areas.iter_mut() { 103 + if let Ok(frames) = area.allocate(layout) { 104 + return Ok(frames); 105 + } 106 + } 107 + 108 + Err(AllocError) 109 + } 110 + 111 + unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) { 112 + // attempt to place the frame into the CPU-local cache first 113 + if self.deallocate_local(block, layout) { 114 + return; 115 + } 116 + 117 + let mut areas = 
self.areas.lock(); 118 + for area in areas.iter_mut() { 119 + let block_ = unsafe { block.as_ref() }; 120 + 121 + if area.contains_frame(block_.addr()) { 122 + unsafe { area.deallocate(block, layout) }; 123 + 124 + self.max_alignment_hint 125 + .fetch_max(area.max_alignment_hint(), Ordering::Relaxed); 126 + 127 + return; 128 + } 129 + } 130 + 131 + unreachable!(); 132 + } 133 + 134 + fn page_size(&self) -> usize { 135 + A::PAGE_SIZE 136 + } 137 + }
+444
libs/mem/src/frame_alloc/area.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use core::alloc::Layout; 9 + use core::marker::PhantomData; 10 + use core::mem::MaybeUninit; 11 + use core::ops::Range; 12 + use core::ptr::NonNull; 13 + use core::{cmp, fmt}; 14 + 15 + use cordyceps::List; 16 + 17 + use crate::address_space::RawAddressSpace; 18 + use crate::frame_alloc::AllocError; 19 + use crate::{AddressRangeExt, Frame, PhysicalAddress}; 20 + 21 + const MAX_ORDER: usize = 11; 22 + 23 + pub struct Area<A: RawAddressSpace> { 24 + area: Range<PhysicalAddress>, 25 + frames: &'static mut [MaybeUninit<Frame>], 26 + 27 + free_lists: [List<Frame>; MAX_ORDER], 28 + 29 + max_order: usize, 30 + total_frames: usize, 31 + used_frames: usize, 32 + 33 + _aspace: PhantomData<A>, 34 + } 35 + 36 + unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {} 37 + unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {} 38 + 39 + impl<A: RawAddressSpace> fmt::Debug for Area<A> { 40 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 41 + f.debug_struct("Area") 42 + .field("area", &self.area) 43 + .field( 44 + "frames", 45 + &format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len()), 46 + ) 47 + .field("free_lists", &self.free_lists) 48 + .field("max_order", &self.max_order) 49 + .field("total_frames", &self.total_frames) 50 + .field("used_frames", &self.used_frames) 51 + .finish() 52 + } 53 + } 54 + 55 + impl<A: RawAddressSpace> Area<A> { 56 + pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self { 57 + let mut free_lists = [const { List::new() }; MAX_ORDER]; 58 + let mut total_frames = 0; 59 + let mut max_order = 0; 60 + 61 + let mut 
remaining_bytes = area.size(); 62 + let mut addr = area.start; 63 + 64 + // This is the main area initialization loop. We loop through the `area` "chopping off" the 65 + // largest possible min_block_size-aligned block from the area and add that to its corresponding 66 + // free list. 67 + // 68 + // Note: Remember that for buddy allocators `size == align`. That means we both need to check 69 + // the alignment and size of our remaining area and can only chop off whatever is smaller. 70 + while remaining_bytes > 0 { 71 + // println!("processing next chunk. remaining_bytes={remaining_bytes};addr={addr:?}"); 72 + 73 + // the largest size we can chop off given the alignment of the remaining area 74 + let max_align = if addr == PhysicalAddress::ZERO { 75 + // if area happens to start exactly at address 0x0 our calculation below doesn't work. 76 + // address 0x0 actually supports *any* alignment so we special-case it and return `MAX` 77 + usize::MAX 78 + } else { 79 + // otherwise mask out the least significant bit of the address to figure out its alignment 80 + addr.get() & (!addr.get() + 1) 81 + }; 82 + // the largest size we can chop off given the size of the remaining area 83 + // which is the next smaller power of two 84 + let max_size = 1 << remaining_bytes.ilog2(); 85 + 86 + // our chosen size will be the smallest of 87 + // - the maximum size by remaining areas alignment 88 + // - the maximum size by remaining areas size 89 + // - the maximum block size supported by this allocator 90 + let size = cmp::min( 91 + cmp::min(max_align, max_size), 92 + A::PAGE_SIZE << (MAX_ORDER - 1), 93 + ); 94 + debug_assert!(size.is_multiple_of(A::PAGE_SIZE)); 95 + 96 + let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize; 97 + 98 + { 99 + let frame = frames[total_frames].write(Frame::new(addr, 0)); 100 + 101 + free_lists[order].push_back(NonNull::from(frame)); 102 + } 103 + 104 + total_frames += 1 << order; 105 + max_order = cmp::max(max_order, order); 106 + 
addr = addr.checked_add(size).unwrap(); 107 + remaining_bytes -= size; 108 + } 109 + 110 + // Make sure we've accounted for all frames 111 + debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE); 112 + 113 + Self { 114 + area, 115 + frames, 116 + 117 + free_lists, 118 + 119 + max_order, 120 + total_frames, 121 + used_frames: 0, 122 + 123 + _aspace: PhantomData, 124 + } 125 + } 126 + 127 + pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> { 128 + #[cfg(debug_assertions)] 129 + self.assert_valid(); 130 + 131 + let min_order = self.allocation_order(layout)?; 132 + 133 + // Starting at the smallest sufficient size class, search for a free block. If we find one in 134 + // a free list, return it and its order. 135 + let (block, block_order) = self.free_lists[min_order..] 136 + .iter_mut() 137 + .enumerate() 138 + .find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order))) 139 + .ok_or(AllocError)?; 140 + 141 + // if the block we found is larger than the `min_order` we need, we repeatedly split off 142 + // the upper half (of decreasing size) until we reach the desired size. The split off blocks 143 + // are returned to their appropriate free lists. 
144 + for order in (min_order..block_order).rev() { 145 + let block = unsafe { block.as_ref() }; 146 + let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap(); 147 + let buddy = self.frame_for_addr(buddy_addr).unwrap(); 148 + 149 + let buddy = buddy.write(Frame::new(buddy_addr, 0)); 150 + let buddy = NonNull::from(buddy); 151 + 152 + self.free_lists[order].push_back(buddy); 153 + } 154 + 155 + let alloc_size_frames = 1 << min_order; 156 + 157 + // lazily initialize all frames 158 + for idx in 0..alloc_size_frames { 159 + let block = unsafe { block.as_ref() }; 160 + let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap(); 161 + 162 + let frame = self.frame_for_addr(addr).unwrap(); 163 + frame.write(Frame::new(addr, 1)); 164 + } 165 + 166 + self.used_frames += alloc_size_frames; 167 + 168 + #[cfg(debug_assertions)] 169 + self.assert_valid(); 170 + 171 + Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames)) 172 + } 173 + 174 + pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) { 175 + #[cfg(debug_assertions)] 176 + self.assert_valid(); 177 + 178 + let initial_order = self.allocation_order(layout).unwrap(); 179 + let mut order = initial_order; 180 + 181 + while order < self.free_lists.len() - 1 { 182 + let block_ = unsafe { block.as_ref() }; 183 + if let Some(buddy) = self.buddy_addr(order, block_.addr()) 184 + && cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1)) 185 + && self.remove_from_free_list(order, buddy) 186 + { 187 + let buddy: NonNull<Frame> = 188 + NonNull::from(self.frame_for_addr(buddy).unwrap()).cast(); 189 + block = cmp::min(buddy, block); 190 + order += 1; 191 + } else { 192 + break; 193 + } 194 + } 195 + 196 + self.free_lists[order].push_back(block); 197 + self.used_frames -= 1 << initial_order; 198 + self.max_order = cmp::max(self.max_order, order); 199 + 200 + #[cfg(debug_assertions)] 201 + self.assert_valid(); 202 + } 203 + 204 + pub fn max_alignment_hint(&self) -> 
usize { 205 + self.order_size(self.max_order) 206 + } 207 + 208 + fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> { 209 + let relative = addr.checked_sub_addr(self.area.start).unwrap(); 210 + let idx = relative >> A::PAGE_SIZE_LOG_2; 211 + Some(&mut self.frames[idx]) 212 + } 213 + 214 + pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool { 215 + self.area.contains(&addr) 216 + } 217 + 218 + fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> { 219 + assert!(block >= self.area.start); 220 + assert!(block.is_aligned_to(A::PAGE_SIZE << order)); 221 + 222 + let relative = block.checked_sub_addr(self.area.start).unwrap(); 223 + let size = self.order_size(order); 224 + if size >= self.area.size() { 225 + // MAX_ORDER blocks do not have buddies 226 + None 227 + } else { 228 + // Fun: We can find our buddy by xoring the right bit in our 229 + // offset from the base of the heap. 230 + Some(self.area.start.checked_add(relative ^ size).unwrap()) 231 + } 232 + } 233 + 234 + fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool { 235 + let mut c = self.free_lists[order].cursor_front_mut(); 236 + 237 + while let Some(candidate) = c.current() { 238 + if candidate.addr() == to_remove { 239 + c.remove_current().unwrap(); 240 + return true; 241 + } 242 + 243 + c.move_next(); 244 + } 245 + 246 + false 247 + } 248 + 249 + // The size of the blocks we allocate for a given order. 
250 + const fn order_size(&self, order: usize) -> usize { 251 + 1 << (A::PAGE_SIZE_LOG_2 as usize + order) 252 + } 253 + 254 + const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> { 255 + // We can only allocate blocks that are at least one page 256 + if !layout.size().is_multiple_of(A::PAGE_SIZE) { 257 + return Err(AllocError); 258 + } 259 + 260 + // We can only allocate blocks that are at least page aligned 261 + if !layout.align().is_multiple_of(A::PAGE_SIZE) { 262 + return Err(AllocError); 263 + } 264 + 265 + let size = layout.size().next_power_of_two(); 266 + 267 + // We cannot allocate blocks larger than our largest size class 268 + if size > self.order_size(self.free_lists.len()) { 269 + return Err(AllocError); 270 + } 271 + 272 + Ok(size) 273 + } 274 + 275 + const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> { 276 + if let Ok(size) = self.allocation_size(layout) { 277 + Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize) 278 + } else { 279 + Err(AllocError) 280 + } 281 + } 282 + 283 + fn assert_valid(&self) { 284 + for (order, l) in self.free_lists.iter().enumerate() { 285 + l.assert_valid(); 286 + 287 + for f in l { 288 + assert!( 289 + f.addr().is_aligned_to(A::PAGE_SIZE << order), 290 + "frame {f:?} is not aligned to order {order}" 291 + ); 292 + } 293 + } 294 + 295 + assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames); 296 + } 297 + } 298 + 299 + fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize { 300 + let mut frames = 0; 301 + for (order, l) in area.free_lists.iter().enumerate() { 302 + frames += l.len() << order; 303 + } 304 + frames 305 + } 306 + 307 + #[cfg(test)] 308 + mod tests { 309 + use alloc::vec::Vec; 310 + 311 + use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest}; 312 + 313 + use super::*; 314 + use crate::test_utils::TestAddressSpace; 315 + 316 + const PAGE_SIZE: usize = 4096; 317 + 318 + prop_compose! 
{ 319 + // Generate arbitrary integers up to half the maximum desired value, 320 + // then multiply them by 2, thus producing only even integers in the 321 + // desired range. 322 + fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE } 323 + } 324 + 325 + proptest! { 326 + #[test] 327 + fn new_fixed_base(num_frames in 0..50_000usize) { 328 + let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new( 329 + PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE), 330 + { 331 + let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames); 332 + frames.resize_with(num_frames, || MaybeUninit::uninit()); 333 + frames.leak() 334 + } 335 + ); 336 + area.assert_valid(); 337 + 338 + // let's check whether the area correctly initialized itself 339 + // 340 + // since we start on an aligned base address (0x0) we expect it have split off chunks 341 + // largest-to-smallest. We replicate the process here, but take a block from its free list. 
342 + let mut frames_remaining = num_frames; 343 + while frames_remaining > 0 { 344 + // clamp the order we calculate at the max possible order 345 + let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1); 346 + 347 + let chunk = area.free_lists[chunk_order].pop_back(); 348 + prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}"); 349 + 350 + frames_remaining -= 1 << chunk_order; 351 + } 352 + // At the end of this process we expect all free lists to be empty 353 + prop_assert!(area.free_lists.iter().all(|list| list.is_empty())); 354 + } 355 + 356 + #[test] 357 + fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) { 358 + 359 + let area = { 360 + let area_end = area_start.checked_add(num_frames * PAGE_SIZE); 361 + prop_assume!(area_end.is_some()); 362 + PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap()) 363 + }; 364 + 365 + let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new( 366 + area, 367 + { 368 + let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames); 369 + frames.resize_with(num_frames, || MaybeUninit::uninit()); 370 + frames.leak() 371 + } 372 + ); 373 + area.assert_valid(); 374 + 375 + // TODO figure out if we can test the free lists in a sensible way 376 + } 377 + 378 + #[test] 379 + fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) { 380 + let area = { 381 + let area_end = area_start.checked_add(num_frames * PAGE_SIZE); 382 + prop_assume!(area_end.is_some()); 383 + PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap()) 384 + }; 385 + 386 + let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new( 387 + area, 388 + { 389 + let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames); 390 + frames.resize_with(num_frames, || MaybeUninit::uninit()); 391 + frames.leak() 392 + } 393 + ); 394 + area.assert_valid(); 395 + 396 + 
debug_assert_eq!(frames_in_area(&mut area), num_frames); 397 + } 398 + 399 + #[test] 400 + fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) { 401 + let area = { 402 + let area_end = area_start.checked_add(num_frames * PAGE_SIZE); 403 + prop_assume!(area_end.is_some()); 404 + PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap()) 405 + }; 406 + 407 + let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new( 408 + area.clone(), 409 + { 410 + let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames); 411 + frames.resize_with(num_frames, || MaybeUninit::uninit()); 412 + frames.leak() 413 + } 414 + ); 415 + area1.assert_valid(); 416 + 417 + let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new( 418 + area, 419 + { 420 + let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames); 421 + frames.resize_with(num_frames, || MaybeUninit::uninit()); 422 + frames.leak() 423 + } 424 + ); 425 + area2.assert_valid(); 426 + 427 + // we can only allocate contiguous blocks of the largest order available 428 + prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE)); 429 + 430 + let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap(); 431 + 432 + let block = area2.allocate(layout).unwrap(); 433 + prop_assert!(block.len() >= alloc_frames); 434 + 435 + unsafe { area2.deallocate(block.cast(), layout); } 436 + 437 + assert_eq!(frames_in_area(&area2), num_frames); 438 + 439 + for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() { 440 + prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len()); 441 + } 442 + } 443 + } 444 + }
+133
libs/mem/src/frame_alloc/area_selection.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use alloc::slice; 9 + use core::fmt::Formatter; 10 + use core::marker::PhantomData; 11 + use core::mem; 12 + use core::mem::MaybeUninit; 13 + use core::ops::Range; 14 + 15 + use fallible_iterator::FallibleIterator; 16 + use smallvec::SmallVec; 17 + 18 + use crate::address_space::RawAddressSpace; 19 + use crate::{AddressRangeExt, Frame, PhysicalAddress}; 20 + 21 + const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB 22 + 23 + #[derive(Debug)] 24 + pub struct AreaSelection { 25 + pub area: Range<PhysicalAddress>, 26 + pub bookkeeping: &'static mut [MaybeUninit<Frame>], 27 + pub wasted_bytes: usize, 28 + } 29 + 30 + #[derive(Debug)] 31 + pub struct SelectionError { 32 + pub range: Range<PhysicalAddress>, 33 + } 34 + 35 + pub struct ArenaSelections<A: RawAddressSpace> { 36 + allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>, 37 + wasted_bytes: usize, 38 + 39 + _aspace: PhantomData<A>, 40 + } 41 + 42 + pub fn select_areas<A: RawAddressSpace>( 43 + allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>, 44 + ) -> ArenaSelections<A> { 45 + ArenaSelections { 46 + allocatable_regions, 47 + wasted_bytes: 0, 48 + 49 + _aspace: PhantomData, 50 + } 51 + } 52 + 53 + impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> { 54 + type Item = AreaSelection; 55 + type Error = SelectionError; 56 + 57 + fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> { 58 + let Some(mut area) = self.allocatable_regions.pop() else { 59 + return Ok(None); 60 + }; 61 + 62 + while let Some(region) = self.allocatable_regions.pop() { 63 + debug_assert!(!area.is_overlapping(&region)); 64 + 65 + let 
pages_in_hole = if area.end <= region.start { 66 + // the region is higher than the current area 67 + region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE 68 + } else { 69 + debug_assert!(region.end <= area.start); 70 + // the region is lower than the current area 71 + area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE 72 + }; 73 + 74 + let waste_from_hole = size_of::<Frame>() * pages_in_hole; 75 + 76 + if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES { 77 + self.allocatable_regions.push(region); 78 + break; 79 + } else { 80 + self.wasted_bytes += waste_from_hole; 81 + 82 + if area.end <= region.start { 83 + area.end = region.end; 84 + } else { 85 + area.start = region.start; 86 + } 87 + } 88 + } 89 + 90 + let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap(); 91 + // We can't use empty areas anyway 92 + if aligned.is_empty() { 93 + return Err(SelectionError { range: aligned }); 94 + } 95 + 96 + let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE; 97 + 98 + let bookkeeping_start = aligned 99 + .end 100 + .checked_sub(bookkeeping_size_frames * size_of::<Frame>()) 101 + .unwrap() 102 + .align_down(A::PAGE_SIZE); 103 + 104 + // The area has no space to hold its own bookkeeping 105 + if bookkeeping_start < aligned.start { 106 + return Err(SelectionError { range: aligned }); 107 + } 108 + 109 + let bookkeeping = unsafe { 110 + slice::from_raw_parts_mut( 111 + bookkeeping_start.as_mut_ptr().cast(), 112 + bookkeeping_size_frames, 113 + ) 114 + }; 115 + aligned.end = bookkeeping_start; 116 + 117 + Ok(Some(AreaSelection { 118 + area: aligned, 119 + bookkeeping, 120 + wasted_bytes: mem::take(&mut self.wasted_bytes), 121 + })) 122 + } 123 + } 124 + 125 + // ===== impl SelectionError ===== 126 + 127 + impl core::fmt::Display for SelectionError { 128 + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { 129 + todo!() 130 + } 131 + } 132 + 133 + impl core::error::Error for SelectionError {}
+18
libs/mem/src/lib.rs
··· 1 + #![cfg_attr(not(test), no_std)] 2 + extern crate alloc; 3 + 4 + mod access_rules; 5 + pub mod address_space; 6 + mod addresses; 7 + mod frame; 8 + pub mod frame_alloc; 9 + #[cfg(test)] 10 + mod test_utils; 11 + mod utils; 12 + mod vmo; 13 + 14 + pub type Result<T> = anyhow::Result<T>; 15 + 16 + pub use access_rules::{AccessRules, WriteOrExecute}; 17 + pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 18 + pub use frame::{Frame, FrameRef};
+171
libs/mem/src/test_utils.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + extern crate std; 9 + 10 + use alloc::collections::BTreeMap; 11 + use core::marker::PhantomData; 12 + use core::num::NonZeroUsize; 13 + 14 + use crate::address_space::{Flush, RawAddressSpace}; 15 + use crate::{AccessRules, PhysicalAddress, VirtualAddress}; 16 + 17 + #[derive(Debug)] 18 + pub struct TestAddressSpace<const PAGE_SIZE: usize, const ADDR_BITS: u32> { 19 + mappings: BTreeMap<VirtualAddress, Mapping>, 20 + } 21 + 22 + #[derive(Debug)] 23 + pub struct Mapping { 24 + pub virt: VirtualAddress, 25 + pub phys: PhysicalAddress, 26 + pub len: NonZeroUsize, 27 + pub access_rules: AccessRules, 28 + } 29 + 30 + pub struct TestFlush { 31 + _priv: PhantomData<()>, 32 + } 33 + 34 + impl<const PAGE_SIZE: usize, const ADDR_BITS: u32> TestAddressSpace<PAGE_SIZE, ADDR_BITS> { 35 + pub const fn new() -> Self { 36 + Self { 37 + mappings: BTreeMap::new(), 38 + } 39 + } 40 + 41 + pub fn get_mapping_containing(&self, addr: VirtualAddress) -> Option<&Mapping> { 42 + let (end, mapping) = self.mappings.range(addr..).next()?; 43 + 44 + if addr > *end { None } else { Some(mapping) } 45 + } 46 + 47 + pub fn get_mapping_mut_containing(&mut self, addr: VirtualAddress) -> Option<&mut Mapping> { 48 + let (end, mapping) = self.mappings.range_mut(addr..).next()?; 49 + 50 + if addr > *end { None } else { Some(mapping) } 51 + } 52 + 53 + pub fn remove_mapping_containing(&mut self, addr: VirtualAddress) -> Option<Mapping> { 54 + let (key, _) = self.mappings.range_mut(addr..).next()?; 55 + let key = *key; 56 + 57 + Some(self.mappings.remove(&key).unwrap()) 58 + } 59 + } 60 + 61 + unsafe impl<const PAGE_SIZE: usize, const 
ADDR_BITS: u32> RawAddressSpace 62 + for TestAddressSpace<PAGE_SIZE, ADDR_BITS> 63 + { 64 + const PAGE_SIZE: usize = PAGE_SIZE; 65 + const VIRT_ADDR_BITS: u32 = ADDR_BITS; 66 + 67 + type Flush = TestFlush; 68 + 69 + fn flush(&self) -> Self::Flush { 70 + TestFlush { _priv: PhantomData } 71 + } 72 + 73 + fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)> { 74 + let mapping = self.get_mapping_containing(virt)?; 75 + 76 + let offset = virt.checked_sub_addr(mapping.virt).unwrap(); 77 + 78 + Some(( 79 + mapping.phys.checked_add(offset).unwrap(), 80 + mapping.access_rules, 81 + )) 82 + } 83 + 84 + unsafe fn map( 85 + &mut self, 86 + virt: VirtualAddress, 87 + phys: PhysicalAddress, 88 + len: NonZeroUsize, 89 + access_rules: AccessRules, 90 + _flush: &mut Self::Flush, 91 + ) -> crate::Result<()> { 92 + assert!(virt.is_aligned_to(Self::PAGE_SIZE)); 93 + assert!(phys.is_aligned_to(Self::PAGE_SIZE)); 94 + assert!(self.get_mapping_containing(virt).is_none()); 95 + 96 + let end_virt = virt.checked_add(len.get() - 1).unwrap(); 97 + assert!(end_virt.is_aligned_to(Self::PAGE_SIZE)); 98 + 99 + let prev = self.mappings.insert( 100 + end_virt, 101 + Mapping { 102 + virt, 103 + phys, 104 + len, 105 + access_rules, 106 + }, 107 + ); 108 + assert!(prev.is_none()); 109 + 110 + Ok(()) 111 + } 112 + 113 + unsafe fn unmap( 114 + &mut self, 115 + mut virt: VirtualAddress, 116 + len: NonZeroUsize, 117 + _flush: &mut Self::Flush, 118 + ) { 119 + assert!(virt.is_aligned_to(Self::PAGE_SIZE)); 120 + assert!( 121 + virt.checked_add(len.get()) 122 + .unwrap() 123 + .is_aligned_to(Self::PAGE_SIZE) 124 + ); 125 + 126 + let mut bytes_remaining = len.get(); 127 + 128 + while bytes_remaining > 0 { 129 + let mapping = self.remove_mapping_containing(virt).unwrap(); 130 + assert_eq!(mapping.virt, virt); 131 + 132 + bytes_remaining -= mapping.len.get(); 133 + virt = virt.checked_sub(mapping.len.get()).unwrap(); 134 + } 135 + } 136 + 137 + unsafe fn set_access_rules( 138 + &mut 
self, 139 + mut virt: VirtualAddress, 140 + len: NonZeroUsize, 141 + access_rules: AccessRules, 142 + _flush: &mut Self::Flush, 143 + ) { 144 + assert!(virt.is_aligned_to(Self::PAGE_SIZE)); 145 + assert!( 146 + virt.checked_add(len.get()) 147 + .unwrap() 148 + .is_aligned_to(Self::PAGE_SIZE) 149 + ); 150 + 151 + let mut bytes_remaining = len.get(); 152 + 153 + while bytes_remaining > 0 { 154 + let mapping = self.get_mapping_mut_containing(virt).unwrap(); 155 + assert_eq!(mapping.virt, virt); 156 + 157 + mapping.access_rules = access_rules; 158 + 159 + bytes_remaining -= mapping.len.get(); 160 + virt = virt.checked_sub(mapping.len.get()).unwrap(); 161 + } 162 + } 163 + } 164 + 165 + // ===== impl TestFlush ===== 166 + 167 + impl Flush for TestFlush { 168 + fn flush(self) -> crate::Result<()> { 169 + Ok(()) 170 + } 171 + }
+31
libs/mem/src/utils.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + macro_rules! assert_unsafe_precondition_ { 9 + ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => { 10 + { 11 + // This check is inlineable, but not by the MIR inliner. 12 + // The reason for this is that the MIR inliner is in an exceptionally bad position 13 + // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`, 14 + // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and 15 + // would be bad for compile times. 16 + // 17 + // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without 18 + // inlining the check. If it's `true`, it can inline it and get significantly better performance. 19 + #[inline] 20 + const fn precondition_check($($name:$ty),*) { 21 + assert!($e, concat!("unsafe precondition(s) violated: ", $message, 22 + "\n\nThis indicates a bug in the program. \ 23 + This Undefined Behavior check is optional, and cannot be relied on for safety.")) 24 + } 25 + 26 + #[cfg(debug_assertions)] 27 + precondition_check($($arg,)*); 28 + } 29 + }; 30 + } 31 + pub(crate) use assert_unsafe_precondition_;
+418
libs/mem/src/vmo.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use alloc::sync::Arc; 9 + use core::ops::{Bound, Range, RangeBounds}; 10 + use core::{fmt, ptr}; 11 + 12 + use anyhow::ensure; 13 + use fallible_iterator::FallibleIterator; 14 + use lock_api::RwLock; 15 + use smallvec::SmallVec; 16 + 17 + use crate::frame_list::FrameList; 18 + use crate::{FrameRef, PhysicalAddress}; 19 + 20 + pub struct Vmo { 21 + name: &'static str, 22 + vmo: RawVmo, 23 + } 24 + 25 + #[derive(Debug)] 26 + struct RawVmo { 27 + data: *const (), 28 + vtable: &'static RawVmoVTable, 29 + } 30 + 31 + #[derive(Copy, Clone, Debug)] 32 + struct RawVmoVTable { 33 + clone: unsafe fn(*const ()) -> RawVmo, 34 + acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 35 + release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 36 + clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 37 + len: unsafe fn(*const ()) -> usize, 38 + resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>, 39 + drop: unsafe fn(*const ()), 40 + } 41 + 42 + // ===== impl Vmo ===== 43 + 44 + impl Unpin for Vmo {} 45 + 46 + // Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Send 47 + // therefore Vmo is Send too 48 + unsafe impl Send for Vmo {} 49 + // Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Sync 50 + // therefore Vmo is Sync too 51 + unsafe impl Sync for Vmo {} 52 + 53 + impl Clone for Vmo { 54 + #[inline] 55 + fn clone(&self) -> Self { 56 + Self { 57 + vmo: unsafe { (self.vmo.vtable.clone)(self.vmo.data) }, 58 + name: self.name, 59 + } 60 + } 61 + } 62 + 
63 + impl Drop for Vmo { 64 + #[inline] 65 + fn drop(&mut self) { 66 + unsafe { (self.vmo.vtable.drop)(self.vmo.data) } 67 + } 68 + } 69 + 70 + impl fmt::Debug for Vmo { 71 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 72 + let vtable_ptr = self.vmo.vtable as *const RawVmoVTable; 73 + f.debug_struct("Vmo") 74 + .field("name", &self.name) 75 + .field("data", &self.vmo.data) 76 + .field("vtable", &vtable_ptr) 77 + .finish() 78 + } 79 + } 80 + 81 + impl Vmo { 82 + /// Creates a new `Vmo` from the provided `data` pointer and `vtable`. 83 + /// 84 + /// TODO 85 + /// 86 + /// The `data` pointer can be used to store arbitrary data as required by the vmo implementation. 87 + /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state. 88 + /// The value of this pointer will get passed to all functions that are part 89 + /// of the `vtable` as the first parameter. 90 + /// 91 + /// It is important to consider that the `data` pointer must point to a 92 + /// thread safe type such as an `Arc`. 93 + /// 94 + /// The `vtable` customizes the behavior of a `Vmo`. For each operation 95 + /// on the `Vmo`, the associated function in the `vtable` will be called. 96 + /// 97 + /// # Safety 98 + /// 99 + /// The behavior of the returned `Vmo` is undefined if the contract defined 100 + /// in [`RawVmoVTable`]'s documentation is not upheld. 101 + #[inline] 102 + #[must_use] 103 + pub const unsafe fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self { 104 + // Safety: ensured by caller 105 + unsafe { Self::from_raw(RawVmo { data, vtable }) } 106 + } 107 + 108 + /// Creates a new `Vmo` from a [`RawVmo`]. 109 + /// 110 + /// # Safety 111 + /// 112 + /// The behavior of the returned `Vmo` is undefined if the contract defined 113 + /// in [`RawVmo`]'s and [`RawVmoVTable`]'s documentation is not upheld. 
114 + #[inline] 115 + #[must_use] 116 + pub const unsafe fn from_raw(vmo: RawVmo) -> Self { 117 + Self { 118 + vmo, 119 + name: "<unnamed mystery VMO>", 120 + } 121 + } 122 + 123 + /// Add an arbitrary user-defined name to this `Vmo`. 124 + pub fn named(mut self, name: &'static str) -> Self { 125 + self.name = name; 126 + self 127 + } 128 + 129 + /// Returns this `Vmo`'s name, if it was given one using the [`Vmo::named`] 130 + /// method. 131 + pub fn name(&self) -> &'static str { 132 + self.name 133 + } 134 + 135 + pub fn len(&self) -> usize { 136 + unsafe { (self.vmo.vtable.len)(self.vmo.data) } 137 + } 138 + 139 + pub fn has_content_source(&self) -> bool { 140 + self.content_source().is_some() 141 + } 142 + 143 + pub fn content_source(&self) -> Option<()> { 144 + todo!() 145 + } 146 + 147 + /// Gets the `data` pointer used to create this `Vmo`. 148 + #[inline] 149 + #[must_use] 150 + pub fn data(&self) -> *const () { 151 + self.vmo.data 152 + } 153 + 154 + /// Gets the `vtable` pointer used to create this `Vmo`. 155 + #[inline] 156 + #[must_use] 157 + pub fn vtable(&self) -> &'static RawVmoVTable { 158 + self.vmo.vtable 159 + } 160 + 161 + // Acquire the frame at the given `index`. After this call succeeds, all accesses following the 162 + // given `access_rules` MUST NOT fault. 163 + // UNIT: frames 164 + pub fn acquire<R>( 165 + &self, 166 + range: R, 167 + ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> 168 + where 169 + R: RangeBounds<usize>, 170 + { 171 + let range = self.bound_check(range); 172 + 173 + let i = range 174 + .into_iter() 175 + .flat_map(|r| r) 176 + .filter_map(|idx| unsafe { (self.vmo.vtable.acquire)(self.vmo.data, idx).transpose() }); 177 + 178 + fallible_iterator::convert(i) 179 + } 180 + 181 + // Release the frame at the given `index`. After this call succeeds, all accesses to the frame 182 + // MUST fault. Returns the base physical address of the released frame. 
183 + // UNIT: frames 184 + pub fn release<R>( 185 + &self, 186 + range: R, 187 + ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> 188 + where 189 + R: RangeBounds<usize>, 190 + { 191 + let range = self.bound_check(range); 192 + 193 + let i = range 194 + .into_iter() 195 + .flat_map(|r| r) 196 + .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() }); 197 + 198 + fallible_iterator::convert(i) 199 + } 200 + 201 + // Clear the frame at the given `index`. After this call succeeds, all accesses to the frame 202 + // MUST fault. Returns the base physical address of the released frame. 203 + // UNIT: frames 204 + pub fn clear<R>( 205 + &self, 206 + range: R, 207 + ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> 208 + where 209 + R: RangeBounds<usize>, 210 + { 211 + let range = self.bound_check(range); 212 + 213 + let i = range 214 + .into_iter() 215 + .flat_map(|r| r) 216 + .filter_map(|idx| unsafe { (self.vmo.vtable.clear)(self.vmo.data, idx).transpose() }); 217 + 218 + fallible_iterator::convert(i) 219 + } 220 + 221 + // Grow the VMO to `new_size` (guaranteed to be larger than or equal to the current size). 222 + fn grow(&self, new_len: usize) -> crate::Result<()> { 223 + debug_assert!(new_len >= self.len()); 224 + 225 + unsafe { (self.vmo.vtable.resize)(self.vmo.data, new_len)? }; 226 + 227 + Ok(()) 228 + } 229 + 230 + // Shrink the VMO to `new_size` (guaranteed to be smaller than or equal to the current size). 231 + // After this call succeeds, all accesses outside the new range MUST fault. 
232 + // UNIT: frames 233 + pub fn shrink( 234 + &self, 235 + new_len: usize, 236 + ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> { 237 + debug_assert!(new_len <= self.len()); 238 + 239 + let old_len = self.len(); 240 + 241 + unsafe { 242 + (self.vmo.vtable.resize)(self.vmo.data, new_len)?; 243 + }; 244 + 245 + let i = (new_len..old_len) 246 + .into_iter() 247 + .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() }); 248 + 249 + fallible_iterator::convert(i) 250 + } 251 + 252 + #[inline] 253 + fn bound_check<R>(&self, range: R) -> crate::Result<Range<usize>> 254 + where 255 + R: RangeBounds<usize>, 256 + { 257 + let start = match range.start_bound() { 258 + Bound::Included(b) => *b, 259 + Bound::Excluded(b) => *b + 1, 260 + Bound::Unbounded => 0, 261 + }; 262 + let end = match range.end_bound() { 263 + Bound::Included(b) => *b + 1, 264 + Bound::Excluded(b) => *b, 265 + Bound::Unbounded => self.len(), 266 + }; 267 + 268 + ensure!(end <= self.len()); 269 + 270 + Ok(start..end) 271 + } 272 + } 273 + 274 + // ===== impl RawVmo ===== 275 + 276 + impl RawVmo { 277 + /// Creates a new `RawVmo` from the provided `data` pointer and `vtable`. 278 + /// 279 + /// The `data` pointer can be used to store arbitrary data as required by the VMO implementation. 280 + /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state. 281 + /// The value of this pointer will get passed to all functions that are part 282 + /// of the `vtable` as the first parameter. 283 + /// 284 + /// It is important to consider that the `data` pointer must point to a 285 + /// thread safe type such as an `Arc`. 286 + /// 287 + /// The `vtable` customizes the behavior of a `Vmo`. For each operation 288 + /// on the `Vmo`, the associated function in the `vtable` will be called. 
289 + #[inline] 290 + #[must_use] 291 + pub const fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self { 292 + Self { data, vtable } 293 + } 294 + } 295 + 296 + // ===== impl RawVmoVTable ===== 297 + 298 + impl RawVmoVTable { 299 + pub const fn new( 300 + clone: unsafe fn(*const ()) -> RawVmo, 301 + acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 302 + release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 303 + clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 304 + len: unsafe fn(*const ()) -> usize, 305 + resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>, 306 + drop: unsafe fn(*const ()), 307 + ) -> Self { 308 + Self { 309 + clone, 310 + acquire, 311 + release, 312 + clear, 313 + len, 314 + resize, 315 + drop, 316 + } 317 + } 318 + } 319 + 320 + pub fn stub_vmo() -> Vmo { 321 + const WIRED_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new( 322 + stub_clone, 323 + stub_acquire, 324 + stub_release, 325 + stub_clear, 326 + stub_len, 327 + stub_resize, 328 + stub_drop, 329 + ); 330 + 331 + unsafe fn stub_clone(ptr: *const ()) -> RawVmo { 332 + debug_assert!(ptr.is_null()); 333 + RawVmo::new(ptr, &WIRED_VMO_VTABLE) 334 + } 335 + 336 + unsafe fn stub_acquire(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> { 337 + debug_assert!(ptr.is_null()); 338 + unreachable!() 339 + } 340 + unsafe fn stub_release(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> { 341 + debug_assert!(ptr.is_null()); 342 + unreachable!() 343 + } 344 + unsafe fn stub_clear(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> { 345 + debug_assert!(ptr.is_null()); 346 + unreachable!() 347 + } 348 + unsafe fn stub_len(ptr: *const ()) -> usize { 349 + debug_assert!(ptr.is_null()); 350 + unreachable!() 351 + } 352 + unsafe fn stub_resize(ptr: *const (), _new_len: usize) -> crate::Result<()> { 353 + debug_assert!(ptr.is_null()); 354 + unreachable!() 355 + 
} 356 + unsafe fn stub_drop(ptr: *const ()) { 357 + debug_assert!(ptr.is_null()); 358 + } 359 + 360 + unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) } 361 + } 362 + 363 + struct PagedVmo<R: lock_api::RawRwLock> { 364 + list: RwLock<R, SmallVec<[FrameRef; 64]>>, 365 + } 366 + 367 + impl<R: lock_api::RawRwLock> PagedVmo<R> { 368 + pub const fn new(phys: Range<PhysicalAddress>) -> Self { 369 + todo!() 370 + } 371 + 372 + const VMO_VTABLE: RawVmoVTable = RawVmoVTable::new( 373 + Self::clone, 374 + Self::acquire, 375 + Self::release, 376 + Self::clear, 377 + Self::len, 378 + Self::resize, 379 + Self::drop, 380 + ); 381 + 382 + unsafe fn clone(ptr: *const ()) -> RawVmo { 383 + unsafe { 384 + Arc::increment_strong_count(ptr.cast::<Self>()); 385 + } 386 + RawVmo::new(ptr, &Self::VMO_VTABLE) 387 + } 388 + 389 + unsafe fn drop(ptr: *const ()) { 390 + drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) }); 391 + } 392 + 393 + unsafe fn acquire(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 394 + let me = ptr.cast::<Self>().as_ref().unwrap(); 395 + 396 + let mut list = me.list.write(); 397 + 398 + list.entry(index).or_insert_with(|| todo!("allocate frame")); 399 + 400 + // list 401 + } 402 + 403 + unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 404 + todo!() 405 + } 406 + 407 + unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 408 + todo!() 409 + } 410 + 411 + unsafe fn len(ptr: *const ()) -> usize { 412 + todo!() 413 + } 414 + 415 + unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> { 416 + todo!() 417 + } 418 + }
+6 -2
libs/wavltree/src/cursor.rs
··· 88 88 pub unsafe fn get_ptr(&self) -> Link<T> { 89 89 self.current 90 90 } 91 - pub fn get(&self) -> Option<&'a T> { 92 - unsafe { self.current.map(|ptr| ptr.as_ref()) } 91 + pub const fn get(&self) -> Option<&'a T> { 92 + if let Some(ptr) = self.current { 93 + Some(unsafe { ptr.as_ref() }) 94 + } else { 95 + None 96 + } 93 97 } 94 98 pub fn get_mut(&mut self) -> Option<Pin<&'a mut T>> { 95 99 unsafe { self.current.map(|mut ptr| Pin::new_unchecked(ptr.as_mut())) }
+1 -1
libs/wavltree/src/lib.rs
··· 1510 1510 } 1511 1511 1512 1512 /// Returns `true` if this node is currently linked to a [WAVLTree]. 1513 - pub fn is_linked(&self) -> bool { 1513 + pub const fn is_linked(&self) -> bool { 1514 1514 let inner = unsafe { &*self.inner.get() }; 1515 1515 inner.up.is_some() || inner.left.is_some() || inner.right.is_some() 1516 1516 }