+4354
-7
Diff
round #4
+163
-4
Cargo.lock
+163
-4
Cargo.lock
···
46
46
source = "registry+https://github.com/rust-lang/crates.io-index"
47
47
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
48
48
49
+
[[package]]
50
+
name = "allocator-api2"
51
+
version = "0.3.0"
52
+
source = "registry+https://github.com/rust-lang/crates.io-index"
53
+
checksum = "78200ac3468a57d333cd0ea5dd398e25111194dcacd49208afca95c629a6311d"
54
+
49
55
[[package]]
50
56
name = "anes"
51
57
version = "0.1.6"
···
135
141
"windows-targets",
136
142
]
137
143
144
+
[[package]]
145
+
name = "bit-set"
146
+
version = "0.8.0"
147
+
source = "registry+https://github.com/rust-lang/crates.io-index"
148
+
checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
149
+
dependencies = [
150
+
"bit-vec",
151
+
]
152
+
153
+
[[package]]
154
+
name = "bit-vec"
155
+
version = "0.8.0"
156
+
source = "registry+https://github.com/rust-lang/crates.io-index"
157
+
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
158
+
138
159
[[package]]
139
160
name = "bitflags"
140
161
version = "2.9.2"
141
162
source = "registry+https://github.com/rust-lang/crates.io-index"
142
163
checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
143
164
165
+
[[package]]
166
+
name = "brie-tree"
167
+
version = "0.1.2"
168
+
source = "registry+https://github.com/rust-lang/crates.io-index"
169
+
checksum = "fc07bcb47a1eaa44b6eb9ae3dd5e895cbf222f7f378ecbe014e1dae4bba30a66"
170
+
dependencies = [
171
+
"allocator-api2 0.3.0",
172
+
"cfg-if",
173
+
"nonmax",
174
+
]
175
+
144
176
[[package]]
145
177
name = "bumpalo"
146
178
version = "3.19.0"
147
179
source = "registry+https://github.com/rust-lang/crates.io-index"
148
180
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
149
181
dependencies = [
150
-
"allocator-api2",
182
+
"allocator-api2 0.2.21",
151
183
]
152
184
153
185
[[package]]
···
562
594
source = "registry+https://github.com/rust-lang/crates.io-index"
563
595
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
564
596
597
+
[[package]]
598
+
name = "errno"
599
+
version = "0.3.13"
600
+
source = "registry+https://github.com/rust-lang/crates.io-index"
601
+
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
602
+
dependencies = [
603
+
"libc",
604
+
"windows-sys 0.59.0",
605
+
]
606
+
565
607
[[package]]
566
608
name = "escape8259"
567
609
version = "0.5.3"
···
591
633
"criterion",
592
634
]
593
635
636
+
[[package]]
637
+
name = "fastrand"
638
+
version = "2.3.0"
639
+
source = "registry+https://github.com/rust-lang/crates.io-index"
640
+
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
641
+
594
642
[[package]]
595
643
name = "fdt"
596
644
version = "0.1.0"
···
952
1000
"cordyceps",
953
1001
"cpu-local",
954
1002
"criterion",
955
-
"fastrand",
1003
+
"fastrand 0.1.0",
956
1004
"futures",
957
1005
"lazy_static",
958
1006
"loom",
···
995
1043
"cranelift-entity",
996
1044
"cranelift-frontend",
997
1045
"fallible-iterator",
998
-
"fastrand",
1046
+
"fastrand 0.1.0",
999
1047
"fdt",
1000
1048
"futures",
1001
1049
"gimli",
···
1119
1167
"vcpkg",
1120
1168
]
1121
1169
1170
+
[[package]]
1171
+
name = "linux-raw-sys"
1172
+
version = "0.9.4"
1173
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1174
+
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
1175
+
1122
1176
[[package]]
1123
1177
name = "litemap"
1124
1178
version = "0.8.0"
···
1195
1249
"regex-automata 0.4.9",
1196
1250
]
1197
1251
1252
+
[[package]]
1253
+
name = "mem"
1254
+
version = "0.1.0"
1255
+
dependencies = [
1256
+
"anyhow",
1257
+
"brie-tree",
1258
+
"cordyceps",
1259
+
"cpu-local",
1260
+
"fallible-iterator",
1261
+
"kasync",
1262
+
"lock_api",
1263
+
"mycelium-bitfield",
1264
+
"pin-project",
1265
+
"proptest",
1266
+
"rand",
1267
+
"rand_chacha",
1268
+
"smallvec",
1269
+
"wavltree",
1270
+
]
1271
+
1198
1272
[[package]]
1199
1273
name = "memchr"
1200
1274
version = "2.7.5"
···
1216
1290
source = "registry+https://github.com/rust-lang/crates.io-index"
1217
1291
checksum = "24e0cc5e2c585acbd15c5ce911dff71e1f4d5313f43345873311c4f5efd741cc"
1218
1292
1293
+
[[package]]
1294
+
name = "nonmax"
1295
+
version = "0.5.5"
1296
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1297
+
checksum = "610a5acd306ec67f907abe5567859a3c693fb9886eb1f012ab8f2a47bef3db51"
1298
+
1219
1299
[[package]]
1220
1300
name = "nu-ansi-term"
1221
1301
version = "0.46.0"
···
1467
1547
"yansi",
1468
1548
]
1469
1549
1550
+
[[package]]
1551
+
name = "proptest"
1552
+
version = "1.7.0"
1553
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1554
+
checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f"
1555
+
dependencies = [
1556
+
"bit-set",
1557
+
"bit-vec",
1558
+
"bitflags",
1559
+
"lazy_static",
1560
+
"num-traits",
1561
+
"rand",
1562
+
"rand_chacha",
1563
+
"rand_xorshift",
1564
+
"regex-syntax 0.8.5",
1565
+
"rusty-fork",
1566
+
"tempfile",
1567
+
"unarray",
1568
+
]
1569
+
1570
+
[[package]]
1571
+
name = "quick-error"
1572
+
version = "1.2.3"
1573
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1574
+
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
1575
+
1470
1576
[[package]]
1471
1577
name = "quote"
1472
1578
version = "1.0.40"
···
1511
1617
"getrandom",
1512
1618
]
1513
1619
1620
+
[[package]]
1621
+
name = "rand_xorshift"
1622
+
version = "0.4.0"
1623
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1624
+
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
1625
+
dependencies = [
1626
+
"rand_core",
1627
+
]
1628
+
1514
1629
[[package]]
1515
1630
name = "rayon"
1516
1631
version = "1.10.0"
···
1536
1651
version = "0.11.1"
1537
1652
source = "git+https://github.com/JonasKruckenberg/regalloc2?branch=jonas%2Frefactor%2Fstatic-machine-env#305811667665047d750521973be4b3b7a6a7d312"
1538
1653
dependencies = [
1539
-
"allocator-api2",
1654
+
"allocator-api2 0.2.21",
1540
1655
"bumpalo",
1541
1656
"hashbrown",
1542
1657
"log",
···
1619
1734
"semver",
1620
1735
]
1621
1736
1737
+
[[package]]
1738
+
name = "rustix"
1739
+
version = "1.0.8"
1740
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1741
+
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
1742
+
dependencies = [
1743
+
"bitflags",
1744
+
"errno",
1745
+
"libc",
1746
+
"linux-raw-sys",
1747
+
"windows-sys 0.59.0",
1748
+
]
1749
+
1622
1750
[[package]]
1623
1751
name = "rustversion"
1624
1752
version = "1.0.21"
1625
1753
source = "registry+https://github.com/rust-lang/crates.io-index"
1626
1754
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
1627
1755
1756
+
[[package]]
1757
+
name = "rusty-fork"
1758
+
version = "0.3.0"
1759
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1760
+
checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f"
1761
+
dependencies = [
1762
+
"fnv",
1763
+
"quick-error",
1764
+
"tempfile",
1765
+
"wait-timeout",
1766
+
]
1767
+
1628
1768
[[package]]
1629
1769
name = "ryu"
1630
1770
version = "1.0.20"
···
1788
1928
source = "registry+https://github.com/rust-lang/crates.io-index"
1789
1929
checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a"
1790
1930
1931
+
[[package]]
1932
+
name = "tempfile"
1933
+
version = "3.20.0"
1934
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1935
+
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
1936
+
dependencies = [
1937
+
"fastrand 2.3.0",
1938
+
"getrandom",
1939
+
"once_cell",
1940
+
"rustix",
1941
+
"windows-sys 0.59.0",
1942
+
]
1943
+
1791
1944
[[package]]
1792
1945
name = "thiserror"
1793
1946
version = "2.0.12"
···
2093
2246
"spin",
2094
2247
]
2095
2248
2249
+
[[package]]
2250
+
name = "unarray"
2251
+
version = "0.1.4"
2252
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2253
+
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
2254
+
2096
2255
[[package]]
2097
2256
name = "unicode-ident"
2098
2257
version = "1.0.18"
+1
libs/kasync/src/task.rs
+1
libs/kasync/src/task.rs
+29
libs/mem/Cargo.toml
+29
libs/mem/Cargo.toml
···
1
+
[package]
2
+
name = "mem"
3
+
version.workspace = true
4
+
edition.workspace = true
5
+
authors.workspace = true
6
+
license.workspace = true
7
+
8
+
[dependencies]
9
+
cpu-local.workspace = true
10
+
kasync.workspace = true
11
+
12
+
# 3rd-party dependencies
13
+
mycelium-bitfield.workspace = true
14
+
anyhow.workspace = true
15
+
cordyceps.workspace = true
16
+
pin-project.workspace = true
17
+
lock_api.workspace = true
18
+
fallible-iterator.workspace = true
19
+
smallvec.workspace = true
20
+
wavltree.workspace = true
21
+
rand_chacha.workspace = true
22
+
rand.workspace = true
23
+
brie-tree = "0.1.2"
24
+
25
+
[dev-dependencies]
26
+
proptest = "1.7.0"
27
+
28
+
[lints]
29
+
workspace = true
+13
libs/mem/proptest-regressions/frame.txt
+13
libs/mem/proptest-regressions/frame.txt
···
1
+
# Seeds for failure cases proptest has generated in the past. It is
2
+
# automatically read and these particular cases re-run before any
3
+
# novel cases are generated.
4
+
#
5
+
# It is recommended to check this file in to source control so that
6
+
# everyone who runs the test benefits from these saved cases.
7
+
cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1
8
+
cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257
9
+
cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257
10
+
cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354
11
+
cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257
12
+
cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20
13
+
cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+98
libs/mem/src/access_rules.rs
+98
libs/mem/src/access_rules.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mycelium_bitfield::bitfield! {
9
+
/// Rules that dictate how a region of virtual memory may be accessed.
10
+
///
11
+
/// # W^X
12
+
///
13
+
/// In order to prevent malicious code execution as proactively as possible,
14
+
/// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
15
+
/// through the [`WriteOrExecute`] enum field.
16
+
#[derive(PartialEq, Eq)]
17
+
pub struct AccessRules<u8> {
18
+
/// If set, reading from the memory region is allowed.
19
+
pub const READ: bool;
20
+
/// Whether executing, or writing this memory region is allowed (or neither).
21
+
pub const WRITE_OR_EXECUTE: WriteOrExecute;
22
+
/// If set, requires code in the memory region to use aarch64 Branch Target Identification.
23
+
/// Does nothing on non-aarch64 architectures.
24
+
pub const BTI: bool;
25
+
}
26
+
}
27
+
28
+
/// Whether executing, or writing this memory region is allowed (or neither).
29
+
///
30
+
/// This is an enum to enforce [`W^X`] at the type-level.
31
+
///
32
+
/// [`W^X`]: AccessRules
33
+
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
34
+
#[repr(u8)]
35
+
pub enum WriteOrExecute {
36
+
/// Neither writing nor execution of the memory region is allowed.
37
+
Neither = 0b00,
38
+
/// Writing to the memory region is allowed.
39
+
Write = 0b01,
40
+
/// Executing code from the memory region is allowed.
41
+
Execute = 0b10,
42
+
}
43
+
44
+
// ===== impl AccessRules =====
45
+
46
+
impl AccessRules {
47
+
48
+
pub const fn is_read_only(&self) -> bool {
49
+
const READ_MASK: u8 = AccessRules::READ.max_value();
50
+
assert!(READ_MASK == 1);
51
+
self.0 & READ_MASK == 1
52
+
}
53
+
54
+
pub fn allows_read(&self) -> bool {
55
+
self.get(Self::READ)
56
+
}
57
+
58
+
pub fn allows_write(&self) -> bool {
59
+
match self.get(Self::WRITE_OR_EXECUTE) {
60
+
WriteOrExecute::Write => true,
61
+
_ => false,
62
+
}
63
+
}
64
+
65
+
pub fn allows_execution(&self) -> bool {
66
+
match self.get(Self::WRITE_OR_EXECUTE) {
67
+
WriteOrExecute::Execute => true,
68
+
_ => false,
69
+
}
70
+
}
71
+
}
72
+
73
+
// ===== impl WriteOrExecute =====
74
+
75
+
impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
76
+
type Error = core::convert::Infallible;
77
+
78
+
/// The number of bits required to represent a value of this type.
79
+
const BITS: u32 = 2;
80
+
81
+
#[inline]
82
+
fn try_from_bits(bits: u8) -> Result<Self, Self::Error> {
83
+
match bits {
84
+
b if b == Self::Neither as u8 => Ok(Self::Neither),
85
+
b if b == Self::Write as u8 => Ok(Self::Write),
86
+
b if b == Self::Execute as u8 => Ok(Self::Execute),
87
+
_ => {
88
+
// this should never happen unless the bitpacking code is broken
89
+
unreachable!("invalid memory region access rules {bits:#b}")
90
+
}
91
+
}
92
+
}
93
+
94
+
#[inline]
95
+
fn into_bits(self) -> u8 {
96
+
self as u8
97
+
}
98
+
}
+1007
libs/mem/src/address_space.rs
+1007
libs/mem/src/address_space.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mod batch;
9
+
mod region;
10
+
11
+
use alloc::boxed::Box;
12
+
use alloc::sync::Arc;
13
+
use core::alloc::Layout;
14
+
use core::num::NonZeroUsize;
15
+
use core::ops::{Bound, ControlFlow, Range};
16
+
use core::ptr::NonNull;
17
+
18
+
use anyhow::{format_err, Context};
19
+
pub(crate) use batch::Batch;
20
+
use rand::distr::Uniform;
21
+
use rand::Rng;
22
+
use rand_chacha::ChaCha20Rng;
23
+
use region::AddressSpaceRegion;
24
+
use wavltree::{CursorMut, WAVLTree};
25
+
26
+
use crate::access_rules::AccessRules;
27
+
use crate::frame_alloc::FrameAllocator;
28
+
use crate::utils::assert_unsafe_precondition_;
29
+
use crate::vmo::PagedVmo;
30
+
use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
31
+
32
+
pub unsafe trait RawAddressSpace {
33
+
/// The smallest addressable chunk of memory of this address space. All address argument provided
34
+
/// to methods of this type (both virtual and physical) must be aligned to this.
35
+
const PAGE_SIZE: usize;
36
+
const VIRT_ADDR_BITS: u32;
37
+
38
+
const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8;
39
+
const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1);
40
+
41
+
/// The [`Flush`] implementation for this address space.
42
+
type Flush: Flush;
43
+
44
+
/// Return a new, empty flush for this address space.
45
+
fn flush(&self) -> Self::Flush;
46
+
47
+
/// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given
48
+
/// [`VirtualAddress`] if mapped.
49
+
fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>;
50
+
51
+
/// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the
52
+
/// specified access rules.
53
+
///
54
+
/// If this returns `Ok`, the mapping is added to the raw address space and all future
55
+
/// accesses to the virtual address range will translate to accesses of the physical address
56
+
/// range.
57
+
///
58
+
/// # Safety
59
+
///
60
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
61
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
62
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
63
+
///
64
+
/// # Errors
65
+
///
66
+
/// Returning `Err` indicates the mapping cannot be established and the virtual address range
67
+
/// remains unaltered.
68
+
unsafe fn map(
69
+
&mut self,
70
+
virt: VirtualAddress,
71
+
phys: PhysicalAddress,
72
+
len: NonZeroUsize,
73
+
access_rules: AccessRules,
74
+
flush: &mut Self::Flush,
75
+
) -> crate::Result<()>;
76
+
77
+
/// Unmap a contiguous range of `len` virtual addresses.
78
+
///
79
+
/// After this returns all accesses to the virtual address region will cause a fault.
80
+
///
81
+
/// # Safety
82
+
///
83
+
/// - `virt..virt+len` must be mapped
84
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
85
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
86
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
87
+
unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush);
88
+
89
+
/// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses.
90
+
///
91
+
/// After this returns all accesses to the virtual address region must follow the
92
+
/// specified `AccessRules` or cause a fault.
93
+
///
94
+
/// # Safety
95
+
///
96
+
/// - `virt..virt+len` must be mapped
97
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
98
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
99
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
100
+
unsafe fn set_access_rules(
101
+
&mut self,
102
+
virt: VirtualAddress,
103
+
len: NonZeroUsize,
104
+
access_rules: AccessRules,
105
+
flush: &mut Self::Flush,
106
+
);
107
+
}
108
+
109
+
/// A type that can flush changes made to a [`RawAddressSpace`].
110
+
///
111
+
/// Note: [`Flush`] is purely optional, it exists so implementation MAY batch
112
+
/// Note that the implementation is not required to delay materializing changes until [`Flush::flush`]
113
+
/// is called.
114
+
pub trait Flush {
115
+
/// Flush changes made to its [`RawAddressSpace`].
116
+
///
117
+
/// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across
118
+
/// all affected threads/CPUs.
119
+
///
120
+
/// # Errors
121
+
///
122
+
/// If this returns `Err`, if flushing the changes failed. The changes, or a subset of them, might
123
+
/// still have taken effect across all or some of the threads/CPUs.
124
+
fn flush(self) -> crate::Result<()>;
125
+
}
126
+
127
+
pub struct AddressSpace<R: RawAddressSpace> {
128
+
raw: R,
129
+
regions: WAVLTree<AddressSpaceRegion<R>>,
130
+
batch: Batch,
131
+
max_range: Range<VirtualAddress>,
132
+
rng: Option<ChaCha20Rng>,
133
+
frame_alloc: &'static dyn FrameAllocator,
134
+
}
135
+
136
+
impl<A: RawAddressSpace> AddressSpace<A> {
137
+
pub fn new(raw: A, rng: Option<ChaCha20Rng>, frame_alloc: &'static dyn FrameAllocator) -> Self {
138
+
Self {
139
+
raw,
140
+
regions: WAVLTree::new(),
141
+
batch: Batch::new(),
142
+
max_range: VirtualAddress::MIN..VirtualAddress::MAX,
143
+
rng,
144
+
frame_alloc,
145
+
}
146
+
}
147
+
148
+
/// Attempts to reserve a region of virtual memory.
149
+
///
150
+
/// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
151
+
/// of `layout`. Access to this region must obey the provided `rules` or cause a hardware fault.
152
+
///
153
+
/// The returned region may have a larger size than specified by `layout.size()`, and may or may
154
+
/// not have its contents initialized.
155
+
///
156
+
/// The returned region of virtual memory remains mapped as long as it is [*currently mapped*]
157
+
/// and the address space type itself has not been dropped.
158
+
///
159
+
/// [*currently mapped*]: #currently-mapped-memory
160
+
///
161
+
/// # Errors
162
+
///
163
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
164
+
/// constraints, virtual memory is exhausted, or mapping otherwise fails.
165
+
pub fn map<R: lock_api::RawRwLock>(
166
+
&mut self,
167
+
layout: Layout,
168
+
access_rules: AccessRules,
169
+
) -> crate::Result<NonNull<[u8]>> {
170
+
#[cfg(debug_assertions)]
171
+
self.assert_valid("[AddressSpace::map]");
172
+
173
+
let layout = layout.align_to(A::PAGE_SIZE).unwrap();
174
+
175
+
let spot = self
176
+
.find_spot_for(layout)
177
+
.context(format_err!("cannot find free spot for layout {layout:?}"))?;
178
+
179
+
// TODO "relaxed" frame provider
180
+
let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo();
181
+
let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0);
182
+
183
+
let region = self.regions.insert(Box::pin(region));
184
+
185
+
// TODO OPTIONAL eagerly commit a few pages
186
+
187
+
self.batch.flush_changes(&mut self.raw)?;
188
+
189
+
Ok(region.as_non_null())
190
+
}
191
+
192
+
/// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region
193
+
/// is zero-initialized.
194
+
///
195
+
/// # Errors
196
+
///
197
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
198
+
/// constraints, virtual memory is exhausted, or mapping otherwise fails.
199
+
pub fn map_zeroed<R: lock_api::RawRwLock>(
200
+
&mut self,
201
+
layout: Layout,
202
+
access_rules: AccessRules,
203
+
) -> crate::Result<NonNull<[u8]>> {
204
+
#[cfg(debug_assertions)]
205
+
self.assert_valid("[AddressSpace::map_zeroed]");
206
+
207
+
let layout = layout.align_to(A::PAGE_SIZE).unwrap();
208
+
209
+
let spot = self
210
+
.find_spot_for(layout)
211
+
.context(format_err!("cannot find free spot for layout {layout:?}"))?;
212
+
213
+
// TODO "zeroed" frame provider
214
+
let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo();
215
+
let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0);
216
+
217
+
let region = self.regions.insert(Box::pin(region));
218
+
219
+
// TODO OPTIONAL eagerly commit a few pages
220
+
221
+
self.batch.flush_changes(&mut self.raw)?;
222
+
223
+
Ok(region.as_non_null())
224
+
}
225
+
226
+
/// Unmaps the virtual memory region referenced by `ptr`.
227
+
///
228
+
/// # Safety
229
+
///
230
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
231
+
/// * `layout` must [*fit*] that region of memory.
232
+
///
233
+
/// [*currently mapped*]: #currently-mapped-memory
234
+
/// [*fit*]: #memory-fitting
235
+
pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) {
236
+
#[cfg(debug_assertions)]
237
+
self.assert_valid("[AddressSpace::unmap]");
238
+
239
+
// Safety: responsibility of caller
240
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
241
+
242
+
// Safety: responsibility of caller
243
+
let mut region = unsafe { cursor.remove().unwrap_unchecked() };
244
+
245
+
region.decommit(.., &mut self.batch, &mut self.raw).unwrap();
246
+
}
247
+
248
+
/// Attempts to extend the virtual memory reservation.
249
+
///
250
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
251
+
/// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
252
+
/// this, the address space may extend the mapping referenced by `ptr` to fit the new layout.
253
+
///
254
+
/// TODO describe how extending a file-backed, of DMA-backed mapping works
255
+
///
256
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
257
+
///
258
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
259
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
260
+
/// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer
261
+
/// for accessing this region now.
262
+
///
263
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
264
+
/// this address space, and the contents of the region are unaltered.
265
+
///
266
+
/// [*Undefined Behavior*]
267
+
///
268
+
/// # Safety
269
+
///
270
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
271
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
272
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
273
+
///
274
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
275
+
///
276
+
/// [*currently mapped*]: #currently-mapped-memory
277
+
/// [*fit*]: #memory-fitting
278
+
///
279
+
/// # Errors
280
+
///
281
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
282
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
283
+
pub unsafe fn grow(
284
+
&mut self,
285
+
ptr: NonNull<u8>,
286
+
old_layout: Layout,
287
+
new_layout: Layout,
288
+
) -> crate::Result<NonNull<[u8]>> {
289
+
#[cfg(debug_assertions)]
290
+
self.assert_valid("[AddressSpace::grow]");
291
+
292
+
assert_unsafe_precondition_!(
293
+
"TODO",
294
+
(old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
295
+
old_layout.align().is_multiple_of(page_size)
296
+
}
297
+
);
298
+
299
+
assert_unsafe_precondition_!(
300
+
"TODO",
301
+
(new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
302
+
new_layout.align().is_multiple_of(page_size)
303
+
}
304
+
);
305
+
306
+
if new_layout == old_layout {
307
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
308
+
}
309
+
310
+
assert_unsafe_precondition_!(
311
+
"TODO",
312
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
313
+
new_layout.size() >= old_layout.size()
314
+
}
315
+
);
316
+
317
+
if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } {
318
+
Ok(ptr)
319
+
} else {
320
+
unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
321
+
}
322
+
}
323
+
324
+
/// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place.
325
+
///
326
+
/// # Safety
327
+
///
328
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
329
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
330
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
331
+
///
332
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
333
+
///
334
+
/// [*currently mapped*]: #currently-mapped-memory
335
+
/// [*fit*]: #memory-fitting
336
+
///
337
+
/// # Errors
338
+
///
339
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
340
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
341
+
pub unsafe fn grow_in_place(
342
+
&mut self,
343
+
ptr: NonNull<u8>,
344
+
old_layout: Layout,
345
+
new_layout: Layout,
346
+
) -> crate::Result<NonNull<[u8]>> {
347
+
#[cfg(debug_assertions)]
348
+
self.assert_valid("[AddressSpace::grow_in_place]");
349
+
350
+
assert_unsafe_precondition_!(
351
+
"TODO",
352
+
(old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
353
+
old_layout.align().is_multiple_of(page_size)
354
+
}
355
+
);
356
+
357
+
assert_unsafe_precondition_!(
358
+
"TODO",
359
+
(new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
360
+
new_layout.align().is_multiple_of(page_size)
361
+
}
362
+
);
363
+
364
+
if new_layout == old_layout {
365
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
366
+
}
367
+
368
+
assert_unsafe_precondition_!(
369
+
"TODO",
370
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
371
+
new_layout.size() >= old_layout.size()
372
+
}
373
+
);
374
+
375
+
unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) }
376
+
}
377
+
378
+
/// Attempts to shrink the virtual memory reservation.
379
+
///
380
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
381
+
/// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
382
+
/// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout.
383
+
///
384
+
/// TODO describe how shrinking a file-backed, of DMA-backed mapping works
385
+
///
386
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
387
+
///
388
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
389
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
390
+
/// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer
391
+
/// for accessing this region now.
392
+
///
393
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
394
+
/// this address space, and the contents of the region are unaltered.
395
+
///
396
+
/// [*Undefined Behavior*]
397
+
///
398
+
/// # Safety
399
+
///
400
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
401
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
402
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
403
+
///
404
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
405
+
///
406
+
/// [*currently mapped*]: #currently-mapped-memory
407
+
/// [*fit*]: #memory-fitting
408
+
///
409
+
/// # Errors
410
+
///
411
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
412
+
/// constraints, virtual memory is exhausted, or shrinking otherwise fails.
413
+
pub unsafe fn shrink(
414
+
&mut self,
415
+
ptr: NonNull<u8>,
416
+
old_layout: Layout,
417
+
new_layout: Layout,
418
+
) -> crate::Result<NonNull<[u8]>> {
419
+
#[cfg(debug_assertions)]
420
+
self.assert_valid("[AddressSpace::shrink]");
421
+
422
+
assert_unsafe_precondition_!(
423
+
"TODO",
424
+
(old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
425
+
old_layout.align().is_multiple_of(page_size)
426
+
}
427
+
);
428
+
429
+
assert_unsafe_precondition_!(
430
+
"TODO",
431
+
(new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
432
+
new_layout.align().is_multiple_of(page_size)
433
+
}
434
+
);
435
+
436
+
if new_layout == old_layout {
437
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
438
+
}
439
+
440
+
assert_unsafe_precondition_!(
441
+
"TODO",
442
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
443
+
new_layout.size() <= old_layout.size()
444
+
}
445
+
);
446
+
447
+
if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } {
448
+
Ok(ptr)
449
+
} else {
450
+
unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
451
+
}
452
+
}
453
+
454
+
/// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be
455
+
/// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned.
456
+
///
457
+
/// # Safety
458
+
///
459
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
460
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
461
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
462
+
///
463
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
464
+
///
465
+
/// [*currently mapped*]: #currently-mapped-memory
466
+
/// [*fit*]: #memory-fitting
467
+
///
468
+
/// # Errors
469
+
///
470
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
471
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
472
+
pub unsafe fn shrink_in_place(
473
+
&mut self,
474
+
ptr: NonNull<u8>,
475
+
old_layout: Layout,
476
+
new_layout: Layout,
477
+
) -> crate::Result<NonNull<[u8]>> {
478
+
#[cfg(debug_assertions)]
479
+
self.assert_valid("[AddressSpace::shrink_in_place]");
480
+
481
+
assert_unsafe_precondition_!(
482
+
"TODO",
483
+
(old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => {
484
+
old_layout.align().is_multiple_of(page_size)
485
+
}
486
+
);
487
+
488
+
assert_unsafe_precondition_!(
489
+
"TODO",
490
+
(new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => {
491
+
new_layout.align().is_multiple_of(page_size)
492
+
}
493
+
);
494
+
495
+
if new_layout == old_layout {
496
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
497
+
}
498
+
499
+
assert_unsafe_precondition_!(
500
+
"TODO",
501
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
502
+
new_layout.size() <= old_layout.size()
503
+
}
504
+
);
505
+
506
+
unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) }
507
+
}
508
+
509
+
/// Updates the access rules for the virtual memory region referenced by `ptr`.
510
+
///
511
+
/// If this returns `Ok`, access to this region must obey the new `rules` or cause a hardware fault.
512
+
///
513
+
/// If this method returns `Err`, the access rules of the memory region are unaltered.
514
+
///
515
+
/// # Safety
516
+
///
517
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
518
+
/// * `layout` must [*fit*] that region of memory.
519
+
///
520
+
/// [*currently mapped*]: #currently-mapped-memory
521
+
/// [*fit*]: #memory-fitting
522
+
pub unsafe fn update_access_rules(
523
+
&mut self,
524
+
ptr: NonNull<u8>,
525
+
layout: Layout,
526
+
access_rules: AccessRules,
527
+
) -> crate::Result<()> {
528
+
#[cfg(debug_assertions)]
529
+
self.assert_valid("[AddressSpace::update_access_rules]");
530
+
531
+
// Safety: responsibility of caller
532
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
533
+
534
+
// Safety: responsibility of caller
535
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
536
+
537
+
region.update_access_rules(access_rules, &mut self.batch)?;
538
+
539
+
self.batch.flush_changes(&mut self.raw)?;
540
+
541
+
Ok(())
542
+
}
543
+
544
+
/// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
545
+
///
546
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
547
+
/// mapped region. The pointer is suitable for holding data described by `new_layout` and is
548
+
/// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
549
+
/// virtual memory region.
550
+
///
551
+
/// TODO describe how clearing a file-backed, of DMA-backed mapping works
552
+
///
553
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
554
+
///
555
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
556
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
557
+
/// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
558
+
/// for accessing this region now.
559
+
///
560
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
561
+
/// this address space, and the contents of the region are unaltered.
562
+
///
563
+
/// [*Undefined Behavior*]
564
+
///
565
+
/// # Safety
566
+
///
567
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
568
+
/// * `layout` must [*fit*] that region of memory.
569
+
///
570
+
/// [*currently mapped*]: #currently-mapped-memory
571
+
/// [*fit*]: #memory-fitting
572
+
///
573
+
/// # Errors
574
+
///
575
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
576
+
/// constraints, clearing a virtual memory region is not supported by the backing storage, or
577
+
/// clearing otherwise fails.
578
+
pub unsafe fn clear(
579
+
&mut self,
580
+
ptr: NonNull<u8>,
581
+
layout: Layout,
582
+
) -> crate::Result<NonNull<[u8]>> {
583
+
#[cfg(debug_assertions)]
584
+
self.assert_valid("[AddressSpace::clear]");
585
+
586
+
// Safety: responsibility of caller
587
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
588
+
589
+
// Safety: responsibility of caller
590
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
591
+
592
+
region.clear(.., &mut self.batch)?;
593
+
594
+
self.batch.flush_changes(&mut self.raw)?;
595
+
596
+
Ok(region.as_non_null())
597
+
}
598
+
599
+
pub fn assert_valid(&self, msg: &str) {
600
+
let mut regions = self.regions.iter();
601
+
602
+
let Some(first_region) = regions.next() else {
603
+
assert!(
604
+
self.regions.is_empty(),
605
+
"{msg}region iterator is empty but tree is not."
606
+
);
607
+
608
+
return;
609
+
};
610
+
611
+
first_region.assert_valid(msg);
612
+
613
+
let mut seen_range = first_region.range().clone();
614
+
615
+
while let Some(region) = regions.next() {
616
+
assert!(
617
+
!region.range().is_overlapping(&seen_range),
618
+
"{msg}region cannot overlap previous region; region={region:?}"
619
+
);
620
+
assert!(
621
+
region.range().start >= self.max_range.start
622
+
&& region.range().end <= self.max_range.end,
623
+
"{msg}region cannot lie outside of max address space range; region={region:?}"
624
+
);
625
+
626
+
seen_range = seen_range.start..region.range().end;
627
+
628
+
region.assert_valid(msg);
629
+
630
+
// TODO assert validity of of VMO against phys addresses
631
+
// let (_phys, access_rules) = self
632
+
// .batched_raw
633
+
// .raw_address_space()
634
+
// .lookup(region.range().start)
635
+
// .unwrap_or_else(|| {
636
+
// panic!("{msg}region base address is not mapped in raw address space region={region:?}")
637
+
// });
638
+
//
639
+
// assert_eq!(
640
+
// access_rules,
641
+
// region.access_rules(),
642
+
// "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
643
+
// region.access_rules(),
644
+
// );
645
+
}
646
+
}
647
+
648
+
/// Attempts to grow a virtual memory region in-place. This method is shared between [`Self::shrink`]
649
+
/// and [`Self::shrink_in_place`].
650
+
///
651
+
/// # Safety
652
+
///
653
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
654
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
655
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
656
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
657
+
unsafe fn grow_in_place_inner(
658
+
&mut self,
659
+
ptr: NonNull<u8>,
660
+
old_layout: Layout,
661
+
new_layout: Layout,
662
+
) -> crate::Result<NonNull<[u8]>> {
663
+
// Safety: responsibility of caller
664
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
665
+
666
+
let next_range = cursor.peek_next().map(|region| region.range().clone());
667
+
668
+
// Safety: responsibility of caller
669
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
670
+
671
+
todo!();
672
+
region.grow(new_layout.size(), &mut self.batch)?;
673
+
674
+
self.batch.flush_changes(&mut self.raw)?;
675
+
676
+
Ok(region.as_non_null())
677
+
}
678
+
679
+
/// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::grow`]
680
+
/// and [`Self::grow_in_place`].
681
+
///
682
+
/// # Safety
683
+
///
684
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
685
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
686
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
687
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
688
+
unsafe fn shrink_in_place_inner(
689
+
&mut self,
690
+
ptr: NonNull<u8>,
691
+
old_layout: Layout,
692
+
new_layout: Layout,
693
+
) -> crate::Result<NonNull<[u8]>> {
694
+
// Safety: responsibility of caller
695
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
696
+
697
+
// Safety: responsibility of caller
698
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
699
+
700
+
region.shrink(new_layout.size(), &mut self.batch)?;
701
+
702
+
self.batch.flush_changes(&mut self.raw)?;
703
+
704
+
Ok(region.as_non_null())
705
+
}
706
+
707
+
/// Reallocates a virtual address region. This will unmap and remove the old region, allocating
708
+
/// a new region that will be backed the old regions physical memory.
709
+
///
710
+
/// # Safety
711
+
///
712
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
713
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
714
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
715
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
716
+
unsafe fn reallocate_region(
717
+
&mut self,
718
+
ptr: NonNull<u8>,
719
+
old_layout: Layout,
720
+
new_layout: Layout,
721
+
) -> crate::Result<NonNull<[u8]>> {
722
+
// Safety: responsibility of caller
723
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
724
+
let mut region = unsafe { cursor.remove().unwrap_unchecked() };
725
+
726
+
let spot = self.find_spot_for(new_layout).context(format_err!(
727
+
"cannot find free spot for layout {new_layout:?}"
728
+
))?;
729
+
730
+
todo!();
731
+
732
+
// region.move_to(spot, new_layout, &mut self.batch)?;
733
+
734
+
Ok(region.as_non_null())
735
+
}
736
+
737
+
/// Find a spot in the address space that satisfies the given `layout` requirements.
738
+
///
739
+
/// If a spot suitable for holding data described by `layout` is found, the base address of the
740
+
/// address range is returned in `Some`. The returned address is already correct aligned to
741
+
/// `layout.align()`.
742
+
///
743
+
/// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
744
+
/// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
745
+
/// be satisfied *at the moment*. Calls to this method will a different size, alignment, or at a
746
+
/// different time might still succeed.
747
+
fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> {
748
+
// The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is
749
+
// guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works
750
+
// elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively.
751
+
// Here is how it works:
752
+
// 1. We set up two counters: (see the GapVisitor)
753
+
// - `candidate_spot_count` which we initialize to zero
754
+
// - `target_index` which we either set to a random value between 0..<the maximum number of
755
+
// possible addresses in the address space> if ASLR is requested OR to zero otherwise.
756
+
// 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the
757
+
// gaps between regions. We count the number of addresses in each gap that satisfy the
758
+
// requested `Layout`s size and alignment and add that to the `candidate_spot_count`.
759
+
// IF the number of spots in the gap is greater than our chosen target index, we pick the
760
+
// spot at the target index and finish. ELSE we *decrement* the target index by the number
761
+
// of spots and continue to the next gap.
762
+
// 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original
763
+
// guess for `target_index` was too big, in which case we need to retry.
764
+
// 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know
765
+
// the *actual* number of possible spots in the address space since we just counted them
766
+
// during the first attempt. We initialize `target_index` to `0..candidate_spot_count`
767
+
// which is guaranteed to return us a spot.
768
+
// IF `candidate_spot_count` is ZERO after the first attempt, there is no point in
769
+
// retrying since we cannot fulfill the requested layout.
770
+
//
771
+
// Note that in practice, we use a binary tree to keep track of regions, and we use binary search
772
+
// to optimize the search for a suitable gap instead of linear iteration.
773
+
774
+
let layout = layout.pad_to_align();
775
+
776
+
// First attempt: guess a random target index
777
+
let max_candidate_spots = self.max_range.size();
778
+
779
+
let target_index: usize = self
780
+
.rng
781
+
.as_mut()
782
+
.map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap()))
783
+
.unwrap_or_default();
784
+
785
+
// First attempt: visit the binary search tree to find a gap
786
+
let mut v = GapVisitor::new(layout, target_index);
787
+
self.visit_gaps(&mut v);
788
+
789
+
// if we found a spot already we're done
790
+
if let Some(chosen) = v.chosen {
791
+
return Some(chosen);
792
+
}
793
+
794
+
// otherwise, Second attempt: we need to retry with the correct candidate spot count
795
+
// but if we counted no suitable candidate spots during the first attempt, we cannot fulfill
796
+
// the request.
797
+
if v.candidate_spots == 0 {
798
+
return None;
799
+
}
800
+
801
+
// Second attempt: pick a new target_index that's actually fulfillable
802
+
let target_index: usize = self
803
+
.rng
804
+
.as_mut()
805
+
.map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap()))
806
+
.unwrap_or_default();
807
+
808
+
// Second attempt: visit the binary search tree to find a gap
809
+
let mut v = GapVisitor::new(layout, target_index);
810
+
self.visit_gaps(&mut v);
811
+
812
+
let chosen = v
813
+
.chosen
814
+
.expect("There must be a chosen spot after the first attempt. This is a bug!");
815
+
816
+
debug_assert!(chosen.is_canonical::<A>());
817
+
818
+
Some(chosen)
819
+
}
820
+
821
+
/// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space
822
+
/// from lowest to highest addresses.
823
+
fn visit_gaps(&self, v: &mut GapVisitor) {
824
+
let Some(root) = self.regions.root().get() else {
825
+
// if the tree is empty, we treat the entire max_range as the gap
826
+
// note that we do not care about the returned ControlFlow, as there is nothing else we
827
+
// could try to find a spot anyway
828
+
let _ = v.visit(self.max_range.clone());
829
+
830
+
return;
831
+
};
832
+
833
+
// see if there is a suitable gap between BEFORE the first address space region
834
+
if v.visit(self.max_range.start..root.subtree_range().start)
835
+
.is_break()
836
+
{
837
+
return;
838
+
}
839
+
840
+
// now comes the main part of the search. we start at the WAVLTree root node and do a
841
+
// binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
842
+
// to speed up this search. See `AddressSpaceRegion` for details on how this works.
843
+
844
+
let mut maybe_current = self.regions.root().get();
845
+
let mut already_visited = VirtualAddress::MIN;
846
+
847
+
while let Some(current) = maybe_current {
848
+
// If there is no suitable gap in this entire
849
+
if current.suitable_gap_in_subtree(v.layout()) {
850
+
// First, look at the left subtree
851
+
if let Some(left) = current.left_child() {
852
+
if left.suitable_gap_in_subtree(v.layout())
853
+
&& left.subtree_range().end > already_visited
854
+
{
855
+
maybe_current = Some(left);
856
+
continue;
857
+
}
858
+
859
+
if v.visit(left.subtree_range().end..current.range().start)
860
+
.is_break()
861
+
{
862
+
return;
863
+
}
864
+
}
865
+
866
+
if let Some(right) = current.right_child() {
867
+
if v.visit(current.range().end..right.subtree_range().start)
868
+
.is_break()
869
+
{
870
+
return;
871
+
}
872
+
873
+
if right.suitable_gap_in_subtree(v.layout())
874
+
&& right.subtree_range().end > already_visited
875
+
{
876
+
maybe_current = Some(right);
877
+
continue;
878
+
}
879
+
}
880
+
}
881
+
882
+
already_visited = current.subtree_range().end;
883
+
maybe_current = current.parent();
884
+
}
885
+
886
+
// see if there is a suitable gap between AFTER the last address space region
887
+
if v.visit(root.subtree_range().end..self.max_range.end)
888
+
.is_break()
889
+
{
890
+
return;
891
+
}
892
+
}
893
+
}
894
+
895
+
/// # Safety
896
+
///
897
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
898
+
/// * `layout` must [*fit*] that region of memory.
899
+
///
900
+
/// [*currently mapped*]: #currently-mapped-memory
901
+
/// [*fit*]: #memory-fitting
902
+
unsafe fn get_region_containing_ptr<A: RawAddressSpace>(
903
+
regions: &mut WAVLTree<AddressSpaceRegion<A>>,
904
+
ptr: NonNull<u8>,
905
+
layout: Layout,
906
+
) -> CursorMut<'_, AddressSpaceRegion<A>> {
907
+
let addr = VirtualAddress::from_non_null(ptr);
908
+
909
+
let cursor = regions.lower_bound_mut(Bound::Included(&addr));
910
+
911
+
// assert_unsafe_precondition_!(
912
+
// "TODO",
913
+
// (cursor: &CursorMut<AddressSpaceRegion<A>> = &cursor) => cursor.get().is_some()
914
+
// );
915
+
916
+
// Safety: The caller guarantees the pointer is currently mapped which means we must have
917
+
// a corresponding address space region for it
918
+
let region = unsafe { cursor.get().unwrap_unchecked() };
919
+
920
+
// assert_unsafe_precondition_!(
921
+
// "TODO",
922
+
// (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
923
+
// let range = region.range();
924
+
//
925
+
// range.start.get() <= addr.get() && addr.get() < range.end.get()
926
+
// }
927
+
// );
928
+
//
929
+
// assert_unsafe_precondition_!(
930
+
// "`layout` does not fit memory region",
931
+
// (layout: Layout = layout, region: &AddressSpaceRegion = ®ion) => region.layout_fits_region(layout)
932
+
// );
933
+
934
+
cursor
935
+
}
936
+
937
+
pub(crate) struct GapVisitor {
938
+
layout: Layout,
939
+
target_index: usize,
940
+
candidate_spots: usize,
941
+
chosen: Option<VirtualAddress>,
942
+
}
943
+
944
+
impl GapVisitor {
945
+
fn new(layout: Layout, target_index: usize) -> Self {
946
+
Self {
947
+
layout,
948
+
target_index,
949
+
candidate_spots: 0,
950
+
chosen: None,
951
+
}
952
+
}
953
+
954
+
pub fn layout(&self) -> Layout {
955
+
self.layout
956
+
}
957
+
958
+
/// Returns the number of spots in the given range that satisfy the layout we require
959
+
fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
960
+
debug_assert!(
961
+
range.start.is_aligned_to(self.layout.align())
962
+
&& range.end.is_aligned_to(self.layout.align())
963
+
);
964
+
965
+
// ranges passed in here can become empty for a number of reasons (aligning might produce ranges
966
+
// where end > start, or the range might be empty to begin with) in either case an empty
967
+
// range means no spots are available
968
+
if range.is_empty() {
969
+
return 0;
970
+
}
971
+
972
+
let range_size = range.size();
973
+
if range_size >= self.layout.size() {
974
+
((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
975
+
} else {
976
+
0
977
+
}
978
+
}
979
+
980
+
pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
981
+
// if we have already chosen a spot, signal the caller to stop
982
+
if self.chosen.is_some() {
983
+
return ControlFlow::Break(());
984
+
}
985
+
986
+
let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();
987
+
988
+
let spot_count = self.spots_in_range(&aligned_gap);
989
+
990
+
self.candidate_spots += spot_count;
991
+
992
+
if self.target_index < spot_count {
993
+
self.chosen = Some(
994
+
aligned_gap
995
+
.start
996
+
.checked_add(self.target_index << self.layout.align().ilog2())
997
+
.unwrap(),
998
+
);
999
+
1000
+
ControlFlow::Break(())
1001
+
} else {
1002
+
self.target_index -= spot_count;
1003
+
1004
+
ControlFlow::Continue(())
1005
+
}
1006
+
}
1007
+
}
+336
libs/mem/src/address_space/batch.rs
+336
libs/mem/src/address_space/batch.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::cmp;
9
+
use core::num::{NonZero, NonZeroUsize};
10
+
11
+
use smallvec::SmallVec;
12
+
13
+
use crate::address_space::{Flush, RawAddressSpace};
14
+
use crate::{AccessRules, PhysicalAddress, VirtualAddress};
15
+
16
+
/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
17
+
///
18
+
/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
19
+
/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
20
+
/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
21
+
/// this number.
22
+
pub struct Batch {
23
+
ops: SmallVec<[BatchOperation; 4]>,
24
+
}
25
+
26
+
enum BatchOperation {
27
+
Map(MapOperation),
28
+
Unmap(UnmapOperation),
29
+
SetAccessRules(SetAccessRulesOperation),
30
+
}
31
+
32
+
struct MapOperation {
33
+
virt: VirtualAddress,
34
+
phys: PhysicalAddress,
35
+
len: NonZeroUsize,
36
+
access_rules: AccessRules,
37
+
}
38
+
39
+
struct UnmapOperation {
40
+
virt: VirtualAddress,
41
+
len: NonZeroUsize,
42
+
}
43
+
44
+
struct SetAccessRulesOperation {
45
+
virt: VirtualAddress,
46
+
len: NonZeroUsize,
47
+
access_rules: AccessRules,
48
+
}
49
+
50
+
// ===== impl Batch =====
51
+
52
+
impl Batch {
53
+
/// Construct a new empty [`Batch`].
54
+
pub fn new() -> Self {
55
+
Self {
56
+
ops: SmallVec::new(),
57
+
}
58
+
}
59
+
60
+
/// Add a [`map`] operation to the set of batched operations.
61
+
///
62
+
/// # Safety
63
+
///
64
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
65
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
66
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
67
+
///
68
+
/// [`map`]: RawAddressSpace::map
69
+
pub unsafe fn map(
70
+
&mut self,
71
+
virt: VirtualAddress,
72
+
phys: PhysicalAddress,
73
+
len: NonZeroUsize,
74
+
access_rules: AccessRules,
75
+
) {
76
+
let mut new = MapOperation {
77
+
virt,
78
+
phys,
79
+
len,
80
+
access_rules,
81
+
};
82
+
83
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
84
+
BatchOperation::Map(op) => Some(op),
85
+
_ => None,
86
+
});
87
+
88
+
for op in ops {
89
+
match op.try_merge_with(new) {
90
+
Ok(()) => return,
91
+
Err(new_) => new = new_,
92
+
}
93
+
}
94
+
95
+
self.ops.push(BatchOperation::Map(new));
96
+
}
97
+
98
+
/// Add an [`unmap`] operation to the set of batched operations.
99
+
///
100
+
/// # Safety
101
+
///
102
+
/// - virt..virt+len must be mapped
103
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
104
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
105
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
106
+
///
107
+
/// [`unmap`]: RawAddressSpace::unmap
108
+
pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) {
109
+
let mut new = UnmapOperation { virt, len };
110
+
111
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
112
+
BatchOperation::Unmap(op) => Some(op),
113
+
_ => None,
114
+
});
115
+
116
+
for op in ops {
117
+
match op.try_merge_with(new) {
118
+
Ok(()) => return,
119
+
Err(new_) => new = new_,
120
+
}
121
+
}
122
+
123
+
self.ops.push(BatchOperation::Unmap(new));
124
+
}
125
+
126
+
/// Add a [`set_access_rules`] operation to the set of batched operations.
127
+
///
128
+
/// # Safety
129
+
///
130
+
/// - virt..virt+len must be mapped
131
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
132
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
133
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
134
+
///
135
+
/// [`set_access_rules`]: RawAddressSpace::set_access_rules
136
+
pub fn set_access_rules(
137
+
&mut self,
138
+
virt: VirtualAddress,
139
+
len: NonZeroUsize,
140
+
access_rules: AccessRules,
141
+
) {
142
+
let mut new = SetAccessRulesOperation {
143
+
virt,
144
+
len,
145
+
access_rules,
146
+
};
147
+
148
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
149
+
BatchOperation::SetAccessRules(op) => Some(op),
150
+
_ => None,
151
+
});
152
+
153
+
for op in ops {
154
+
match op.try_merge_with(new) {
155
+
Ok(()) => return,
156
+
Err(new_) => new = new_,
157
+
}
158
+
}
159
+
160
+
self.ops.push(BatchOperation::SetAccessRules(new));
161
+
}
162
+
163
+
/// Flushes the `Batch` ensuring all changes are materialized into the raw address space.
164
+
pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> {
165
+
let mut flush = raw_aspace.flush();
166
+
for op in self.ops.drain(..) {
167
+
match op {
168
+
BatchOperation::Map(op) => {
169
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
170
+
debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE));
171
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
172
+
173
+
// Safety: the caller promised the correctness of the values on construction of
174
+
// the operation.
175
+
unsafe {
176
+
raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
177
+
}
178
+
}
179
+
BatchOperation::Unmap(op) => {
180
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
181
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
182
+
183
+
// Safety: the caller promised the correctness of the values on construction of
184
+
// the operation.
185
+
unsafe {
186
+
raw_aspace.unmap(op.virt, op.len, &mut flush);
187
+
}
188
+
}
189
+
BatchOperation::SetAccessRules(op) => {
190
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
191
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
192
+
193
+
// Safety: the caller promised the correctness of the values on construction of
194
+
// the operation.
195
+
unsafe {
196
+
raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
197
+
}
198
+
}
199
+
};
200
+
}
201
+
flush.flush()
202
+
}
203
+
}
204
+
205
+
// ===== impl MapOperation =====
206
+
207
+
impl MapOperation {
208
+
/// Returns true if this operation can be merged with `other`.
209
+
///
210
+
/// Map operations can be merged if:
211
+
/// - their [`AccessRules`] are the same
212
+
/// - their virtual address ranges are contiguous (no gap between self and other)
213
+
/// - their physical address ranges are contiguous
214
+
/// - the resulting virtual address range still has the same size as the resulting
215
+
/// physical address range
216
+
const fn can_merge_with(&self, other: &Self) -> bool {
217
+
// the access rules need to be the same
218
+
let same_rules = self.access_rules.bits() == other.access_rules.bits();
219
+
220
+
let overlap_virt = self.virt.get() <= other.len.get()
221
+
&& other.virt.get() <= self.virt.get() + self.len.get();
222
+
223
+
let overlap_phys = self.phys.get() <= other.len.get()
224
+
&& other.phys.get() <= self.phys.get() + self.len.get();
225
+
226
+
let offset_virt = self.virt.get().wrapping_sub(other.virt.get());
227
+
let offset_phys = self.virt.get().wrapping_sub(other.virt.get());
228
+
let same_offset = offset_virt == offset_phys;
229
+
230
+
same_rules && overlap_virt && overlap_phys && same_offset
231
+
}
232
+
233
+
/// Attempt to merge this operation with `other`.
234
+
///
235
+
/// If this returns `Ok`, `other` has been merged into `self`.
236
+
///
237
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
238
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
239
+
if self.can_merge_with(&other) {
240
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
241
+
let len = self
242
+
.len
243
+
.get()
244
+
.checked_add(other.len.get())
245
+
.unwrap()
246
+
.wrapping_add(offset);
247
+
248
+
self.virt = cmp::min(self.virt, other.virt);
249
+
self.phys = cmp::min(self.phys, other.phys);
250
+
self.len = NonZero::new(len).ok_or(other)?;
251
+
252
+
Ok(())
253
+
} else {
254
+
Err(other)
255
+
}
256
+
}
257
+
}
258
+
259
+
// ===== impl UnmapOperation =====
260
+
261
+
impl UnmapOperation {
262
+
/// Returns true if this operation can be merged with `other`.
263
+
///
264
+
/// Unmap operations can be merged if:
265
+
/// - their virtual address ranges are contiguous (no gap between self and other)
266
+
const fn can_merge_with(&self, other: &Self) -> bool {
267
+
self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get()
268
+
}
269
+
270
+
/// Attempt to merge this operation with `other`.
271
+
///
272
+
/// If this returns `Ok`, `other` has been merged into `self`.
273
+
///
274
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
275
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
276
+
if self.can_merge_with(&other) {
277
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
278
+
let len = self
279
+
.len
280
+
.get()
281
+
.checked_add(other.len.get())
282
+
.unwrap()
283
+
.wrapping_add(offset);
284
+
285
+
self.virt = cmp::min(self.virt, other.virt);
286
+
self.len = NonZero::new(len).ok_or(other)?;
287
+
288
+
Ok(())
289
+
} else {
290
+
Err(other)
291
+
}
292
+
}
293
+
}
294
+
295
+
// ===== impl ProtectOperation =====
296
+
297
+
impl SetAccessRulesOperation {
298
+
/// Returns true if this operation can be merged with `other`.
299
+
///
300
+
/// Protect operations can be merged if:
301
+
/// - their [`AccessRules`] are the same
302
+
/// - their virtual address ranges are contiguous (no gap between self and other)
303
+
const fn can_merge_with(&self, other: &Self) -> bool {
304
+
// the access rules need to be the same
305
+
let same_rules = self.access_rules.bits() == other.access_rules.bits();
306
+
307
+
let overlap = self.virt.get() <= other.len.get()
308
+
&& other.virt.get() <= self.virt.get() + self.len.get();
309
+
310
+
same_rules && overlap
311
+
}
312
+
313
+
/// Attempt to merge this operation with `other`.
314
+
///
315
+
/// If this returns `Ok`, `other` has been merged into `self`.
316
+
///
317
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
318
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
319
+
if self.can_merge_with(&other) {
320
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
321
+
let len = self
322
+
.len
323
+
.get()
324
+
.checked_add(other.len.get())
325
+
.unwrap()
326
+
.wrapping_add(offset);
327
+
328
+
self.virt = cmp::min(self.virt, other.virt);
329
+
self.len = NonZero::new(len).ok_or(other)?;
330
+
331
+
Ok(())
332
+
} else {
333
+
Err(other)
334
+
}
335
+
}
336
+
}
+564
libs/mem/src/address_space/region.rs
+564
libs/mem/src/address_space/region.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use alloc::boxed::Box;
9
+
use core::alloc::Layout;
10
+
use core::fmt::Formatter;
11
+
use core::marker::PhantomData;
12
+
use core::mem::offset_of;
13
+
use core::num::NonZeroUsize;
14
+
use core::ops::{Bound, Range, RangeBounds};
15
+
use core::pin::Pin;
16
+
use core::ptr::NonNull;
17
+
use core::{cmp, fmt, mem, slice};
18
+
19
+
use fallible_iterator::FallibleIterator;
20
+
use pin_project::pin_project;
21
+
22
+
use crate::address_space::{Batch, RawAddressSpace};
23
+
use crate::vmo::Vmo;
24
+
use crate::{AccessRules, AddressRangeExt, VirtualAddress};
25
+
26
+
/// A single mapped region within an address space.
///
/// Regions are kept in an intrusive WAVL tree ordered by their start address;
/// `subtree_range` and `max_gap` are augmented per-node metadata maintained by
/// the tree hooks and used when searching for a free gap to place new regions.
#[pin_project]
pub struct AddressSpaceRegion<A> {
    /// The virtual address range covered by this region.
    range: Range<VirtualAddress>,
    /// Access rules (permissions) that apply to the whole region.
    access_rules: AccessRules,
    /// Size/alignment this region was allocated with (`range` may be larger than `layout.size()`).
    layout: Layout,
    /// The virtual memory object providing the backing physical frames.
    vmo: Vmo,
    /// Offset into `vmo` at which this region begins
    /// (used as a page-granular offset by `bounds_to_vmo_relative` — NOTE(review): confirm unit).
    vmo_offset: usize,

    /// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
    subtree_range: Range<VirtualAddress>,
    /// The largest gap in this subtree, used when allocating new regions
    max_gap: usize,
    /// Links to other regions in the WAVL tree
    links: wavltree::Links<AddressSpaceRegion<A>>,

    /// Ties the region to its `RawAddressSpace` implementation without storing one.
    _raw_aspace: PhantomData<A>,
}
43
+
44
+
impl<A> fmt::Debug for AddressSpaceRegion<A> {
45
+
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
46
+
f.debug_struct("AddressSpaceRegion")
47
+
.field("range", &self.range)
48
+
.field("access_rules", &self.access_rules)
49
+
.field("layout", &self.layout)
50
+
.field("vmo", &self.vmo)
51
+
.field("vmo_offset", &self.vmo_offset)
52
+
.field("subtree_range", &self.subtree_range)
53
+
.field("max_gap", &self.max_gap)
54
+
.field("links", &self.links)
55
+
.finish()
56
+
}
57
+
}
58
+
59
+
impl<A: RawAddressSpace> AddressSpaceRegion<A> {
60
+
pub const fn new(
61
+
spot: VirtualAddress,
62
+
layout: Layout,
63
+
access_rules: AccessRules,
64
+
vmo: Vmo,
65
+
vmo_offset: usize,
66
+
) -> Self {
67
+
Self {
68
+
range: spot..spot.checked_add(layout.size()).unwrap(),
69
+
access_rules,
70
+
layout,
71
+
vmo,
72
+
vmo_offset,
73
+
74
+
max_gap: 0,
75
+
subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
76
+
links: wavltree::Links::new(),
77
+
78
+
_raw_aspace: PhantomData,
79
+
}
80
+
}
81
+
82
+
    /// Returns the virtual address range covered by this region.
    pub const fn range(&self) -> &Range<VirtualAddress> {
        &self.range
    }

    /// Returns the combined address range of this region and its WAVL subtree.
    pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
        &self.subtree_range
    }

    /// Returns the access rules (permissions) of this region.
    pub const fn access_rules(&self) -> AccessRules {
        self.access_rules
    }

    /// Views the region's memory as a byte slice.
    ///
    /// NOTE(review): the slice covers the whole `range`; this appears to assume
    /// the range is fully committed/mapped and readable — confirm, otherwise
    /// accesses through the slice may fault.
    pub fn as_slice(&self) -> &[u8] {
        let ptr = self.range.start.as_ptr();
        let len = self.range.size();

        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Views the region's memory as a mutable byte slice.
    ///
    /// NOTE(review): same mapping/writability assumption as [`Self::as_slice`] —
    /// confirm the range is committed and writable before use.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.range.start.as_mut_ptr();
        let len = self.range.size();

        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    /// Returns the region's memory as a `NonNull` byte-slice pointer.
    ///
    /// Panics if the region starts at the null address (`as_non_null` returns `None`).
    pub fn as_non_null(&self) -> NonNull<[u8]> {
        let ptr = self.range.start.as_non_null().unwrap();
        NonNull::slice_from_raw_parts(ptr, self.range.size())
    }

    /// Returns whether `layout` is compatible with this region: the region start
    /// satisfies the requested alignment and the size lies between the region's
    /// own layout size and the range size.
    ///
    /// NOTE(review): `layout.size() >= self.layout.size()` looks inverted for a
    /// "fits" predicate — confirm the intended semantics with callers.
    pub const fn layout_fits_region(&self, layout: Layout) -> bool {
        self.range.start.is_aligned_to(layout.align())
            && layout.size() >= self.layout.size()
            && layout.size() <= self.range.end.get() - self.range.start.get()
    }
118
+
119
+
    /// Find physical memory frames to back the given `range`.
    /// After this call succeeds, accesses that align with the given `access_rules` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Translate the virtual address bounds into page-granular VMO offsets.
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut acquired_frames = self.vmo.acquire(vmo_relative, access_rules).enumerate();
        while let Some((idx, frame)) = acquired_frames.next()? {
            // NOTE(review): `virt` is derived from the *region* start, not from
            // the start of the requested `range`; if `range` does not begin at
            // `self.range.start`, frames appear to get mapped at the wrong
            // virtual addresses — confirm against `Vmo::acquire` semantics.
            let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap();

            // Queue one page-sized mapping per acquired frame; nothing takes
            // effect until `flush_changes` below.
            unsafe {
                batch.map(
                    virt,
                    frame.addr(),
                    NonZeroUsize::new(A::PAGE_SIZE).unwrap(),
                    access_rules,
                );
            }

            if self.vmo.has_content_source() {
                // TODO add virt addr to coalescer
            }
        }

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        // initialize patched holes if necessary
        if self.vmo.has_content_source() {
            // for every region in coalescer
            // figure out content source offset
            // read from content source at offset into region
        }

        Ok(())
    }
166
+
167
+
    /// Release physical memory frames backing the given `range`.
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn decommit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Translate the virtual address bounds into page-granular VMO offsets.
        let vmo_relative = self.bounds_to_vmo_relative(range);

        let mut released_frames = self.vmo.release(vmo_relative).enumerate();
        while let Some((idx, _frame)) = released_frames.next()? {
            // NOTE(review): like `commit`, `virt` is computed from the region
            // start rather than the requested range start — confirm this is
            // correct for partial-range decommits.
            let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap();
            unsafe { batch.unmap(virt, NonZeroUsize::new(A::PAGE_SIZE).unwrap()) };

            // if VMO has content source && frame is dirty
            // add virt addr to coalescer
        }

        // for every region in coalescer
        // figure out content source offset
        // write region to content source at offset

        // materialize changes
        batch.flush_changes(raw_aspace)?;

        Ok(())
    }
199
+
200
+
    /// Zero out the memory in the given `range`.
    /// This MAY release physical memory frames backing the `range`.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    pub fn clear(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
    ) -> crate::Result<()> {
        // Not implemented yet; always panics.
        todo!()
    }

    /// Update the access rules of this `AddressSpaceRegion`.
    pub fn update_access_rules(
        &mut self,
        access_rules: AccessRules,
        batch: &mut Batch,
    ) -> crate::Result<()> {
        // Not implemented yet; always panics.
        todo!()
    }

    /// Fetches content in the given `range`. This operates logically equivalent to
    /// a read, write, or instruction fetch (depending on `access_rules`) so that future accesses
    /// are quicker. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn prefetch(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
    ) -> crate::Result<()> {
        // Not implemented yet; always panics.
        todo!()
    }

    /// Attempts to grow the address space region to `new_len`.
    /// `new_len` MUST be larger than or equal to the current length.
    pub fn grow(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        // Not implemented yet; always panics.
        todo!()
    }

    /// Attempts to shrink the address space region to `new_len`.
    /// `new_len` MUST be smaller than or equal to the current length.
    pub fn shrink(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
        // Not implemented yet; always panics.
        todo!()
    }
250
+
251
+
// /// grow region to `new_len`, attempting to grow the VMO accordingly
252
+
// /// `new_layout.size()` mut be greater than or equal to `self.layout.size()`
253
+
// pub fn grow_in_place(
254
+
// &mut self,
255
+
// new_layout: Layout,
256
+
// next_range: Option<Range<VirtualAddress>>,
257
+
// batch: &mut Batch,
258
+
// ) -> crate::Result<()> {
259
+
// if new_layout.align() > self.layout.align() {
260
+
// bail!("cannot grow in-place: New alignment greater than current");
261
+
// }
262
+
//
263
+
// let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
264
+
//
265
+
// if let Some(next_range) = next_range
266
+
// && next_range.is_overlapping(&new_range)
267
+
// {
268
+
// bail!("cannot grow in-place: New overlapping with next range");
269
+
// }
270
+
//
271
+
// self.vmo.resize(new_range.size(), batch)?;
272
+
//
273
+
// self.update_range(new_range);
274
+
//
275
+
// Ok(())
276
+
// }
277
+
//
278
+
// /// shrink region to the first `len` bytes, dropping the rest frames.
279
+
// /// `new_layout.size()` mut be smaller than or equal to `self.layout.size()`
280
+
// pub fn shrink(&mut self, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
281
+
// if new_layout.align() > self.layout.align() {
282
+
// bail!("cannot grow in-place: New alignment greater than current");
283
+
// }
284
+
//
285
+
// let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
286
+
//
287
+
// self.vmo.resize(new_range.size(), batch)?;
288
+
//
289
+
// self.update_range(new_range);
290
+
//
291
+
// Ok(())
292
+
// }
293
+
//
294
+
// /// move the entire region to the new base address, remapping any already mapped frames
295
+
// pub fn move_to(
296
+
// &mut self,
297
+
// new_base: VirtualAddress,
298
+
// new_layout: Layout,
299
+
// batch: &mut Batch,
300
+
// ) -> crate::Result<()> {
301
+
// let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();
302
+
//
303
+
// self.vmo.resize(new_range.size(), batch)?;
304
+
// self.update_range(new_range);
305
+
//
306
+
// // - for every frame in VMO
307
+
// // - attempt to map at new offset (add maps to batch)
308
+
//
309
+
// todo!()
310
+
// }
311
+
//
312
+
// pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
313
+
// where
314
+
// R: RangeBounds<VirtualAddress>,
315
+
// {
316
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
317
+
//
318
+
// self.vmo.commit(bounds, will_write, batch)
319
+
// }
320
+
//
321
+
// pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
322
+
// where
323
+
// R: RangeBounds<VirtualAddress>,
324
+
// {
325
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
326
+
//
327
+
// self.vmo.decommit(bounds, batch)
328
+
// }
329
+
//
330
+
// /// updates the access rules of this region
331
+
// pub fn update_access_rules(
332
+
// &mut self,
333
+
// access_rules: AccessRules,
334
+
// batch: &mut Batch,
335
+
// ) -> crate::Result<()> {
336
+
// // TODO
337
+
// // - for every frame in VMO
338
+
// // - update access rules (add protects to batch)
339
+
// // - update self access rules
340
+
//
341
+
// todo!()
342
+
// }
343
+
//
344
+
// pub fn clear<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
345
+
// where
346
+
// R: RangeBounds<VirtualAddress>,
347
+
// {
348
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
349
+
//
350
+
// self.vmo.clear(bounds, batch)
351
+
// }
352
+
//
353
+
// pub fn prefetch<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
354
+
// where
355
+
// R: RangeBounds<VirtualAddress>,
356
+
// {
357
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
358
+
//
359
+
// self.vmo.prefetch(bounds, batch)
360
+
// }
361
+
362
+
    /// Debug helper: asserts the structural invariants of this region and its
    /// tree links. `msg` is prefixed to every assertion message for context.
    ///
    /// # Panics
    ///
    /// Panics if any invariant is violated.
    pub fn assert_valid(&self, msg: &str) {
        assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
        assert!(
            self.subtree_range.start <= self.range.start
                && self.range.end <= self.subtree_range.end,
            "{msg}region range cannot be bigger than its subtree range; region={self:?}"
        );
        // NOTE(review): this uses a strict `<`, so max_gap == subtree size fails
        // although the message says "cannot be bigger" — confirm intended.
        assert!(
            self.max_gap < self.subtree_range.size(),
            "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}"
        );
        assert!(
            self.range.start.is_aligned_to(self.layout.align()),
            "{msg}region range is not aligned to its layout; region={self:?}"
        );
        assert!(
            self.range.size() >= self.layout.size(),
            "{msg}region range is smaller than its layout; region={self:?}"
        );

        self.links.assert_valid();
    }
384
+
385
+
    /// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used
    /// during gap-searching.
    pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool {
        // we need the layout to be padded to alignment
        debug_assert!(layout.size().is_multiple_of(layout.align()));

        self.max_gap >= layout.size()
    }

    /// Returns the left child node in the search tree of regions, used during gap-searching.
    pub fn left_child(&self) -> Option<&Self> {
        // SAFETY(review): assumes the wavltree keeps child pointers valid while
        // this node is linked — confirm against the `wavltree` crate's contract.
        Some(unsafe { self.links.left()?.as_ref() })
    }

    /// Returns the right child node in the search tree of regions, used during gap-searching.
    pub fn right_child(&self) -> Option<&Self> {
        // SAFETY(review): same pointer-validity assumption as `left_child`.
        Some(unsafe { self.links.right()?.as_ref() })
    }

    /// Returns the parent node in the search tree of regions, used during gap-searching.
    pub fn parent(&self) -> Option<&Self> {
        // SAFETY(review): same pointer-validity assumption as `left_child`.
        Some(unsafe { self.links.parent()?.as_ref() })
    }

    /// Converts virtual-address bounds into page-granular offsets relative to
    /// the backing VMO (byte distance from the region start, divided by the
    /// page size, plus `vmo_offset`).
    ///
    /// Panics if a bound lies below `self.range.start`.
    #[inline]
    fn bounds_to_vmo_relative(
        &self,
        bounds: impl RangeBounds<VirtualAddress>,
    ) -> (Bound<usize>, Bound<usize>) {
        let start = bounds.start_bound().map(|addr| {
            (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset
        });
        let end = bounds.end_bound().map(|addr| {
            (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset
        });

        (start, end)
    }

    /// Replaces this region's range and propagates the change up the tree.
    fn update_range(&mut self, new_range: Range<VirtualAddress>) {
        self.range = new_range;
        // We also must propagate the information about our changed range to the rest of the tree
        // so searching for a free spot returns the correct results.
        Self::propagate_update_to_parent(Some(NonNull::from(self)));
    }
430
+
431
+
    /// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`]
    /// implementation below after each tree mutation that impacted this node or its subtree in some way
    /// (insertion, rotation, deletion).
    ///
    /// Returns `true` if this nodes metadata changed.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn update_gap_metadata(
        mut node: NonNull<Self>,
        left: Option<NonNull<Self>>,
        right: Option<NonNull<Self>>,
    ) -> bool {
        // Byte distance between the end of one range and the start of the next;
        // 0 when the ranges touch or overlap (checked_sub_addr returns None).
        fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize {
            right_first_byte
                .checked_sub_addr(left_last_byte)
                .unwrap_or_default() // TODO use saturating_sub_addr
        }

        let node = unsafe { node.as_mut() };
        let mut left_max_gap = 0;
        let mut right_max_gap = 0;

        // recalculate the subtree_range start
        // (mem::replace stores the new value and hands back the old one so we
        // can detect whether anything actually changed below)
        let old_subtree_range_start = if let Some(left) = left {
            let left = unsafe { left.as_ref() };
            // gap between the left subtree's end and this node's start
            let left_gap = gap(left.subtree_range.end, node.range.start);
            left_max_gap = cmp::max(left_gap, left.max_gap);
            mem::replace(&mut node.subtree_range.start, left.subtree_range.start)
        } else {
            mem::replace(&mut node.subtree_range.start, node.range.start)
        };

        // recalculate the subtree range end
        let old_subtree_range_end = if let Some(right) = right {
            let right = unsafe { right.as_ref() };
            // gap between this node's end and the right subtree's start
            let right_gap = gap(node.range.end, right.subtree_range.start);
            right_max_gap = cmp::max(right_gap, right.max_gap);
            mem::replace(&mut node.subtree_range.end, right.subtree_range.end)
        } else {
            mem::replace(&mut node.subtree_range.end, node.range.end)
        };

        // recalculate the map_gap
        let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap));

        old_max_gap != node.max_gap
            || old_subtree_range_start != node.subtree_range.start
            || old_subtree_range_end != node.subtree_range.end
    }

    // Propagate metadata updates to this regions parent in the search tree. If we had to update
    // our metadata the parent must update its metadata too.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) {
        while let Some(node) = maybe_node {
            // NOTE(review): `links` is read through a shared reference while
            // `update_gap_metadata` mutates the node through the same pointer —
            // confirm this aliasing is sound under the wavltree contract.
            let links = unsafe { &node.as_ref().links };
            let changed = Self::update_gap_metadata(node, links.left(), links.right());

            // if the metadata didn't actually change, we don't need to recalculate parents
            if !changed {
                return;
            }

            maybe_node = links.parent();
        }
    }
496
+
}
497
+
498
+
// SAFETY(review): the tree never moves linked nodes; `Handle = Pin<Box<Self>>`
// pins every element for its lifetime in the tree — confirm against the
// `wavltree::Linked` safety contract.
unsafe impl<A: RawAddressSpace> wavltree::Linked for AddressSpaceRegion<A> {
    /// Any heap-allocated type that owns an element may be used.
    ///
    /// An element *must not* move while part of an intrusive data
    /// structure. In many cases, `Pin` may be used to enforce this.
    type Handle = Pin<Box<Self>>; // TODO better handle type

    type Key = VirtualAddress;

    /// Convert an owned `Handle` into a raw pointer
    fn into_ptr(handle: Self::Handle) -> NonNull<Self> {
        // Safety: wavltree treats the ptr as pinned
        unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
    }

    /// Convert a raw pointer back into an owned `Handle`.
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        // Safety: `NonNull` *must* be constructed from a pinned reference
        // which the tree implementation upholds.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) }
    }

    /// Projects a node pointer to its embedded tree links without creating a
    /// reference (field offset arithmetic on the raw pointer).
    unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> {
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }

    /// Regions are keyed (and thus ordered) by their start address.
    fn get_key(&self) -> &Self::Key {
        &self.range.start
    }

    /// After insertion a node is a leaf, so its own metadata is trivially
    /// correct; only ancestors need to be recomputed.
    fn after_insert(self: Pin<&mut Self>) {
        debug_assert_eq!(self.subtree_range.start, self.range.start);
        debug_assert_eq!(self.subtree_range.end, self.range.end);
        debug_assert_eq!(self.max_gap, 0);
        Self::propagate_update_to_parent(self.links.parent());
    }

    /// After removal the former parent's subtree shrank; recompute from there up.
    fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) {
        Self::propagate_update_to_parent(parent);
    }

    /// After a rotation `self` takes over the old parent's subtree metadata,
    /// and the demoted parent's metadata is recomputed from its new children.
    fn after_rotate(
        self: Pin<&mut Self>,
        parent: NonNull<Self>,
        sibling: Option<NonNull<Self>>,
        lr_child: Option<NonNull<Self>>,
        side: wavltree::Side,
    ) {
        let this = self.project();
        // Safety: caller ensures ptr is valid
        let _parent = unsafe { parent.as_ref() };

        this.subtree_range.start = _parent.subtree_range.start;
        this.subtree_range.end = _parent.subtree_range.end;
        *this.max_gap = _parent.max_gap;

        if side == wavltree::Side::Left {
            Self::update_gap_metadata(parent, sibling, lr_child);
        } else {
            Self::update_gap_metadata(parent, lr_child, sibling);
        }
    }
}
+414
libs/mem/src/addresses.rs
+414
libs/mem/src/addresses.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::{Layout, LayoutError};
9
+
use core::ops::Range;
10
+
11
+
use crate::address_space::RawAddressSpace;
12
+
13
+
macro_rules! impl_address {
14
+
($address_ty:ident) => {
15
+
impl $address_ty {
16
+
pub const MAX: Self = Self(usize::MAX);
17
+
pub const MIN: Self = Self(0);
18
+
pub const ZERO: Self = Self(0);
19
+
pub const BITS: u32 = usize::BITS;
20
+
21
+
#[inline]
22
+
pub const fn get(&self) -> usize {
23
+
self.0
24
+
}
25
+
26
+
#[must_use]
27
+
#[inline]
28
+
pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
29
+
Self(ptr.expose_provenance())
30
+
}
31
+
32
+
#[must_use]
33
+
#[inline]
34
+
pub fn from_mut_ptr<T: ?Sized>(ptr: *mut T) -> Self {
35
+
Self(ptr.expose_provenance())
36
+
}
37
+
38
+
#[must_use]
39
+
#[inline]
40
+
pub fn from_non_null<T: ?Sized>(ptr: ::core::ptr::NonNull<T>) -> Self {
41
+
Self(ptr.addr().get())
42
+
}
43
+
44
+
#[inline]
45
+
pub fn as_ptr(self) -> *const u8 {
46
+
::core::ptr::with_exposed_provenance(self.0)
47
+
}
48
+
49
+
#[inline]
50
+
pub fn as_mut_ptr(self) -> *mut u8 {
51
+
::core::ptr::with_exposed_provenance_mut(self.0)
52
+
}
53
+
54
+
#[inline]
55
+
pub fn as_non_null(self) -> Option<::core::ptr::NonNull<u8>> {
56
+
::core::num::NonZeroUsize::new(self.0)
57
+
.map(::core::ptr::NonNull::with_exposed_provenance)
58
+
}
59
+
60
+
#[must_use]
61
+
#[inline]
62
+
pub const fn checked_add(self, rhs: usize) -> Option<Self> {
63
+
if let Some(out) = self.0.checked_add(rhs) {
64
+
Some(Self(out))
65
+
} else {
66
+
None
67
+
}
68
+
}
69
+
70
+
#[must_use]
71
+
#[inline]
72
+
pub const fn checked_add_signed(self, rhs: isize) -> Option<Self> {
73
+
if let Some(out) = self.0.checked_add_signed(rhs) {
74
+
Some(Self(out))
75
+
} else {
76
+
None
77
+
}
78
+
}
79
+
80
+
#[must_use]
81
+
#[inline]
82
+
pub const fn checked_sub(self, rhs: usize) -> Option<Self> {
83
+
if let Some(out) = self.0.checked_sub(rhs) {
84
+
Some(Self(out))
85
+
} else {
86
+
None
87
+
}
88
+
}
89
+
#[must_use]
90
+
#[inline]
91
+
pub const fn checked_div(self, rhs: usize) -> Option<Self> {
92
+
if let Some(out) = self.0.checked_div(rhs) {
93
+
Some(Self(out))
94
+
} else {
95
+
None
96
+
}
97
+
}
98
+
#[must_use]
99
+
#[inline]
100
+
pub const fn checked_mul(self, rhs: usize) -> Option<Self> {
101
+
if let Some(out) = self.0.checked_mul(rhs) {
102
+
Some(Self(out))
103
+
} else {
104
+
None
105
+
}
106
+
}
107
+
#[must_use]
108
+
#[inline]
109
+
pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
110
+
if let Some(out) = self.0.checked_shl(rhs) {
111
+
Some(Self(out))
112
+
} else {
113
+
None
114
+
}
115
+
}
116
+
#[must_use]
117
+
#[inline]
118
+
pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
119
+
if let Some(out) = self.0.checked_shr(rhs) {
120
+
Some(Self(out))
121
+
} else {
122
+
None
123
+
}
124
+
}
125
+
// #[must_use]
126
+
// #[inline]
127
+
// pub const fn saturating_add(self, rhs: usize) -> Self {
128
+
// Self(self.0.saturating_add(rhs))
129
+
// }
130
+
// #[must_use]
131
+
// #[inline]
132
+
// pub const fn saturating_add_signed(self, rhs: isize) -> Self {
133
+
// Self(self.0.saturating_add_signed(rhs))
134
+
// }
135
+
// #[must_use]
136
+
// #[inline]
137
+
// pub const fn saturating_div(self, rhs: usize) -> Self {
138
+
// Self(self.0.saturating_div(rhs))
139
+
// }
140
+
// #[must_use]
141
+
// #[inline]
142
+
// pub const fn saturating_sub(self, rhs: usize) -> Self {
143
+
// Self(self.0.saturating_sub(rhs))
144
+
// }
145
+
// #[must_use]
146
+
// #[inline]
147
+
// pub const fn saturating_mul(self, rhs: usize) -> Self {
148
+
// Self(self.0.saturating_mul(rhs))
149
+
// }
150
+
#[must_use]
151
+
#[inline]
152
+
pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
153
+
let (a, b) = self.0.overflowing_shl(rhs);
154
+
(Self(a), b)
155
+
}
156
+
#[must_use]
157
+
#[inline]
158
+
pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
159
+
let (a, b) = self.0.overflowing_shr(rhs);
160
+
(Self(a), b)
161
+
}
162
+
163
+
#[must_use]
164
+
#[inline]
165
+
pub const fn checked_sub_addr(self, rhs: Self) -> Option<usize> {
166
+
self.0.checked_sub(rhs.0)
167
+
}
168
+
169
+
// #[must_use]
170
+
// #[inline]
171
+
// pub const fn saturating_sub_addr(self, rhs: Self) -> usize {
172
+
// self.0.saturating_sub(rhs.0)
173
+
// }
174
+
175
+
#[must_use]
176
+
#[inline]
177
+
pub const fn is_aligned_to(&self, align: usize) -> bool {
178
+
assert!(
179
+
align.is_power_of_two(),
180
+
"is_aligned_to: align is not a power-of-two"
181
+
);
182
+
183
+
self.0 & (align - 1) == 0
184
+
}
185
+
186
+
#[must_use]
187
+
#[inline]
188
+
pub const fn checked_align_up(self, align: usize) -> Option<Self> {
189
+
if !align.is_power_of_two() {
190
+
panic!("checked_align_up: align is not a power-of-two");
191
+
}
192
+
193
+
// SAFETY: `align` has been checked to be a power of 2 above
194
+
let align_minus_one = unsafe { align.unchecked_sub(1) };
195
+
196
+
// addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
197
+
if let Some(addr_plus_align) = self.0.checked_add(align_minus_one) {
198
+
let aligned = Self(addr_plus_align & 0usize.wrapping_sub(align));
199
+
debug_assert!(aligned.is_aligned_to(align));
200
+
debug_assert!(aligned.0 >= self.0);
201
+
Some(aligned)
202
+
} else {
203
+
None
204
+
}
205
+
}
206
+
207
+
// #[must_use]
208
+
// #[inline]
209
+
// pub const fn wrapping_align_up(self, align: usize) -> Self {
210
+
// if !align.is_power_of_two() {
211
+
// panic!("checked_align_up: align is not a power-of-two");
212
+
// }
213
+
//
214
+
// // SAFETY: `align` has been checked to be a power of 2 above
215
+
// let align_minus_one = unsafe { align.unchecked_sub(1) };
216
+
//
217
+
// // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
218
+
// let out = addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align);
219
+
// debug_assert!(out.is_aligned_to(align));
220
+
// out
221
+
// }
222
+
223
+
#[inline]
224
+
pub const fn alignment(&self) -> usize {
225
+
self.0 & (!self.0 + 1)
226
+
}
227
+
228
+
#[must_use]
229
+
#[inline]
230
+
pub const fn align_down(self, align: usize) -> Self {
231
+
if !align.is_power_of_two() {
232
+
panic!("checked_align_up: align is not a power-of-two");
233
+
}
234
+
235
+
let aligned = Self(self.0 & 0usize.wrapping_sub(align));
236
+
debug_assert!(aligned.is_aligned_to(align));
237
+
debug_assert!(aligned.0 <= self.0);
238
+
aligned
239
+
}
240
+
}
241
+
242
+
impl ::core::fmt::Display for $address_ty {
243
+
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
244
+
f.write_fmt(format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x
245
+
}
246
+
}
247
+
248
+
impl ::core::fmt::Debug for $address_ty {
249
+
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
250
+
f.debug_tuple(stringify!($address_ty))
251
+
.field(&format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x
252
+
.finish()
253
+
}
254
+
}
255
+
};
256
+
}
257
+
258
+
/// A virtual (CPU-translated) memory address, a transparent `usize` newtype.
#[repr(transparent)]
#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VirtualAddress(usize);
impl_address!(VirtualAddress);

impl VirtualAddress {
    /// Creates a virtual address from a raw value (no canonicality check).
    #[must_use]
    pub const fn new(n: usize) -> Self {
        Self(n)
    }

    /// Returns whether this address is canonical for the address space `A`,
    /// i.e. the masked bits are either all zero or all equal to the mask.
    ///
    /// The `wrapping_sub(1)` trick maps `0` to `usize::MAX` (>= MASK-1) and
    /// `MASK` to `MASK-1` (>= MASK-1), while any partially-set value lands
    /// strictly below `MASK-1`.
    pub const fn is_canonical<A: RawAddressSpace>(self) -> bool {
        (self.0 & A::CANONICAL_ADDRESS_MASK).wrapping_sub(1) >= A::CANONICAL_ADDRESS_MASK - 1
    }

    #[inline]
    pub const fn is_user_accessible<A: RawAddressSpace>(self) -> bool {
        // This address refers to userspace if it is in the lower half of the
        // canonical addresses. IOW - if all of the bits in the canonical address
        // mask are zero.
        (self.0 & A::CANONICAL_ADDRESS_MASK) == 0
    }
}
281
+
282
+
/// A physical memory address, a transparent `usize` newtype.
#[repr(transparent)]
#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PhysicalAddress(usize);
impl_address!(PhysicalAddress);

impl PhysicalAddress {
    /// Creates a physical address from a raw value.
    pub const fn new(n: usize) -> Self {
        Self(n)
    }
}
292
+
293
+
/// Implements the body of [`AddressRangeExt`] shared by
/// `Range<VirtualAddress>` and `Range<PhysicalAddress>`
/// (everything except `is_user_accessible`).
macro_rules! address_range_impl {
    () => {
        fn size(&self) -> usize {
            debug_assert!(self.start <= self.end);
            let is = self.end.checked_sub_addr(self.start).unwrap_or_default();
            // debug self-check: the saturating computation must agree with the
            // naive end - start for non-empty ranges
            let should = if self.is_empty() {
                0
            } else {
                self.end.get() - self.start.get()
            };
            debug_assert_eq!(is, should);
            is
        }
        fn checked_add(self, offset: usize) -> Option<Self> {
            Some(Range::from(
                self.start.checked_add(offset)?..self.end.checked_add(offset)?,
            ))
        }
        fn as_ptr_range(&self) -> Range<*const u8> {
            Range::from(self.start.as_ptr()..self.end.as_ptr())
        }
        fn as_mut_ptr_range(&self) -> Range<*mut u8> {
            Range::from(self.start.as_mut_ptr()..self.end.as_mut_ptr())
        }
        // NOTE(review): for a range smaller than `align` this can yield
        // start > end (an inverted/empty range) — confirm callers expect that.
        fn checked_align_in(self, align: usize) -> Option<Self>
        where
            Self: Sized,
        {
            let res = Range::from(self.start.checked_align_up(align)?..self.end.align_down(align));
            Some(res)
        }
        fn checked_align_out(self, align: usize) -> Option<Self>
        where
            Self: Sized,
        {
            let res = Range::from(self.start.align_down(align)..self.end.checked_align_up(align)?);
            // aligning outwards can only increase the size
            debug_assert!(res.start.0 <= res.end.0);
            Some(res)
        }
        // fn saturating_align_in(self, align: usize) -> Self {
        //     self.start.saturating_align_up(align)..self.end.saturating_align_down(align)
        // }
        // fn saturating_align_out(self, align: usize) -> Self {
        //     self.start.saturating_align_down(align)..self.end.saturating_align_up(align)
        // }

        // TODO test
        fn alignment(&self) -> usize {
            self.start.alignment()
        }
        fn into_layout(self) -> core::result::Result<Layout, core::alloc::LayoutError> {
            Layout::from_size_align(self.size(), self.alignment())
        }
        fn is_overlapping(&self, other: &Self) -> bool {
            (self.start < other.end) & (other.start < self.end)
        }
        // Splits `self` into the parts before and after `other`; requires the
        // ranges to overlap (debug-asserted).
        fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) {
            debug_assert!(self.is_overlapping(&other));
            let a = Range::from(self.start..other.start);
            let b = Range::from(other.end..self.end);
            ((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b))
        }
        // NOTE(review): for disjoint ranges this produces start > end — confirm
        // callers only clamp overlapping ranges.
        fn clamp(&self, range: Self) -> Self {
            Range::from(self.start.max(range.start)..self.end.min(range.end))
        }
    };
}
361
+
362
+
/// Extension methods for `Range<VirtualAddress>` / `Range<PhysicalAddress>`.
pub trait AddressRangeExt {
    /// Size of the range in bytes (0 for empty ranges).
    fn size(&self) -> usize;
    /// Shifts both bounds by `offset`; `None` on overflow.
    #[must_use]
    fn checked_add(self, offset: usize) -> Option<Self>
    where
        Self: Sized;
    #[must_use]
    fn as_ptr_range(&self) -> Range<*const u8>;
    #[must_use]
    fn as_mut_ptr_range(&self) -> Range<*mut u8>;
    /// Shrinks the range so both bounds are `align`-aligned.
    #[must_use]
    fn checked_align_in(self, align: usize) -> Option<Self>
    where
        Self: Sized;
    /// Grows the range so both bounds are `align`-aligned.
    #[must_use]
    fn checked_align_out(self, align: usize) -> Option<Self>
    where
        Self: Sized;
    // #[must_use]
    // fn saturating_align_in(self, align: usize) -> Self;
    // #[must_use]
    // fn saturating_align_out(self, align: usize) -> Self;
    /// Alignment of the range's start address.
    fn alignment(&self) -> usize;
    /// Converts the range into a `Layout` of its size and alignment.
    fn into_layout(self) -> Result<Layout, LayoutError>;
    /// Returns whether the two ranges share at least one address.
    fn is_overlapping(&self, other: &Self) -> bool;
    /// Splits `self` around `other` into the before/after remainders.
    fn difference(&self, other: Self) -> (Option<Self>, Option<Self>)
    where
        Self: Sized;
    /// Intersects `self` with `range`.
    fn clamp(&self, range: Self) -> Self;
    /// Returns whether every address in the range is user accessible in `A`.
    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool;
}
393
+
394
+
impl AddressRangeExt for Range<PhysicalAddress> {
    address_range_impl!();
    /// Physical addresses are never directly user accessible; calling this is a bug.
    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
        unimplemented!("PhysicalAddress is never user accessible")
    }
}
400
+
401
+
impl AddressRangeExt for Range<VirtualAddress> {
    address_range_impl!();

    /// A half-open range is user accessible iff its first and last *contained*
    /// addresses both are; empty ranges are not.
    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
        if self.is_empty() {
            return false;
        }
        // `end` is exclusive, so check the last contained address instead.
        let Some(end_minus_one) = self.end.checked_sub(1) else {
            return false;
        };

        self.start.is_user_accessible::<A>() && end_minus_one.is_user_accessible::<A>()
    }
}
+202
libs/mem/src/frame.rs
+202
libs/mem/src/frame.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::Layout;
9
+
use core::cmp::PartialEq;
10
+
use core::fmt;
11
+
use core::fmt::Debug;
12
+
use core::mem::offset_of;
13
+
use core::ops::Deref;
14
+
use core::ptr::NonNull;
15
+
use core::sync::atomic;
16
+
use core::sync::atomic::{AtomicUsize, Ordering};
17
+
18
+
use cordyceps::{Linked, list};
19
+
use pin_project::pin_project;
20
+
21
+
use crate::PhysicalAddress;
22
+
use crate::frame_alloc::FrameAllocator;
23
+
24
+
/// Soft limit on the amount of references that may be made to a `Frame`.
25
+
const MAX_REFCOUNT: usize = isize::MAX as usize;
26
+
27
+
/// A counted reference to a physical memory [`Frame`].
///
/// Cloning bumps the frame's refcount; dropping the last `FrameRef` returns the
/// frame to its originating [`FrameAllocator`] (see the `Clone`/`Drop` impls).
pub struct FrameRef {
    // Pointer to the shared, refcounted frame descriptor.
    frame: NonNull<Frame>,
    // Allocator the frame is returned to when the refcount reaches zero.
    frame_alloc: &'static dyn FrameAllocator,
}
31
+
32
+
/// Per-physical-page descriptor: its address, an atomic refcount, and intrusive
/// list links so frames can be chained into free lists / caches without allocation.
///
/// `!Unpin` because the intrusive `links` make the struct address-sensitive once
/// it is inserted into a `cordyceps` list.
#[pin_project(!Unpin)]
#[derive(Debug)]
pub struct Frame {
    // Physical address of the page this descriptor tracks.
    addr: PhysicalAddress,
    // Number of live `FrameRef`s; 0 means the frame is free.
    refcount: AtomicUsize,
    #[pin]
    links: list::Links<Self>,
}
40
+
41
+
// ===== impl FrameRef =====
42
+
43
+
impl Clone for FrameRef {
44
+
/// Makes a clone of the `Frame`.
45
+
///
46
+
/// This creates reference to the same `FrameInfo`, increasing the reference count by one.
47
+
fn clone(&self) -> Self {
48
+
// Increase the reference count by one. Using relaxed ordering, as knowledge of the
49
+
// original reference prevents other threads from erroneously deleting
50
+
// the object.
51
+
//
52
+
// Again, restating what the `Arc` implementation quotes from the
53
+
// [Boost documentation][1]:
54
+
//
55
+
// > Increasing the reference counter can always be done with memory_order_relaxed: New
56
+
// > references to an object can only be formed from an existing
57
+
// > reference, and passing an existing reference from one thread to
58
+
// > another must already provide any required synchronization.
59
+
//
60
+
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
61
+
let old_size = self.refcount.fetch_add(1, Ordering::Relaxed);
62
+
debug_assert_ne!(old_size, 0);
63
+
64
+
// Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
65
+
// `Frame`s somewhere (which we really shouldn't but just in case). Overflowing the refcount
66
+
// would *really* bad as it would treat the frame as free and potentially cause a use-after-free
67
+
// scenario. Realistically this branch should never be taken.
68
+
//
69
+
// Also worth noting: Just like `Arc`, the refcount could still overflow when in between
70
+
// the load above and this check some other cpu increased the refcount from `isize::MAX` to
71
+
// `usize::MAX` but that seems unlikely. The other option, doing the comparison and update in
72
+
// one conditional atomic operation produces much worse code, so if its good enough for the
73
+
// standard library, it is good enough for us.
74
+
assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");
75
+
76
+
unsafe { Self::from_raw_parts(self.frame, self.frame_alloc.clone()) }
77
+
}
78
+
}
79
+
80
+
impl Drop for FrameRef {
    /// Drops the `Frame`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        // Fast path: other references remain, nothing to free. The `Release` here
        // pairs with the `Acquire` fence below taken by whichever drop hits zero.
        if self.refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of `FrameInfo` happen before freeing it.
        // Because it is marked `Release`, the decreasing of the reference count synchronizes
        // with this `Acquire` fence. This means that use of `FrameInfo` happens before decreasing
        // the reference count, which happens before this fence, which happens before freeing `FrameInfo`.
        //
        // This section of the [Boost documentation][1] as quoted in Rusts `Arc` implementation
        // may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        // Out-of-line cold path: return the frame to the allocator.
        self.drop_slow();
    }
}
111
+
112
+
impl Debug for FrameRef {
113
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
114
+
f.debug_struct("FrameRef")
115
+
.field("ptr", &self.frame)
116
+
.finish_non_exhaustive()
117
+
}
118
+
}
119
+
120
+
impl Deref for FrameRef {
    type Target = Frame;

    /// Borrows the underlying frame descriptor.
    fn deref(&self) -> &Self::Target {
        // Safety: `self` holds a reference count on the frame (see `Clone`/`Drop`),
        // so the pointee cannot be freed while this `FrameRef` is alive.
        unsafe { self.frame.as_ref() }
    }
}
127
+
128
+
impl FrameRef {
    /// Assembles a `FrameRef` from its raw parts without touching the refcount.
    ///
    /// # Safety
    ///
    /// `frame` must point to a live `Frame` managed by `alloc`, and the caller must
    /// transfer exactly one unit of the frame's reference count to the returned
    /// `FrameRef` (its `Drop` will decrement it).
    pub unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self {
        Self { frame, frame_alloc: alloc }
    }

    /// Returns `true` if `a` and `b` refer to the same `Frame` (pointer identity,
    /// not value equality).
    pub fn ptr_eq(a: &Self, b: &Self) -> bool {
        a.frame == b.frame
    }

    // Cold path of `Drop`: returns the frame to its allocator once the refcount
    // hit zero. `inline(never)` keeps the common drop path small.
    #[inline(never)]
    fn drop_slow(&mut self) {
        // Safety: relies on `page_size()` being a valid (nonzero power-of-two)
        // size/alignment — NOTE(review): not checked here, confirm allocators
        // guarantee this.
        let layout = unsafe {
            Layout::from_size_align_unchecked(self.frame_alloc.page_size(), self.frame_alloc.page_size())
        };
        // Safety: refcount reached zero, so no other reference to this frame exists
        // and it originated from `self.frame_alloc` (from_raw_parts contract).
        unsafe {
            self.frame_alloc.deallocate(self.frame, layout);
        }
    }
}
147
+
148
+
// ===== impl Frame =====
149
+
150
+
// Safety: `Frame` holds only a `PhysicalAddress`, an `AtomicUsize`, and intrusive
// list links. NOTE(review): the original comment referenced an `assert_impl_all!`
// that is not present in this file — confirm `list::Links<Frame>` is sound to send
// across threads, or restore the static assertion.
unsafe impl Send for Frame {}

// Safety: all shared mutation goes through the atomic `refcount`; see the `Send`
// note above regarding `list::Links`.
unsafe impl Sync for Frame {}
155
+
156
+
impl PartialEq<Frame> for &Frame {
    /// Two frames compare equal when both their current refcount and address match.
    ///
    /// NOTE(review): including the refcount makes equality time-dependent — the
    /// `Relaxed` load can race with concurrent clones/drops, so `a == b` may flip
    /// between calls for the same pair. Confirm callers (this impl appears to exist
    /// for test assertions) actually want refcount-sensitive equality.
    fn eq(&self, other: &Frame) -> bool {
        self.refcount() == other.refcount() && self.addr == other.addr
    }
}
161
+
162
+
impl Frame {
163
+
pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
164
+
Self {
165
+
addr,
166
+
refcount: AtomicUsize::new(initial_refcount),
167
+
links: list::Links::new(),
168
+
}
169
+
}
170
+
171
+
pub fn refcount(&self) -> usize {
172
+
self.refcount.load(Ordering::Relaxed)
173
+
}
174
+
175
+
pub fn is_unique(&self) -> bool {
176
+
self.refcount() == 1
177
+
}
178
+
179
+
pub fn addr(&self) -> PhysicalAddress {
180
+
self.addr
181
+
}
182
+
}
183
+
184
+
// Safety (Linked contract): `Handle = NonNull<Self>` is a plain pointer, so
// `into_ptr`/`from_ptr` are trivially inverse and never alias two handles to one
// node beyond what the caller already did.
unsafe impl Linked<list::Links<Self>> for Frame {
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    /// Projects a frame pointer to its embedded `links` field without creating an
    /// intermediate `&Frame` (which would be UB while the node is list-linked).
    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        ptr.map_addr(|addr| {
            // Offset of the `links` field inside `Frame`.
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+137
libs/mem/src/frame_alloc.rs
+137
libs/mem/src/frame_alloc.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mod area;
9
+
mod area_selection;
10
+
11
+
use core::alloc::Layout;
12
+
use core::cell::RefCell;
13
+
use core::cmp;
14
+
use core::ops::Range;
15
+
use core::ptr::NonNull;
16
+
use core::sync::atomic::{AtomicUsize, Ordering};
17
+
18
+
use cordyceps::List;
19
+
use cpu_local::collection::CpuLocal;
20
+
use fallible_iterator::FallibleIterator;
21
+
use lock_api::Mutex;
22
+
use smallvec::SmallVec;
23
+
24
+
use crate::address_space::RawAddressSpace;
25
+
use crate::frame_alloc::area::Area;
26
+
use crate::frame_alloc::area_selection::select_areas;
27
+
use crate::{Frame, PhysicalAddress};
28
+
29
+
/// Error returned when a frame allocation request cannot be satisfied.
#[derive(Debug)]
pub struct AllocError;
31
+
32
+
/// Allocator of physical memory frames.
///
/// # Safety
///
/// Implementors must return frames that stay valid until deallocated, and
/// `page_size()` must be a nonzero power of two (callers build `Layout`s from it
/// unchecked).
pub unsafe trait FrameAllocator: Send + Sync + 'static {
    /// Allocates frames satisfying `layout`.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
    /// Returns frames to the allocator.
    ///
    /// # Safety
    ///
    /// `block` must have been returned by `allocate` on this same allocator with
    /// the same `layout`, and must not be used afterwards.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
    /// Size of a single frame in bytes.
    fn page_size(&self) -> usize;
}
37
+
38
+
/// Upper bound on frames each CPU-local free cache may hold (enforced in `deallocate_local`).
const MAX_FRAMES_IN_CACHE: usize = 256;
39
+
40
+
/// Physical frame allocator: a set of buddy-allocator [`Area`]s behind a mutex,
/// fronted by per-CPU single-frame caches.
pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> {
    // Buddy areas covering the allocatable physical regions.
    areas: Mutex<L, SmallVec<[Area<A>; 4]>>,
    // Per-CPU stash of single free frames, refilled/drained lock-free w.r.t. `areas`.
    cpu_local_cache: CpuLocal<RefCell<List<Frame>>>,
    // Largest contiguous alignment any area can currently serve (advisory only).
    max_alignment_hint: AtomicUsize,
}
45
+
46
+
impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> {
    /// Builds an allocator over the given physical regions.
    ///
    /// Regions are merged/split into [`Area`]s by [`select_areas`]; each area's
    /// bookkeeping is carved out of the region itself.
    ///
    /// # Errors
    ///
    /// Propagates [`SelectionError`]s from area selection (regions too small to
    /// hold their own bookkeeping, etc.).
    pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> {
        let mut max_alignment_hint = 0;
        let mut areas = SmallVec::new();

        let mut selections = select_areas::<A>(allocatable_regions);
        while let Some(selection) = selections.next()? {
            let area = Area::new(selection.area, selection.bookkeeping);
            // Track the best alignment any single area can serve.
            max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint());
            areas.push(area);
        }

        Ok(Self {
            areas: Mutex::new(areas),
            cpu_local_cache: CpuLocal::new(),
            max_alignment_hint: AtomicUsize::new(max_alignment_hint),
        })
    }

    /// Advisory upper bound on the alignment a subsequent allocation might get.
    pub fn max_alignment_hint(&self) -> usize {
        self.max_alignment_hint.load(Ordering::Relaxed)
    }

    // Fast path: serve exactly-one-page allocations from this CPU's cache.
    // Returns `None` for any other layout or when the cache is empty.
    fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();
            cache.pop_back()
        } else {
            None
        }
    }

    // Fast path: stash a single freed page in this CPU's cache.
    // Returns `true` iff the frame was absorbed (caller must not free it again).
    fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();

            // Bound the cache so one CPU cannot hoard all free frames.
            if cache.len() < MAX_FRAMES_IN_CACHE {
                cache.push_back(block);
                return true;
            }
        }

        false
    }
}
91
+
92
+
// Safety: frames handed out come from the `Area` buddy allocators (or the CPU-local
// cache of previously-allocated frames) and remain valid until deallocated;
// `page_size()` is `A::PAGE_SIZE`, a compile-time page size.
unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
    for &'static FrameAlloc<L, A>
{
    /// Allocates frames for `layout`, preferring the CPU-local cache, then trying
    /// each area in turn under the global lock.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        // attempt to allocate from the CPU-local cache first
        if let Some(frame) = self.allocate_local(layout) {
            return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1));
        }

        // Slow path: scan areas under the lock until one can satisfy the request.
        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            if let Ok(frames) = area.allocate(layout) {
                return Ok(frames);
            }
        }

        Err(AllocError)
    }

    /// Returns frames, preferring the CPU-local cache, otherwise giving them back
    /// to the owning area.
    ///
    /// Panics (`unreachable!`) if `block` belongs to no area — i.e. if the caller
    /// violated the `deallocate` contract.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) {
        // attempt to place the frame into the CPU-local cache first
        if self.deallocate_local(block, layout) {
            return;
        }

        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            // Safety: caller guarantees `block` is a live frame we allocated.
            let block_ = unsafe { block.as_ref() };

            if area.contains_frame(block_.addr()) {
                // Safety: `block`/`layout` pairing is the caller's contract; the
                // frame's address was just confirmed to belong to this area.
                unsafe { area.deallocate(block, layout) };

                // Freeing may have re-merged buddies, raising this area's best alignment.
                self.max_alignment_hint
                    .fetch_max(area.max_alignment_hint(), Ordering::Relaxed);

                return;
            }
        }

        unreachable!();
    }

    fn page_size(&self) -> usize {
        A::PAGE_SIZE
    }
}
+444
libs/mem/src/frame_alloc/area.rs
+444
libs/mem/src/frame_alloc/area.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::Layout;
9
+
use core::marker::PhantomData;
10
+
use core::mem::MaybeUninit;
11
+
use core::ops::Range;
12
+
use core::ptr::NonNull;
13
+
use core::{cmp, fmt};
14
+
15
+
use cordyceps::List;
16
+
17
+
use crate::address_space::RawAddressSpace;
18
+
use crate::frame_alloc::AllocError;
19
+
use crate::{AddressRangeExt, Frame, PhysicalAddress};
20
+
21
+
const MAX_ORDER: usize = 11;
22
+
23
+
/// A contiguous physical region managed by a binary-buddy allocator with
/// `MAX_ORDER` size classes (order 0 = one page, order n = `2^n` pages).
pub struct Area<A: RawAddressSpace> {
    // Physical address range this area manages (excludes its bookkeeping tail).
    area: Range<PhysicalAddress>,
    // One `Frame` slot per page in `area`; initialized lazily as blocks are split/allocated.
    frames: &'static mut [MaybeUninit<Frame>],

    // free_lists[n] holds free blocks of 2^n pages, linked through the head frame.
    free_lists: [List<Frame>; MAX_ORDER],

    // Largest order that currently has (or has had) a free block — alignment hint.
    max_order: usize,
    total_frames: usize,
    used_frames: usize,

    // Carries the page-size parameters of the address space; no data stored.
    _aspace: PhantomData<A>,
}
35
+
36
+
// Safety: `Area` is only reachable behind `FrameAlloc`'s mutex; the raw pointers it
// holds (`frames`, intrusive lists) are exclusively owned by it. NOTE(review): no
// static assertion backs this — confirm `List<Frame>`'s thread-safety requirements.
unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {}
unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {}
38
+
39
+
impl<A: RawAddressSpace> fmt::Debug for Area<A> {
    /// Debug view of the area; the frame slab is summarized by its length rather
    /// than dumped element by element.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("Area");
        s.field("area", &self.area);
        let frames_summary = format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len());
        s.field("frames", &frames_summary);
        s.field("free_lists", &self.free_lists);
        s.field("max_order", &self.max_order);
        s.field("total_frames", &self.total_frames);
        s.field("used_frames", &self.used_frames);
        s.finish()
    }
}
54
+
55
+
impl<A: RawAddressSpace> Area<A> {
    /// Builds an area over `area`, using `frames` (one slot per page) as bookkeeping.
    ///
    /// The region is decomposed greedily into the largest naturally-aligned
    /// power-of-two blocks, each pushed onto its order's free list.
    pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self {
        let mut free_lists = [const { List::new() }; MAX_ORDER];
        let mut total_frames = 0;
        let mut max_order = 0;

        let mut remaining_bytes = area.size();
        let mut addr = area.start;

        // This is the main area initialization loop. We loop through the `area` "chopping off" the
        // largest possible min_block_size-aligned block from the area and add that to its corresponding
        // free list.
        //
        // Note: Remember that for buddy allocators `size == align`. That means we both need to check
        // the alignment and size of our remaining area and can only chop off whatever is smaller.
        while remaining_bytes > 0 {
            // the largest size we can chop off given the alignment of the remaining area
            let max_align = if addr == PhysicalAddress::ZERO {
                // if area happens to start exactly at address 0x0 our calculation below doesn't work.
                // address 0x0 actually supports *any* alignment so we special-case it and return `MAX`
                usize::MAX
            } else {
                // otherwise mask out the least significant bit of the address to figure out its alignment
                addr.get() & (!addr.get() + 1)
            };
            // the largest size we can chop off given the size of the remaining area
            // which is the next smaller power of two
            let max_size = 1 << remaining_bytes.ilog2();

            // our chosen size will be the smallest of
            // - the maximum size by remaining areas alignment
            // - the maximum size by remaining areas size
            // - the maximum block size supported by this allocator
            let size = cmp::min(
                cmp::min(max_align, max_size),
                A::PAGE_SIZE << (MAX_ORDER - 1),
            );
            debug_assert!(size.is_multiple_of(A::PAGE_SIZE));

            // Order n <=> block of 2^n pages; derived from size = PAGE_SIZE << n.
            let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize;

            {
                // Only the block's head frame is initialized here; the rest of the
                // block's frames stay uninit until the block is allocated.
                let frame = frames[total_frames].write(Frame::new(addr, 0));

                free_lists[order].push_back(NonNull::from(frame));
            }

            total_frames += 1 << order;
            max_order = cmp::max(max_order, order);
            addr = addr.checked_add(size).unwrap();
            remaining_bytes -= size;
        }

        // Make sure we've accounted for all frames
        debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE);

        Self {
            area,
            frames,

            free_lists,

            max_order,
            total_frames,
            used_frames: 0,

            _aspace: PhantomData,
        }
    }

    /// Allocates a block satisfying `layout` (size and align both page-multiples),
    /// splitting a larger free block down to size if needed.
    ///
    /// # Errors
    ///
    /// [`AllocError`] when the layout is unserviceable or no free block is large enough.
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let min_order = self.allocation_order(layout)?;

        // Starting at the smallest sufficient size class, search for a free block. If we find one in
        // a free list, return it and its order.
        let (block, block_order) = self.free_lists[min_order..]
            .iter_mut()
            .enumerate()
            .find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order)))
            .ok_or(AllocError)?;

        // if the block we found is larger than the `min_order` we need, we repeatedly split off
        // the upper half (of decreasing size) until we reach the desired size. The split off blocks
        // are returned to their appropriate free lists.
        for order in (min_order..block_order).rev() {
            let block = unsafe { block.as_ref() };
            let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap();
            let buddy = self.frame_for_addr(buddy_addr).unwrap();

            let buddy = buddy.write(Frame::new(buddy_addr, 0));
            let buddy = NonNull::from(buddy);

            self.free_lists[order].push_back(buddy);
        }

        let alloc_size_frames = 1 << min_order;

        // lazily initialize all frames — every page of the returned block gets a
        // live `Frame` with refcount 1.
        for idx in 0..alloc_size_frames {
            let block = unsafe { block.as_ref() };
            let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap();

            let frame = self.frame_for_addr(addr).unwrap();
            frame.write(Frame::new(addr, 1));
        }

        self.used_frames += alloc_size_frames;

        #[cfg(debug_assertions)]
        self.assert_valid();

        Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames))
    }

    /// Frees a block previously returned by [`Area::allocate`], re-merging it with
    /// free buddies into larger blocks where possible.
    ///
    /// # Safety
    ///
    /// `block`/`layout` must correspond to a live allocation made by this area;
    /// the block must not be used afterwards.
    pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) {
        #[cfg(debug_assertions)]
        self.assert_valid();

        let initial_order = self.allocation_order(layout).unwrap();
        let mut order = initial_order;

        // Walk up the orders, coalescing with the buddy whenever it is (a) inside the
        // area, (b) the merged block would be properly aligned, and (c) currently free.
        while order < self.free_lists.len() - 1 {
            let block_ = unsafe { block.as_ref() };
            if let Some(buddy) = self.buddy_addr(order, block_.addr())
                && cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1))
                && self.remove_from_free_list(order, buddy)
            {
                let buddy: NonNull<Frame> =
                    NonNull::from(self.frame_for_addr(buddy).unwrap()).cast();
                // The merged block's head is the lower of the two halves.
                block = cmp::min(buddy, block);
                order += 1;
            } else {
                break;
            }
        }

        self.free_lists[order].push_back(block);
        self.used_frames -= 1 << initial_order;
        self.max_order = cmp::max(self.max_order, order);

        #[cfg(debug_assertions)]
        self.assert_valid();
    }

    /// Size in bytes of the largest block this area has been able to hold.
    pub fn max_alignment_hint(&self) -> usize {
        self.order_size(self.max_order)
    }

    // Bookkeeping slot for the page at `addr`.
    // NOTE(review): always returns `Some`; an out-of-area `addr` panics on the
    // slice index instead of returning `None` — confirm this is intended.
    fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> {
        let relative = addr.checked_sub_addr(self.area.start).unwrap();
        let idx = relative >> A::PAGE_SIZE_LOG_2;
        Some(&mut self.frames[idx])
    }

    /// Whether `addr` lies inside this area's managed range.
    pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool {
        self.area.contains(&addr)
    }

    // Address of the buddy of the `order`-sized block at `block`, or `None` when
    // the block spans the whole area (nothing to merge with).
    fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> {
        assert!(block >= self.area.start);
        assert!(block.is_aligned_to(A::PAGE_SIZE << order));

        let relative = block.checked_sub_addr(self.area.start).unwrap();
        let size = self.order_size(order);
        if size >= self.area.size() {
            // MAX_ORDER blocks do not have buddies
            None
        } else {
            // Fun: We can find our buddy by xoring the right bit in our
            // offset from the base of the heap.
            Some(self.area.start.checked_add(relative ^ size).unwrap())
        }
    }

    // Unlinks the free block whose head frame sits at `to_remove` from the given
    // order's free list. Returns `false` if it was not on the list (i.e. not free).
    // O(list length) linear scan.
    fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool {
        let mut c = self.free_lists[order].cursor_front_mut();

        while let Some(candidate) = c.current() {
            if candidate.addr() == to_remove {
                c.remove_current().unwrap();
                return true;
            }

            c.move_next();
        }

        false
    }

    // The size of the blocks we allocate for a given order.
    const fn order_size(&self, order: usize) -> usize {
        1 << (A::PAGE_SIZE_LOG_2 as usize + order)
    }

    // Validates `layout` and returns the rounded-up power-of-two byte size.
    const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> {
        // We can only allocate blocks that are at least one page
        if !layout.size().is_multiple_of(A::PAGE_SIZE) {
            return Err(AllocError);
        }

        // We can only allocate blocks that are at least page aligned
        if !layout.align().is_multiple_of(A::PAGE_SIZE) {
            return Err(AllocError);
        }

        let size = layout.size().next_power_of_two();

        // We cannot allocate blocks larger than our largest size class
        // NOTE(review): `order_size(free_lists.len())` is one order *above* the
        // largest size class (orders run 0..=len-1); a request of exactly that size
        // passes this check only to fail in `allocate`'s empty-slice search —
        // confirm whether `len() - 1` was intended here.
        if size > self.order_size(self.free_lists.len()) {
            return Err(AllocError);
        }

        Ok(size)
    }

    // Smallest order whose blocks can hold `layout`.
    const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> {
        if let Ok(size) = self.allocation_size(layout) {
            Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize)
        } else {
            Err(AllocError)
        }
    }

    // Debug-time invariant check: every free block is aligned to its order, and
    // free + used frames account for every frame in the area.
    fn assert_valid(&self) {
        for (order, l) in self.free_lists.iter().enumerate() {
            l.assert_valid();

            for f in l {
                assert!(
                    f.addr().is_aligned_to(A::PAGE_SIZE << order),
                    "frame {f:?} is not aligned to order {order}"
                );
            }
        }

        assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames);
    }
}
298
+
299
+
/// Total number of frames currently sitting on the area's free lists
/// (a block of order `n` accounts for `2^n` frames).
fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize {
    area.free_lists
        .iter()
        .enumerate()
        .map(|(order, list)| list.len() << order)
        .sum()
}
306
+
307
+
#[cfg(test)]
308
+
mod tests {
309
+
use alloc::vec::Vec;
310
+
311
+
use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest};
312
+
313
+
use super::*;
314
+
use crate::test_utils::TestAddressSpace;
315
+
316
+
const PAGE_SIZE: usize = 4096;
317
+
318
+
prop_compose! {
319
+
// Generate arbitrary integers up to half the maximum desired value,
320
+
// then multiply them by 2, thus producing only even integers in the
321
+
// desired range.
322
+
fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE }
323
+
}
324
+
325
+
proptest! {
326
+
#[test]
327
+
fn new_fixed_base(num_frames in 0..50_000usize) {
328
+
let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
329
+
PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE),
330
+
{
331
+
let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
332
+
frames.resize_with(num_frames, || MaybeUninit::uninit());
333
+
frames.leak()
334
+
}
335
+
);
336
+
area.assert_valid();
337
+
338
+
// let's check whether the area correctly initialized itself
339
+
//
340
+
// since we start on an aligned base address (0x0) we expect it have split off chunks
341
+
// largest-to-smallest. We replicate the process here, but take a block from its free list.
342
+
let mut frames_remaining = num_frames;
343
+
while frames_remaining > 0 {
344
+
// clamp the order we calculate at the max possible order
345
+
let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1);
346
+
347
+
let chunk = area.free_lists[chunk_order].pop_back();
348
+
prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}");
349
+
350
+
frames_remaining -= 1 << chunk_order;
351
+
}
352
+
// At the end of this process we expect all free lists to be empty
353
+
prop_assert!(area.free_lists.iter().all(|list| list.is_empty()));
354
+
}
355
+
356
+
#[test]
357
+
fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) {
358
+
359
+
let area = {
360
+
let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
361
+
prop_assume!(area_end.is_some());
362
+
PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
363
+
};
364
+
365
+
let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
366
+
area,
367
+
{
368
+
let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
369
+
frames.resize_with(num_frames, || MaybeUninit::uninit());
370
+
frames.leak()
371
+
}
372
+
);
373
+
area.assert_valid();
374
+
375
+
// TODO figure out if we can test the free lists in a sensible way
376
+
}
377
+
378
+
#[test]
379
+
fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) {
380
+
let area = {
381
+
let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
382
+
prop_assume!(area_end.is_some());
383
+
PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
384
+
};
385
+
386
+
let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
387
+
area,
388
+
{
389
+
let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
390
+
frames.resize_with(num_frames, || MaybeUninit::uninit());
391
+
frames.leak()
392
+
}
393
+
);
394
+
area.assert_valid();
395
+
396
+
debug_assert_eq!(frames_in_area(&mut area), num_frames);
397
+
}
398
+
399
+
#[test]
400
+
fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) {
401
+
let area = {
402
+
let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
403
+
prop_assume!(area_end.is_some());
404
+
PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
405
+
};
406
+
407
+
let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
408
+
area.clone(),
409
+
{
410
+
let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
411
+
frames.resize_with(num_frames, || MaybeUninit::uninit());
412
+
frames.leak()
413
+
}
414
+
);
415
+
area1.assert_valid();
416
+
417
+
let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
418
+
area,
419
+
{
420
+
let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
421
+
frames.resize_with(num_frames, || MaybeUninit::uninit());
422
+
frames.leak()
423
+
}
424
+
);
425
+
area2.assert_valid();
426
+
427
+
// we can only allocate contiguous blocks of the largest order available
428
+
prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE));
429
+
430
+
let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap();
431
+
432
+
let block = area2.allocate(layout).unwrap();
433
+
prop_assert!(block.len() >= alloc_frames);
434
+
435
+
unsafe { area2.deallocate(block.cast(), layout); }
436
+
437
+
assert_eq!(frames_in_area(&area2), num_frames);
438
+
439
+
for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() {
440
+
prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len());
441
+
}
442
+
}
443
+
}
444
+
}
+133
libs/mem/src/frame_alloc/area_selection.rs
+133
libs/mem/src/frame_alloc/area_selection.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use alloc::slice;
9
+
use core::fmt::Formatter;
10
+
use core::marker::PhantomData;
11
+
use core::mem;
12
+
use core::mem::MaybeUninit;
13
+
use core::ops::Range;
14
+
15
+
use fallible_iterator::FallibleIterator;
16
+
use smallvec::SmallVec;
17
+
18
+
use crate::address_space::RawAddressSpace;
19
+
use crate::{AddressRangeExt, Frame, PhysicalAddress};
20
+
21
+
const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB
22
+
23
+
/// One usable allocator area produced by [`select_areas`].
#[derive(Debug)]
pub struct AreaSelection {
    /// Page-aligned range the allocator may hand out (bookkeeping tail excluded).
    pub area: Range<PhysicalAddress>,
    /// Frame-descriptor storage carved from the end of the original region,
    /// one slot per page of `area`.
    pub bookkeeping: &'static mut [MaybeUninit<Frame>],
    /// Bytes of bookkeeping wasted on holes merged into this area.
    pub wasted_bytes: usize,
}
29
+
30
+
/// A physical region could not be turned into a usable allocator area
/// (empty after alignment, or too small to hold its own bookkeeping).
#[derive(Debug)]
pub struct SelectionError {
    /// The page-aligned range that was rejected.
    pub range: Range<PhysicalAddress>,
}
34
+
35
+
/// Fallible iterator yielding [`AreaSelection`]s; created by [`select_areas`].
pub struct ArenaSelections<A: RawAddressSpace> {
    // Regions not yet consumed; popped from the back as areas are built.
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
    // Bookkeeping bytes wasted so far on holes merged into the current area;
    // reset (handed to the selection) each time an area is emitted.
    wasted_bytes: usize,

    _aspace: PhantomData<A>,
}
41
+
42
+
/// Creates an iterator that greedily groups `allocatable_regions` into allocator
/// areas, merging nearby regions while the bookkeeping wasted on the holes
/// between them stays under `MAX_WASTED_AREA_BYTES`.
pub fn select_areas<A: RawAddressSpace>(
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
) -> ArenaSelections<A> {
    ArenaSelections {
        allocatable_regions,
        wasted_bytes: 0,

        _aspace: PhantomData,
    }
}
52
+
53
+
impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> {
    type Item = AreaSelection;
    type Error = SelectionError;

    /// Produces the next allocator area.
    ///
    /// Pops regions, merging adjacent ones into the growing area while the
    /// bookkeeping cost of the holes in between stays within budget, then carves
    /// the area's own frame-bookkeeping out of its tail.
    ///
    /// NOTE(review): the merge logic assumes `allocatable_regions` is sorted so
    /// that successively popped regions are strictly adjacent in address order
    /// (see the two `debug_assert!`s) — confirm callers uphold this.
    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        let Some(mut area) = self.allocatable_regions.pop() else {
            // All regions consumed: iteration is done.
            return Ok(None);
        };

        while let Some(region) = self.allocatable_regions.pop() {
            debug_assert!(!area.is_overlapping(&region));

            // Pages in the gap between the current area and the candidate region;
            // merging would force us to keep (unused) bookkeeping for them.
            let pages_in_hole = if area.end <= region.start {
                // the region is higher than the current area
                region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE
            } else {
                debug_assert!(region.end <= area.start);
                // the region is lower than the current area
                area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE
            };

            let waste_from_hole = size_of::<Frame>() * pages_in_hole;

            if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES {
                // Budget exceeded: put the region back and finish this area.
                self.allocatable_regions.push(region);
                break;
            } else {
                self.wasted_bytes += waste_from_hole;

                // Extend the area over the hole to include the region.
                if area.end <= region.start {
                    area.end = region.end;
                } else {
                    area.start = region.start;
                }
            }
        }

        let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap();
        // We can't use empty areas anyway
        if aligned.is_empty() {
            return Err(SelectionError { range: aligned });
        }

        let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE;

        // Place the frame-descriptor slab at the (page-aligned) tail of the area.
        let bookkeeping_start = aligned
            .end
            .checked_sub(bookkeeping_size_frames * size_of::<Frame>())
            .unwrap()
            .align_down(A::PAGE_SIZE);

        // The area has no space to hold its own bookkeeping
        if bookkeeping_start < aligned.start {
            return Err(SelectionError { range: aligned });
        }

        // Safety: `bookkeeping_start..aligned.end` lies inside the allocatable
        // region, is page-aligned, and is excluded from the allocatable area below,
        // so nothing else will hand out this memory.
        // NOTE(review): this treats the physical address as directly dereferenceable
        // — presumably an identity/offset mapping is in effect; confirm.
        let bookkeeping = unsafe {
            slice::from_raw_parts_mut(
                bookkeeping_start.as_mut_ptr().cast(),
                bookkeeping_size_frames,
            )
        };
        // Shrink the allocatable area so it no longer covers the bookkeeping slab.
        aligned.end = bookkeeping_start;

        Ok(Some(AreaSelection {
            area: aligned,
            bookkeeping,
            wasted_bytes: mem::take(&mut self.wasted_bytes),
        }))
    }
}
124
+
125
+
// ===== impl SelectionError =====
126
+
127
+
impl core::fmt::Display for SelectionError {
128
+
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
129
+
todo!()
130
+
}
131
+
}
132
+
133
+
impl core::error::Error for SelectionError {}
+19
libs/mem/src/lib.rs
+19
libs/mem/src/lib.rs
···
1
+
// `no_std` except under `cargo test`, where the std test harness is needed.
#![cfg_attr(not(test), no_std)]
extern crate alloc;

mod access_rules;
pub mod address_space;
mod addresses;
mod frame;
pub mod frame_alloc;
#[cfg(test)]
mod test_utils;
mod utils;
mod vmo;
// NOTE(review): unlike `test_utils`, this module is not gated behind
// `#[cfg(test)]` even though `test.rs` contains only stubbed scratch code —
// confirm whether the gate (or a rename) is missing.
mod test;

/// Crate-wide result type, backed by `anyhow`.
pub type Result<T> = anyhow::Result<T>;

pub use access_rules::{AccessRules, WriteOrExecute};
pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
pub use frame::{Frame, FrameRef};
+56
libs/mem/src/test.rs
+56
libs/mem/src/test.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::ops::RangeBounds;
9
+
use crate::{AccessRules, VirtualAddress};
10
+
use crate::address_space::Batch;
11
+
12
+
// Work-in-progress sketch of a contiguous region inside an address space.
// Both operations are unimplemented stubs for now.
struct AddressSpaceRegion<A> {
    // The raw address space this region belongs to; not used yet.
    _aspace: A,
}

impl<A> AddressSpaceRegion<A> {
    /// Map physical memory to back the given `range`
    ///
    /// After this call succeeds, accesses that align with the given `access` are guaranteed to
    /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds
    /// - `access_rules` is NOT a subset of self.access_rules
    pub fn commit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        access_rules: AccessRules,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Not implemented yet.
        todo!()
    }

    /// Release physical memory frames backing the given `range`.
    ///
    /// After this call succeeds, accesses will page fault.
    ///
    /// # Errors
    ///
    /// - `range` is out of bounds for this address space region
    pub fn decommit(
        &mut self,
        range: impl RangeBounds<VirtualAddress>,
        batch: &mut Batch,
        raw_aspace: &mut A,
    ) -> crate::Result<()> {
        // Not implemented yet.
        todo!()
    }
}
+171
libs/mem/src/test_utils.rs
+171
libs/mem/src/test_utils.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
extern crate std;
9
+
10
+
use alloc::collections::BTreeMap;
11
+
use core::marker::PhantomData;
12
+
use core::num::NonZeroUsize;
13
+
14
+
use crate::address_space::{Flush, RawAddressSpace};
15
+
use crate::{AccessRules, PhysicalAddress, VirtualAddress};
16
+
17
+
/// An in-memory stand-in for a real page-table-backed address space, used by
/// tests to exercise address-space logic without an MMU.
#[derive(Debug)]
pub struct TestAddressSpace<const PAGE_SIZE: usize, const ADDR_BITS: u32> {
    // Keyed by each mapping's *inclusive end* virtual address, so that
    // `BTreeMap::range(addr..)` yields the only candidate mapping that could
    // contain `addr` (see `get_mapping_containing`).
    mappings: BTreeMap<VirtualAddress, Mapping>,
}

/// A single contiguous virtual-to-physical mapping tracked by [`TestAddressSpace`].
#[derive(Debug)]
pub struct Mapping {
    // Start virtual address (page aligned).
    pub virt: VirtualAddress,
    // Start physical address (page aligned).
    pub phys: PhysicalAddress,
    // Length in bytes; never zero.
    pub len: NonZeroUsize,
    // Access rules applied uniformly to the whole mapping.
    pub access_rules: AccessRules,
}

/// No-op flush token returned by [`TestAddressSpace`]; the test model has no
/// TLB to invalidate.
pub struct TestFlush {
    _priv: PhantomData<()>,
}
33
+
34
+
impl<const PAGE_SIZE: usize, const ADDR_BITS: u32> TestAddressSpace<PAGE_SIZE, ADDR_BITS> {
35
+
pub const fn new() -> Self {
36
+
Self {
37
+
mappings: BTreeMap::new(),
38
+
}
39
+
}
40
+
41
+
pub fn get_mapping_containing(&self, addr: VirtualAddress) -> Option<&Mapping> {
42
+
let (end, mapping) = self.mappings.range(addr..).next()?;
43
+
44
+
if addr > *end { None } else { Some(mapping) }
45
+
}
46
+
47
+
pub fn get_mapping_mut_containing(&mut self, addr: VirtualAddress) -> Option<&mut Mapping> {
48
+
let (end, mapping) = self.mappings.range_mut(addr..).next()?;
49
+
50
+
if addr > *end { None } else { Some(mapping) }
51
+
}
52
+
53
+
pub fn remove_mapping_containing(&mut self, addr: VirtualAddress) -> Option<Mapping> {
54
+
let (key, _) = self.mappings.range_mut(addr..).next()?;
55
+
let key = *key;
56
+
57
+
Some(self.mappings.remove(&key).unwrap())
58
+
}
59
+
}
60
+
61
+
unsafe impl<const PAGE_SIZE: usize, const ADDR_BITS: u32> RawAddressSpace
62
+
for TestAddressSpace<PAGE_SIZE, ADDR_BITS>
63
+
{
64
+
const PAGE_SIZE: usize = PAGE_SIZE;
65
+
const VIRT_ADDR_BITS: u32 = ADDR_BITS;
66
+
67
+
type Flush = TestFlush;
68
+
69
+
fn flush(&self) -> Self::Flush {
70
+
TestFlush { _priv: PhantomData }
71
+
}
72
+
73
+
fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)> {
74
+
let mapping = self.get_mapping_containing(virt)?;
75
+
76
+
let offset = virt.checked_sub_addr(mapping.virt).unwrap();
77
+
78
+
Some((
79
+
mapping.phys.checked_add(offset).unwrap(),
80
+
mapping.access_rules,
81
+
))
82
+
}
83
+
84
+
unsafe fn map(
85
+
&mut self,
86
+
virt: VirtualAddress,
87
+
phys: PhysicalAddress,
88
+
len: NonZeroUsize,
89
+
access_rules: AccessRules,
90
+
_flush: &mut Self::Flush,
91
+
) -> crate::Result<()> {
92
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
93
+
assert!(phys.is_aligned_to(Self::PAGE_SIZE));
94
+
assert!(self.get_mapping_containing(virt).is_none());
95
+
96
+
let end_virt = virt.checked_add(len.get() - 1).unwrap();
97
+
assert!(end_virt.is_aligned_to(Self::PAGE_SIZE));
98
+
99
+
let prev = self.mappings.insert(
100
+
end_virt,
101
+
Mapping {
102
+
virt,
103
+
phys,
104
+
len,
105
+
access_rules,
106
+
},
107
+
);
108
+
assert!(prev.is_none());
109
+
110
+
Ok(())
111
+
}
112
+
113
+
unsafe fn unmap(
114
+
&mut self,
115
+
mut virt: VirtualAddress,
116
+
len: NonZeroUsize,
117
+
_flush: &mut Self::Flush,
118
+
) {
119
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
120
+
assert!(
121
+
virt.checked_add(len.get())
122
+
.unwrap()
123
+
.is_aligned_to(Self::PAGE_SIZE)
124
+
);
125
+
126
+
let mut bytes_remaining = len.get();
127
+
128
+
while bytes_remaining > 0 {
129
+
let mapping = self.remove_mapping_containing(virt).unwrap();
130
+
assert_eq!(mapping.virt, virt);
131
+
132
+
bytes_remaining -= mapping.len.get();
133
+
virt = virt.checked_sub(mapping.len.get()).unwrap();
134
+
}
135
+
}
136
+
137
+
unsafe fn set_access_rules(
138
+
&mut self,
139
+
mut virt: VirtualAddress,
140
+
len: NonZeroUsize,
141
+
access_rules: AccessRules,
142
+
_flush: &mut Self::Flush,
143
+
) {
144
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
145
+
assert!(
146
+
virt.checked_add(len.get())
147
+
.unwrap()
148
+
.is_aligned_to(Self::PAGE_SIZE)
149
+
);
150
+
151
+
let mut bytes_remaining = len.get();
152
+
153
+
while bytes_remaining > 0 {
154
+
let mapping = self.get_mapping_mut_containing(virt).unwrap();
155
+
assert_eq!(mapping.virt, virt);
156
+
157
+
mapping.access_rules = access_rules;
158
+
159
+
bytes_remaining -= mapping.len.get();
160
+
virt = virt.checked_sub(mapping.len.get()).unwrap();
161
+
}
162
+
}
163
+
}
164
+
165
+
// ===== impl TestFlush =====

impl Flush for TestFlush {
    fn flush(self) -> crate::Result<()> {
        // The test address space has no TLB, so flushing is always a no-op
        // that trivially succeeds.
        Ok(())
    }
}
+31
libs/mem/src/utils.rs
+31
libs/mem/src/utils.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
/// Checks (in debug builds only) that the unsafe precondition(s) described by
/// `$message` hold, by binding each `$name: $ty = $arg` and evaluating the
/// boolean expression `$e` inside a panicking `const fn` helper.
///
/// In release builds the check is compiled out entirely (the helper is only
/// called under `#[cfg(debug_assertions)]`).
macro_rules! assert_unsafe_precondition_ {
    ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => {
        {
            // This check is inlineable, but not by the MIR inliner.
            // The reason for this is that the MIR inliner is in an exceptionally bad position
            // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`,
            // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and
            // would be bad for compile times.
            //
            // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without
            // inlining the check. If it's `true`, it can inline it and get significantly better performance.
            #[inline]
            const fn precondition_check($($name:$ty),*) {
                assert!($e, concat!("unsafe precondition(s) violated: ", $message,
                    "\n\nThis indicates a bug in the program. \
                    This Undefined Behavior check is optional, and cannot be relied on for safety."))
            }

            #[cfg(debug_assertions)]
            precondition_check($($arg,)*);
        }
    };
}
pub(crate) use assert_unsafe_precondition_;
+529
libs/mem/src/vmo.rs
+529
libs/mem/src/vmo.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use alloc::sync::Arc;
9
+
use core::alloc::Layout;
10
+
use core::ops::{Bound, Range, RangeBounds};
11
+
use core::{fmt, ptr};
12
+
13
+
use anyhow::{anyhow, ensure};
14
+
use fallible_iterator::FallibleIterator;
15
+
use lock_api::RwLock;
16
+
use smallvec::SmallVec;
17
+
18
+
use crate::frame_alloc::FrameAllocator;
19
+
use crate::{AccessRules, FrameRef};
20
+
21
+
/// A type-erased handle to a virtual memory object (VMO).
///
/// All behavior is supplied through the [`RawVmoVTable`] stored in `vmo`;
/// `Vmo` itself only adds a debug label and RAII clone/drop plumbing.
pub struct Vmo {
    // Diagnostic label; defaults to a placeholder, set via [`Vmo::named`].
    name: &'static str,
    // The type-erased (data pointer, vtable) pair implementing this VMO.
    vmo: RawVmo,
}
25
+
26
+
/// A type-erased (data pointer, vtable) pair — the raw building block of [`Vmo`].
#[derive(Debug)]
struct RawVmo {
    // Opaque implementation state, handed to every vtable hook as the first
    // argument (e.g. an `Arc::into_raw` pointer for `PagedVmo`).
    data: *const (),
    vtable: &'static RawVmoVTable,
}
31
+
32
+
/// The table of function pointers that defines a [`Vmo`] implementation.
///
/// Every hook receives the `data` pointer the `Vmo` was constructed with as
/// its first argument.
#[derive(PartialEq, Copy, Clone, Debug)]
struct RawVmoVTable {
    // Duplicate the handle (e.g. bump a refcount) and return a new RawVmo.
    clone: unsafe fn(*const ()) -> RawVmo,
    // Materialize the frame at `index` so accesses under `access_rules` succeed.
    acquire: unsafe fn(
        *const (),
        index: usize,
        access_rules: AccessRules,
    ) -> crate::Result<Option<FrameRef>>,
    // Remove and return the frame at `index`, if populated.
    release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
    // Replace the frame at `index` with zeroed content, returning the old frame.
    clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
    // Current length. NOTE(review): unit appears to be frames (see
    // `Vmo::acquire`'s "UNIT: frames"), not bytes — confirm.
    len: unsafe fn(*const ()) -> usize,
    // Resize to `new_len` slots.
    resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
    // Tear down the handle (e.g. drop a refcount).
    drop: unsafe fn(*const ()),
}
46
+
47
+
// ===== impl Vmo =====

// `Vmo` holds no self-referential state, so it is trivially `Unpin` despite
// containing raw pointers.
impl Unpin for Vmo {}

// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Send
// therefore Vmo is Send too
unsafe impl Send for Vmo {}
// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Sync
// therefore Vmo is Sync too
unsafe impl Sync for Vmo {}
57
+
58
+
impl Clone for Vmo {
59
+
#[inline]
60
+
fn clone(&self) -> Self {
61
+
Self {
62
+
vmo: unsafe { (self.vmo.vtable.clone)(self.vmo.data) },
63
+
name: self.name,
64
+
}
65
+
}
66
+
}
67
+
68
+
impl Drop for Vmo {
    #[inline]
    fn drop(&mut self) {
        // Delegate teardown to the implementation's drop hook (e.g. dropping
        // the Arc reconstructed from `data`).
        unsafe { (self.vmo.vtable.drop)(self.vmo.data) }
    }
}
74
+
75
+
impl fmt::Debug for Vmo {
76
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
77
+
let vtable_ptr = self.vmo.vtable as *const RawVmoVTable;
78
+
f.debug_struct("Vmo")
79
+
.field("name", &self.name)
80
+
.field("data", &self.vmo.data)
81
+
.field("vtable", &vtable_ptr)
82
+
.finish()
83
+
}
84
+
}
85
+
86
+
impl Vmo {
    /// Creates a new `Vmo` from the provided `data` pointer and `vtable`.
    ///
    /// TODO
    ///
    /// The `data` pointer can be used to store arbitrary data as required by the vmo implementation.
    /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
    /// The value of this pointer will get passed to all functions that are part
    /// of the `vtable` as the first parameter.
    ///
    /// It is important to consider that the `data` pointer must point to a
    /// thread safe type such as an `Arc`.
    ///
    /// The `vtable` customizes the behavior of a `Vmo`. For each operation
    /// on the `Vmo`, the associated function in the `vtable` will be called.
    ///
    /// # Safety
    ///
    /// The behavior of the returned `Vmo` is undefined if the contract defined
    /// in [`RawVmoVTable`]'s documentation is not upheld.
    #[inline]
    #[must_use]
    pub const unsafe fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
        // Safety: ensured by caller
        unsafe { Self::from_raw(RawVmo { data, vtable }) }
    }

    /// Creates a new `Vmo` from a [`RawVmo`].
    ///
    /// # Safety
    ///
    /// The behavior of the returned `Vmo` is undefined if the contract defined
    /// in [`RawVmo`]'s and [`RawVmoVTable`]'s documentation is not upheld.
    #[inline]
    #[must_use]
    pub const unsafe fn from_raw(vmo: RawVmo) -> Self {
        Self {
            vmo,
            // Placeholder label until the caller assigns one via [`Vmo::named`].
            name: "<unnamed mystery VMO>",
        }
    }

    /// Add an arbitrary user-defined name to this `Vmo`.
    pub fn named(mut self, name: &'static str) -> Self {
        self.name = name;
        self
    }

    /// Returns this `Vmo`'s name, if it was given one using the [`Vmo::named`]
    /// method.
    pub fn name(&self) -> &'static str {
        self.name
    }

    /// Returns the implementation-reported length of this VMO.
    ///
    /// NOTE(review): judging by the "UNIT: frames" comments below, the unit
    /// appears to be frames rather than bytes — confirm and document.
    pub fn len(&self) -> usize {
        unsafe { (self.vmo.vtable.len)(self.vmo.data) }
    }

    /// Returns `true` if this VMO has a content source.
    ///
    /// NOTE(review): `content_source` is currently `todo!()`, so this panics.
    pub fn has_content_source(&self) -> bool {
        self.content_source().is_some()
    }

    // Not implemented yet; always panics via `todo!()`.
    pub fn content_source(&self) -> Option<()> {
        todo!()
    }

    /// Gets the `data` pointer used to create this `Vmo`.
    #[inline]
    #[must_use]
    pub fn data(&self) -> *const () {
        self.vmo.data
    }

    /// Gets the `vtable` pointer used to create this `Vmo`.
    #[inline]
    #[must_use]
    pub fn vtable(&self) -> &'static RawVmoVTable {
        self.vmo.vtable
    }

    // Acquire frames backing the given `range`. After this call succeeds, accesses following the
    // given `access_rules` MUST NOT fault. (Original comment said "Release" — copy/paste slip.)
    // UNIT: frames
    pub fn acquire<R>(
        &self,
        range: R,
        access_rules: AccessRules,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        // NOTE(review): iterating a `Result` yields only the `Ok` value, so a
        // `bound_check` failure is silently discarded here — an out-of-bounds
        // range produces an *empty* iterator rather than an error. Confirm
        // this behavior is intended.
        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(move |idx| unsafe {
                (self.vmo.vtable.acquire)(self.vmo.data, idx, access_rules).transpose()
            });

        fallible_iterator::convert(i)
    }

    // Release the frames backing the given `range`. After this call succeeds, accesses to those
    // frames MUST fault. Yields the released frames.
    // UNIT: frames
    pub fn release<R>(
        &self,
        range: R,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        // NOTE(review): as in `acquire`, a `bound_check` Err is dropped here.
        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });

        fallible_iterator::convert(i)
    }

    // Clear the frames in the given `range`, yielding the frames that were replaced.
    // UNIT: frames
    pub fn clear<R>(
        &self,
        range: R,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
    where
        R: RangeBounds<usize>,
    {
        let range = self.bound_check(range);

        // NOTE(review): as in `acquire`, a `bound_check` Err is dropped here.
        let i = range
            .into_iter()
            .flat_map(|r| r)
            .filter_map(|idx| unsafe { (self.vmo.vtable.clear)(self.vmo.data, idx).transpose() });

        fallible_iterator::convert(i)
    }

    // Grow the VMO to `new_size` (guaranteed to be larger than or equal to the current size).
    fn grow(&self, new_len: usize) -> crate::Result<()> {
        debug_assert!(new_len >= self.len());

        unsafe { (self.vmo.vtable.resize)(self.vmo.data, new_len)? };

        Ok(())
    }

    // Shrink the VMO to `new_size` (guaranteed to be smaller than or equal to the current size).
    // After this call succeeds, all accesses outside the new range MUST fault.
    // UNIT: frames
    pub fn shrink(
        &self,
        new_len: usize,
    ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> {
        debug_assert!(new_len <= self.len());

        let old_len = self.len();

        // NOTE(review): `todo!()` makes everything below unreachable (`old_len`
        // is also unused); the commented-out code sketches the intended
        // resize-then-release flow.
        todo!();
        fallible_iterator::empty()
        // unsafe {
        //     (self.vmo.vtable.resize)(self.vmo.data, new_len)?;
        // };
        //
        // let i = (new_len..old_len)
        //     .into_iter()
        //     .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
        //
        // fallible_iterator::convert(i)
    }

    // Normalizes `range` into a concrete `start..end` and validates it against `self.len()`.
    #[inline]
    fn bound_check<R>(&self, range: R) -> crate::Result<Range<usize>>
    where
        R: RangeBounds<usize>,
    {
        let start = match range.start_bound() {
            Bound::Included(b) => *b,
            // NOTE(review): `*b + 1` overflows for a `usize::MAX` excluded
            // bound (same for the included end bound below) — confirm
            // acceptable for this API.
            Bound::Excluded(b) => *b + 1,
            Bound::Unbounded => 0,
        };
        let end = match range.end_bound() {
            Bound::Included(b) => *b + 1,
            Bound::Excluded(b) => *b,
            Bound::Unbounded => self.len(),
        };

        // Only the upper bound is validated; `start > end` simply yields an
        // empty range when iterated.
        ensure!(end <= self.len());

        Ok(start..end)
    }
}
283
+
284
+
// ===== impl RawVmo =====

impl RawVmo {
    /// Creates a new `RawVmo` from the provided `data` pointer and `vtable`.
    ///
    /// The `data` pointer can be used to store arbitrary data as required by the VMO implementation.
    /// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
    /// The value of this pointer will get passed to all functions that are part
    /// of the `vtable` as the first parameter.
    ///
    /// It is important to consider that the `data` pointer must point to a
    /// thread safe type such as an `Arc`.
    ///
    /// The `vtable` customizes the behavior of a `Vmo`. For each operation
    /// on the `Vmo`, the associated function in the `vtable` will be called.
    #[inline]
    #[must_use]
    pub const fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
        Self { data, vtable }
    }
}
305
+
306
+
// ===== impl RawVmoVTable =====

impl RawVmoVTable {
    /// Assembles a vtable from the given hooks; see the field documentation on
    /// [`RawVmoVTable`] for what each hook must do.
    pub const fn new(
        clone: unsafe fn(*const ()) -> RawVmo,
        acquire: unsafe fn(
            *const (),
            index: usize,
            access_rules: AccessRules,
        ) -> crate::Result<Option<FrameRef>>,
        release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
        clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
        len: unsafe fn(*const ()) -> usize,
        resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
        drop: unsafe fn(*const ()),
    ) -> Self {
        Self {
            clone,
            acquire,
            release,
            clear,
            len,
            resize,
            drop,
        }
    }
}
333
+
334
+
/// Returns a placeholder [`Vmo`] with a null data pointer whose operational
/// hooks all `unreachable!()`; only cloning and dropping it are safe.
pub fn stub_vmo() -> Vmo {
    const WIRED_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        stub_clone,
        stub_acquire,
        stub_release,
        stub_clear,
        stub_len,
        stub_resize,
        stub_drop,
    );

    // Cloning just re-wraps the same (null) pointer; no refcounting needed.
    unsafe fn stub_clone(ptr: *const ()) -> RawVmo {
        debug_assert!(ptr.is_null());
        RawVmo::new(ptr, &WIRED_VMO_VTABLE)
    }

    unsafe fn stub_acquire(
        ptr: *const (),
        _index: usize,
        _access_rules: AccessRules,
    ) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_release(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_clear(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_len(ptr: *const ()) -> usize {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    unsafe fn stub_resize(ptr: *const (), _new_len: usize) -> crate::Result<()> {
        debug_assert!(ptr.is_null());
        unreachable!()
    }
    // Dropping is a no-op: there is nothing behind the null data pointer.
    unsafe fn stub_drop(ptr: *const ()) {
        debug_assert!(ptr.is_null());
    }

    // Safety: the vtable above never dereferences `data`, so null is fine.
    unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) }
}
380
+
381
+
/// A [`Vmo`] implementation backed by individually allocated frames, one slot
/// per frame index.
pub struct PagedVmo<R: lock_api::RawRwLock> {
    // One optional frame per index; `None` means not yet populated. Inline
    // capacity of 64 avoids heap allocation for small VMOs.
    list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>,
    // Allocator used to materialize frames on demand.
    frame_alloc: &'static dyn FrameAllocator,
}
385
+
386
+
impl<R: lock_api::RawRwLock> PagedVmo<R> {
    // Vtable wiring the type-erased `Vmo` hooks to this implementation.
    const RAW_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
        Self::clone_vmo,
        Self::acquire,
        Self::release,
        Self::clear,
        Self::len,
        Self::resize,
        Self::drop_vmo,
    );

    /// Creates an empty paged VMO backed by `frame_alloc`.
    pub fn new(frame_alloc: &'static dyn FrameAllocator) -> Self {
        Self {
            list: RwLock::new(SmallVec::new()),
            frame_alloc,
        }
    }

    /// Type-erases this VMO into a [`Vmo`] handle.
    ///
    /// The `Arc` is leaked into a raw pointer; the vtable's `clone_vmo` /
    /// `drop_vmo` hooks manage its strong count from then on.
    #[inline(always)]
    pub fn into_vmo(self: Arc<Self>) -> Vmo {
        unsafe { Vmo::new(Arc::into_raw(self) as *const (), &Self::RAW_VMO_VTABLE) }
    }

    // Allocates a single page-sized, page-aligned frame from the backing allocator.
    fn allocate_frame(&self) -> FrameRef {
        let layout =
            Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size())
                .unwrap();

        let frames = self.frame_alloc.allocate(layout).unwrap();
        // NOTE(review): assumes the allocator returns exactly one frame for a
        // single-page layout — confirm against the FrameAllocator contract.
        debug_assert_eq!(frames.len(), 1);

        unsafe { FrameRef::from_raw_parts(frames.cast(), self.frame_alloc.clone()) }
    }

    // Not implemented yet; intended to hand out a shared, always-zero frame.
    fn clone_the_zero_frame(&self) -> FrameRef {
        todo!()
    }

    // vtable hook: clone the handle by bumping the Arc's strong count.
    #[inline(always)]
    unsafe fn clone_vmo(vmo: *const ()) -> RawVmo {
        unsafe {
            Arc::increment_strong_count(vmo.cast::<Self>());
        }
        RawVmo::new(vmo, &Self::RAW_VMO_VTABLE)
    }

    // vtable hook: drop the handle by reconstituting (and dropping) the Arc.
    unsafe fn drop_vmo(ptr: *const ()) {
        drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) });
    }

    // vtable hook: acquire the frame at `index`. Not implemented yet
    // (`index`/`access_rules` are currently unused because of the `todo!()`).
    unsafe fn acquire(
        ptr: *const (),
        index: usize,
        access_rules: AccessRules,
    ) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };

        todo!()
    }

    // vtable hook: take the frame at `index` out of its slot, leaving `None`.
    unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        let slot = list
            .get_mut(index)
            .ok_or(anyhow!("index out of bounds"))?
            .take();

        Ok(slot)
    }

    // vtable hook: replace the frame at `index` with the shared zero frame,
    // returning the previous frame. Currently panics because
    // `clone_the_zero_frame` is `todo!()`.
    unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        let prev_frame = list
            .get_mut(index)
            .ok_or(anyhow!("index out of bounds"))?
            .replace(me.clone_the_zero_frame());

        Ok(prev_frame)
    }

    // vtable hook: current number of slots, populated or not.
    unsafe fn len(ptr: *const ()) -> usize {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let list = me.list.read();

        list.len()
    }

    // vtable hook: resize the slot list, filling new slots with `None`.
    // NOTE(review): shrinking silently truncates and drops frames — confirm
    // callers only shrink through `Vmo::shrink`, which releases them first.
    unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> {
        let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() };
        let mut list = me.list.write();

        list.resize(new_len, None);

        Ok(())
    }
}
486
+
487
+
// Work-in-progress alternative VMO design; like `PagedVmo` but with a cached
// shared zero frame.
struct VVmo<R: lock_api::RawRwLock> {
    list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>,
    frame_alloc: &'static dyn FrameAllocator,
    // Shared copy-on-write zero frame handed out for cleared slots.
    the_zero_frame: FrameRef,
}
492
+
493
+
// Accumulates frame churn from a VMO operation so the caller can apply the
// mapping/unmapping (and frame reuse) in one go.
struct Batch {
    freed: SmallVec<[FrameRef; 4]>,
    allocated: SmallVec<[FrameRef; 4]>,
}
497
+
498
+
impl<R: lock_api::RawRwLock> VVmo<R> {
    // Allocates a single page-sized, page-aligned frame.
    fn allocate_one(&self) -> FrameRef {
        let layout =
            Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size())
                .unwrap();
        let frame = self.frame_alloc.allocate(layout).unwrap();
        // NOTE(review): assumes a single-page layout yields exactly one frame.
        debug_assert_eq!(frame.len(), 1);
        unsafe { FrameRef::from_raw_parts(frame.cast(), self.frame_alloc.clone()) }
    }

    // Work-in-progress sketch of frame acquisition; the populated-slot branch
    // is unfinished and `batch` is not used yet.
    pub fn acquire(&self, index: usize, access_rules: AccessRules, batch: &mut Batch) {
        let mut list = self.list.write();

        if let Some(old_frame) = list.get(index).unwrap() {
            // NOTE(review): asserting the frame is NOT unique looks inverted
            // for a copy-on-write check — confirm the intended invariant.
            assert!(!old_frame.is_unique());

            if access_rules.is_read_only() {
            }

        } else {
            let new_frame = self.allocate_one();
            // NOTE(review): `SmallVec::insert` shifts all later slots right;
            // `list[index] = Some(new_frame)` (after growing the list) may be
            // what is intended for slot semantics.
            list.insert(index, Some(new_frame));
            // TODO report new_frame for mapping
        }
    }

    // Unimplemented stubs.
    pub fn release(&self, range: Range<usize>, batch: &mut Batch) {}
    pub fn clear(&self, range: Range<usize>, batch: &mut Batch) {}
}
+6
-2
libs/wavltree/src/cursor.rs
+6
-2
libs/wavltree/src/cursor.rs
···
88
88
pub unsafe fn get_ptr(&self) -> Link<T> {
89
89
self.current
90
90
}
91
-
pub fn get(&self) -> Option<&'a T> {
92
-
unsafe { self.current.map(|ptr| ptr.as_ref()) }
91
+
pub const fn get(&self) -> Option<&'a T> {
92
+
if let Some(ptr) = self.current {
93
+
Some(unsafe { ptr.as_ref() })
94
+
} else {
95
+
None
96
+
}
93
97
}
94
98
pub fn get_mut(&mut self) -> Option<Pin<&'a mut T>> {
95
99
unsafe { self.current.map(|mut ptr| Pin::new_unchecked(ptr.as_mut())) }
+1
-1
libs/wavltree/src/lib.rs
+1
-1
libs/wavltree/src/lib.rs
···
1510
1510
}
1511
1511
1512
1512
/// Returns `true` if this node is currently linked to a [WAVLTree].
1513
-
pub fn is_linked(&self) -> bool {
1513
+
pub const fn is_linked(&self) -> bool {
1514
1514
let inner = unsafe { &*self.inner.get() };
1515
1515
inner.up.is_some() || inner.left.is_some() || inner.right.is_some()
1516
1516
}
History
5 rounds
0 comments
jonaskruckenberg.de
submitted
#4
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
merge conflicts detected
expand
collapse
expand
collapse
- Cargo.lock:135
- libs/wavltree/src/cursor.rs:88
expand 0 comments
jonaskruckenberg.de
submitted
#3
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#2
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#1
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#0
1 commit
expand
collapse
refactor: separate memory subsystem into own crate