+3494
-5
Diff
round #0
+136
-2
Cargo.lock
+136
-2
Cargo.lock
···
135
135
"windows-targets",
136
136
]
137
137
138
+
[[package]]
139
+
name = "bit-set"
140
+
version = "0.8.0"
141
+
source = "registry+https://github.com/rust-lang/crates.io-index"
142
+
checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
143
+
dependencies = [
144
+
"bit-vec",
145
+
]
146
+
147
+
[[package]]
148
+
name = "bit-vec"
149
+
version = "0.8.0"
150
+
source = "registry+https://github.com/rust-lang/crates.io-index"
151
+
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
152
+
138
153
[[package]]
139
154
name = "bitflags"
140
155
version = "2.9.1"
···
562
577
source = "registry+https://github.com/rust-lang/crates.io-index"
563
578
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
564
579
580
+
[[package]]
581
+
name = "errno"
582
+
version = "0.3.13"
583
+
source = "registry+https://github.com/rust-lang/crates.io-index"
584
+
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
585
+
dependencies = [
586
+
"libc",
587
+
"windows-sys 0.59.0",
588
+
]
589
+
565
590
[[package]]
566
591
name = "escape8259"
567
592
version = "0.5.3"
···
591
616
"criterion",
592
617
]
593
618
619
+
[[package]]
620
+
name = "fastrand"
621
+
version = "2.3.0"
622
+
source = "registry+https://github.com/rust-lang/crates.io-index"
623
+
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
624
+
594
625
[[package]]
595
626
name = "fdt"
596
627
version = "0.1.0"
···
952
983
"cordyceps",
953
984
"cpu-local",
954
985
"criterion",
955
-
"fastrand",
986
+
"fastrand 0.1.0",
956
987
"futures",
957
988
"lazy_static",
958
989
"loom",
···
995
1026
"cranelift-entity",
996
1027
"cranelift-frontend",
997
1028
"fallible-iterator",
998
-
"fastrand",
1029
+
"fastrand 0.1.0",
999
1030
"fdt",
1000
1031
"futures",
1001
1032
"gimli",
···
1119
1150
"vcpkg",
1120
1151
]
1121
1152
1153
+
[[package]]
1154
+
name = "linux-raw-sys"
1155
+
version = "0.9.4"
1156
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1157
+
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
1158
+
1122
1159
[[package]]
1123
1160
name = "litemap"
1124
1161
version = "0.8.0"
···
1195
1232
"regex-automata 0.4.9",
1196
1233
]
1197
1234
1235
+
[[package]]
1236
+
name = "mem"
1237
+
version = "0.1.0"
1238
+
dependencies = [
1239
+
"anyhow",
1240
+
"cordyceps",
1241
+
"cpu-local",
1242
+
"fallible-iterator",
1243
+
"lock_api",
1244
+
"mycelium-bitfield",
1245
+
"pin-project",
1246
+
"proptest",
1247
+
"rand",
1248
+
"rand_chacha",
1249
+
"smallvec",
1250
+
"wavltree",
1251
+
]
1252
+
1198
1253
[[package]]
1199
1254
name = "memchr"
1200
1255
version = "2.7.5"
···
1467
1522
"yansi",
1468
1523
]
1469
1524
1525
+
[[package]]
1526
+
name = "proptest"
1527
+
version = "1.7.0"
1528
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1529
+
checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f"
1530
+
dependencies = [
1531
+
"bit-set",
1532
+
"bit-vec",
1533
+
"bitflags",
1534
+
"lazy_static",
1535
+
"num-traits",
1536
+
"rand",
1537
+
"rand_chacha",
1538
+
"rand_xorshift",
1539
+
"regex-syntax 0.8.5",
1540
+
"rusty-fork",
1541
+
"tempfile",
1542
+
"unarray",
1543
+
]
1544
+
1545
+
[[package]]
1546
+
name = "quick-error"
1547
+
version = "1.2.3"
1548
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1549
+
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
1550
+
1470
1551
[[package]]
1471
1552
name = "quote"
1472
1553
version = "1.0.40"
···
1511
1592
"getrandom",
1512
1593
]
1513
1594
1595
+
[[package]]
1596
+
name = "rand_xorshift"
1597
+
version = "0.4.0"
1598
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1599
+
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
1600
+
dependencies = [
1601
+
"rand_core",
1602
+
]
1603
+
1514
1604
[[package]]
1515
1605
name = "rayon"
1516
1606
version = "1.10.0"
···
1619
1709
"semver",
1620
1710
]
1621
1711
1712
+
[[package]]
1713
+
name = "rustix"
1714
+
version = "1.0.8"
1715
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1716
+
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
1717
+
dependencies = [
1718
+
"bitflags",
1719
+
"errno",
1720
+
"libc",
1721
+
"linux-raw-sys",
1722
+
"windows-sys 0.59.0",
1723
+
]
1724
+
1622
1725
[[package]]
1623
1726
name = "rustversion"
1624
1727
version = "1.0.21"
1625
1728
source = "registry+https://github.com/rust-lang/crates.io-index"
1626
1729
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
1627
1730
1731
+
[[package]]
1732
+
name = "rusty-fork"
1733
+
version = "0.3.0"
1734
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1735
+
checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f"
1736
+
dependencies = [
1737
+
"fnv",
1738
+
"quick-error",
1739
+
"tempfile",
1740
+
"wait-timeout",
1741
+
]
1742
+
1628
1743
[[package]]
1629
1744
name = "ryu"
1630
1745
version = "1.0.20"
···
1788
1903
source = "registry+https://github.com/rust-lang/crates.io-index"
1789
1904
checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a"
1790
1905
1906
+
[[package]]
1907
+
name = "tempfile"
1908
+
version = "3.20.0"
1909
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1910
+
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
1911
+
dependencies = [
1912
+
"fastrand 2.3.0",
1913
+
"getrandom",
1914
+
"once_cell",
1915
+
"rustix",
1916
+
"windows-sys 0.59.0",
1917
+
]
1918
+
1791
1919
[[package]]
1792
1920
name = "thiserror"
1793
1921
version = "2.0.12"
···
2093
2221
"spin",
2094
2222
]
2095
2223
2224
+
[[package]]
2225
+
name = "unarray"
2226
+
version = "0.1.4"
2227
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2228
+
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
2229
+
2096
2230
[[package]]
2097
2231
name = "unicode-ident"
2098
2232
version = "1.0.18"
+27
libs/mem/Cargo.toml
+27
libs/mem/Cargo.toml
···
1
+
[package]
2
+
name = "mem"
3
+
version.workspace = true
4
+
edition.workspace = true
5
+
authors.workspace = true
6
+
license.workspace = true
7
+
8
+
[dependencies]
9
+
cpu-local.workspace = true
10
+
11
+
# 3rd-party dependencies
12
+
mycelium-bitfield.workspace = true
13
+
anyhow.workspace = true
14
+
cordyceps.workspace = true
15
+
pin-project.workspace = true
16
+
lock_api.workspace = true
17
+
fallible-iterator.workspace = true
18
+
smallvec.workspace = true
19
+
wavltree.workspace = true
20
+
rand_chacha.workspace = true
21
+
rand.workspace = true
22
+
23
+
[dev-dependencies]
24
+
proptest = "1.7.0"
25
+
26
+
[lints]
27
+
workspace = true
+13
libs/mem/proptest-regressions/frame.txt
+13
libs/mem/proptest-regressions/frame.txt
···
1
+
# Seeds for failure cases proptest has generated in the past. It is
2
+
# automatically read and these particular cases re-run before any
3
+
# novel cases are generated.
4
+
#
5
+
# It is recommended to check this file in to source control so that
6
+
# everyone who runs the test benefits from these saved cases.
7
+
cc 4cf994999dd04e4312e6dd0f9601044b488e1eda3d9c18cdfd57ac4a3e1b00fc # shrinks to num_frames = 0, area_start = 0, alloc_frames = 1
8
+
cc 3a702a85b8b8ece9062ec02861bb17665fa95817c7b65a2897b2a7db347db322 # shrinks to num_frames = 292, area_start = 0, alloc_frames = 257
9
+
cc 3065cda233769bdf9b16f3f134e65dcfe170c9a9462cfb013139b9203a43c6c7 # shrinks to num_frames = 512, area_start = 4096, alloc_frames = 257
10
+
cc d333ce22c6888222b53fa6d21bd2c29aece2aaf1266c7251b2deb86f679221c5 # shrinks to num_frames = 2357, area_start = 3814267094354915328, alloc_frames = 354
11
+
cc 14f06bd08feb57c49cd25113a630c65e48383d6666178b7b3c157099b40d6286 # shrinks to num_frames = 1421, area_start = 12923327278880337920, alloc_frames = 257
12
+
cc 007d0fba2f9391c80693c16b411362c67d3be3995856f30e7352aa40e70bb7cc # shrinks to num_frames = 82, area_start = 5938167848445603840, alloc_frames = 20
13
+
cc 88599b677f8f36a1f4cc363c75d296624989cbefa59b120d7195e209a1a8e897 # shrinks to num_frames = 741, area_start = 9374927382302433280, alloc_frames = 231
+69
libs/mem/src/access_rules.rs
+69
libs/mem/src/access_rules.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mycelium_bitfield::bitfield! {
9
+
/// Rules that dictate how a region of virtual memory may be accessed.
10
+
///
11
+
/// # W^X
12
+
///
13
+
/// In order to prevent malicious code execution as proactively as possible,
14
+
/// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
15
+
/// through the [`WriteOrExecute`] enum field.
16
+
#[derive(PartialEq, Eq)]
17
+
pub struct AccessRules<u8> {
18
+
/// If set, reading from the memory region is allowed.
19
+
pub const READ: bool;
20
+
/// Whether executing, or writing this memory region is allowed (or neither).
21
+
pub const WRITE_OR_EXECUTE: WriteOrExecute;
22
+
/// If set, requires code in the memory region to use aarch64 Branch Target Identification.
23
+
/// Does nothing on non-aarch64 architectures.
24
+
pub const BTI: bool;
25
+
}
26
+
}
27
+
28
+
/// Whether executing, or writing this memory region is allowed (or neither).
29
+
///
30
+
/// This is an enum to enforce [`W^X`] at the type-level.
31
+
///
32
+
/// [`W^X`]: AccessRules
33
+
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
34
+
#[repr(u8)]
35
+
pub enum WriteOrExecute {
36
+
/// Neither writing nor execution of the memory region is allowed.
37
+
Neither = 0b00,
38
+
/// Writing to the memory region is allowed.
39
+
Write = 0b01,
40
+
/// Executing code from the memory region is allowed.
41
+
Execute = 0b10,
42
+
}
43
+
44
+
// ===== impl WriteOrExecute =====
45
+
46
+
impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
47
+
type Error = core::convert::Infallible;
48
+
49
+
/// The number of bits required to represent a value of this type.
50
+
const BITS: u32 = 2;
51
+
52
+
#[inline]
53
+
fn try_from_bits(bits: u8) -> Result<Self, Self::Error> {
54
+
match bits {
55
+
b if b == Self::Neither as u8 => Ok(Self::Neither),
56
+
b if b == Self::Write as u8 => Ok(Self::Write),
57
+
b if b == Self::Execute as u8 => Ok(Self::Execute),
58
+
_ => {
59
+
// this should never happen unless the bitpacking code is broken
60
+
unreachable!("invalid memory region access rules {bits:#b}")
61
+
}
62
+
}
63
+
}
64
+
65
+
#[inline]
66
+
fn into_bits(self) -> u8 {
67
+
self as u8
68
+
}
69
+
}
+1011
libs/mem/src/address_space.rs
+1011
libs/mem/src/address_space.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mod batch;
9
+
mod region;
10
+
11
+
use alloc::boxed::Box;
12
+
use core::alloc::Layout;
13
+
use core::num::NonZeroUsize;
14
+
use core::ops::{Bound, ControlFlow, Range};
15
+
use core::pin::Pin;
16
+
use core::ptr::NonNull;
17
+
18
+
use anyhow::{format_err, Context};
19
+
use rand::distr::Uniform;
20
+
use rand::Rng;
21
+
use rand_chacha::ChaCha20Rng;
22
+
use region::AddressSpaceRegion;
23
+
use wavltree::{CursorMut, WAVLTree};
24
+
25
+
use crate::access_rules::AccessRules;
26
+
use crate::address_space::batch::Batch;
27
+
use crate::utils::assert_unsafe_precondition_;
28
+
use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress};
29
+
30
+
pub unsafe trait RawAddressSpace {
31
+
/// The smallest addressable chunk of memory of this address space. All address argument provided
32
+
/// to methods of this type (both virtual and physical) must be aligned to this.
33
+
const PAGE_SIZE: usize;
34
+
const VIRT_ADDR_BITS: u32;
35
+
36
+
const PAGE_SIZE_LOG_2: u8 = (Self::PAGE_SIZE - 1).count_ones() as u8;
37
+
const CANONICAL_ADDRESS_MASK: usize = !((1 << (Self::VIRT_ADDR_BITS)) - 1);
38
+
39
+
/// The [`Flush`] implementation for this address space.
40
+
type Flush: Flush;
41
+
42
+
/// Return a new, empty flush for this address space.
43
+
fn flush(&self) -> Self::Flush;
44
+
45
+
/// Return the corresponding [`PhysicalAddress`] and [`AccessRules`] for the given
46
+
/// [`VirtualAddress`] if mapped.
47
+
fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)>;
48
+
49
+
/// Map a contiguous range of `len` virtual addresses to `len` physical addresses with the
50
+
/// specified access rules.
51
+
///
52
+
/// If this returns `Ok`, the mapping is added to the raw address space and all future
53
+
/// accesses to the virtual address range will translate to accesses of the physical address
54
+
/// range.
55
+
///
56
+
/// # Safety
57
+
///
58
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
59
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
60
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
61
+
///
62
+
/// # Errors
63
+
///
64
+
/// Returning `Err` indicates the mapping cannot be established and the virtual address range
65
+
/// remains unaltered.
66
+
unsafe fn map(
67
+
&mut self,
68
+
virt: VirtualAddress,
69
+
phys: PhysicalAddress,
70
+
len: NonZeroUsize,
71
+
access_rules: AccessRules,
72
+
flush: &mut Self::Flush,
73
+
) -> crate::Result<()>;
74
+
75
+
/// Unmap a contiguous range of `len` virtual addresses.
76
+
///
77
+
/// After this returns all accesses to the virtual address region will cause a fault.
78
+
///
79
+
/// # Safety
80
+
///
81
+
/// - `virt..virt+len` must be mapped
82
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
83
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
84
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
85
+
unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize, flush: &mut Self::Flush);
86
+
87
+
/// Set the [`AccessRules`] for a contiguous range of `len` virtual addresses.
88
+
///
89
+
/// After this returns all accesses to the virtual address region must follow the
90
+
/// specified `AccessRules` or cause a fault.
91
+
///
92
+
/// # Safety
93
+
///
94
+
/// - `virt..virt+len` must be mapped
95
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
96
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
97
+
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
98
+
unsafe fn set_access_rules(
99
+
&mut self,
100
+
virt: VirtualAddress,
101
+
len: NonZeroUsize,
102
+
access_rules: AccessRules,
103
+
flush: &mut Self::Flush,
104
+
);
105
+
}
106
+
107
+
/// A type that can flush changes made to a [`RawAddressSpace`].
108
+
///
109
+
/// Note: [`Flush`] is purely optional, it exists so implementation MAY batch
110
+
/// Note that the implementation is not required to delay materializing changes until [`Flush::flush`]
111
+
/// is called.
112
+
pub trait Flush {
113
+
/// Flush changes made to its [`RawAddressSpace`].
114
+
///
115
+
/// If this returns `Ok`, changes made to the address space are REQUIRED to take effect across
116
+
/// all affected threads/CPUs.
117
+
///
118
+
/// # Errors
119
+
///
120
+
/// If this returns `Err`, if flushing the changes failed. The changes, or a subset of them, might
121
+
/// still have taken effect across all or some of the threads/CPUs.
122
+
fn flush(self) -> crate::Result<()>;
123
+
}
124
+
125
+
pub struct AddressSpace<R: RawAddressSpace> {
126
+
raw: R,
127
+
regions: WAVLTree<AddressSpaceRegion>,
128
+
batched_raw: Batch,
129
+
max_range: Range<VirtualAddress>,
130
+
rng: Option<ChaCha20Rng>,
131
+
}
132
+
133
+
impl<R: RawAddressSpace> AddressSpace<R> {
134
+
pub fn new(raw: R, rng: Option<ChaCha20Rng>) -> Self {
135
+
Self {
136
+
raw,
137
+
regions: WAVLTree::new(),
138
+
batched_raw: Batch::new(),
139
+
max_range: VirtualAddress::MIN..VirtualAddress::MAX,
140
+
rng,
141
+
}
142
+
}
143
+
144
+
/// Attempts to reserve a region of virtual memory.
145
+
///
146
+
/// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees
147
+
/// of `layout`. Access to this region must obey the provided `rules` or cause a hardware fault.
148
+
///
149
+
/// The returned region may have a larger size than specified by `layout.size()`, and may or may
150
+
/// not have its contents initialized.
151
+
///
152
+
/// The returned region of virtual memory remains mapped as long as it is [*currently mapped*]
153
+
/// and the address space type itself has not been dropped.
154
+
///
155
+
/// [*currently mapped*]: #currently-mapped-memory
156
+
///
157
+
/// # Errors
158
+
///
159
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
160
+
/// constraints, virtual memory is exhausted, or mapping otherwise fails.
161
+
pub fn map(
162
+
&mut self,
163
+
layout: Layout,
164
+
access_rules: AccessRules,
165
+
) -> crate::Result<NonNull<[u8]>> {
166
+
#[cfg(debug_assertions)]
167
+
self.assert_valid("[AddressSpace::map]");
168
+
169
+
let layout = layout.align_to(R::PAGE_SIZE).unwrap();
170
+
171
+
let spot = self
172
+
.find_spot_for(layout)
173
+
.context(format_err!("cannot find free spot for layout {layout:?}"))?;
174
+
175
+
// TODO "relaxed" frame provider
176
+
let region = AddressSpaceRegion::new(spot, layout, access_rules);
177
+
178
+
let region = self.regions.insert(Box::pin(region));
179
+
180
+
// TODO OPTIONAL eagerly commit a few pages
181
+
182
+
self.batched_raw.flush_changes(&mut self.raw)?;
183
+
184
+
Ok(region.as_non_null())
185
+
}
186
+
187
+
/// Behaves like [`map`][AddressSpace::map], but also *guarantees* the virtual memory region
188
+
/// is zero-initialized.
189
+
///
190
+
/// # Errors
191
+
///
192
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
193
+
/// constraints, virtual memory is exhausted, or mapping otherwise fails.
194
+
pub fn map_zeroed(
195
+
&mut self,
196
+
layout: Layout,
197
+
access_rules: AccessRules,
198
+
) -> crate::Result<NonNull<[u8]>> {
199
+
#[cfg(debug_assertions)]
200
+
self.assert_valid("[AddressSpace::map_zeroed]");
201
+
202
+
let layout = layout.align_to(R::PAGE_SIZE).unwrap();
203
+
204
+
let spot = self
205
+
.find_spot_for(layout)
206
+
.context(format_err!("cannot find free spot for layout {layout:?}"))?;
207
+
208
+
// TODO "zeroed" frame provider
209
+
let region = AddressSpaceRegion::new(spot, layout, access_rules);
210
+
211
+
let region = self.regions.insert(Box::pin(region));
212
+
213
+
// TODO OPTIONAL eagerly commit a few pages
214
+
215
+
self.batched_raw.flush_changes(&mut self.raw)?;
216
+
217
+
Ok(region.as_non_null())
218
+
}
219
+
220
+
/// Unmaps the virtual memory region referenced by `ptr`.
221
+
///
222
+
/// # Safety
223
+
///
224
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
225
+
/// * `layout` must [*fit*] that region of memory.
226
+
///
227
+
/// [*currently mapped*]: #currently-mapped-memory
228
+
/// [*fit*]: #memory-fitting
229
+
pub unsafe fn unmap(&mut self, ptr: NonNull<u8>, layout: Layout) {
230
+
#[cfg(debug_assertions)]
231
+
self.assert_valid("[AddressSpace::unmap]");
232
+
233
+
// Safety: responsibility of caller
234
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
235
+
236
+
// Safety: responsibility of caller
237
+
let mut region = unsafe { cursor.remove().unwrap_unchecked() };
238
+
239
+
region.decommit(.., &mut self.batched_raw).unwrap();
240
+
241
+
self.batched_raw.flush_changes(&mut self.raw).unwrap();
242
+
}
243
+
244
+
/// Attempts to extend the virtual memory reservation.
245
+
///
246
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
247
+
/// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
248
+
/// this, the address space may extend the mapping referenced by `ptr` to fit the new layout.
249
+
///
250
+
/// TODO describe how extending a file-backed, of DMA-backed mapping works
251
+
///
252
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
253
+
///
254
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
255
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
256
+
/// even if the mapping was grown in-place. The newly returned pointer is the only valid pointer
257
+
/// for accessing this region now.
258
+
///
259
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
260
+
/// this address space, and the contents of the region are unaltered.
261
+
///
262
+
/// [*Undefined Behavior*]
263
+
///
264
+
/// # Safety
265
+
///
266
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
267
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
268
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
269
+
///
270
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
271
+
///
272
+
/// [*currently mapped*]: #currently-mapped-memory
273
+
/// [*fit*]: #memory-fitting
274
+
///
275
+
/// # Errors
276
+
///
277
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
278
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
279
+
pub unsafe fn grow(
280
+
&mut self,
281
+
ptr: NonNull<u8>,
282
+
old_layout: Layout,
283
+
new_layout: Layout,
284
+
) -> crate::Result<NonNull<[u8]>> {
285
+
#[cfg(debug_assertions)]
286
+
self.assert_valid("[AddressSpace::grow]");
287
+
288
+
assert_unsafe_precondition_!(
289
+
"TODO",
290
+
(old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
291
+
old_layout.align().is_multiple_of(page_size)
292
+
}
293
+
);
294
+
295
+
assert_unsafe_precondition_!(
296
+
"TODO",
297
+
(new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
298
+
new_layout.align().is_multiple_of(page_size)
299
+
}
300
+
);
301
+
302
+
if new_layout == old_layout {
303
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
304
+
}
305
+
306
+
assert_unsafe_precondition_!(
307
+
"TODO",
308
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
309
+
new_layout.size() >= old_layout.size()
310
+
}
311
+
);
312
+
313
+
if let Ok(ptr) = unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) } {
314
+
Ok(ptr)
315
+
} else {
316
+
unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
317
+
}
318
+
}
319
+
320
+
/// Behaves like [`grow`][AddressSpace::grow], only grows the region if it can be grown in-place.
321
+
///
322
+
/// # Safety
323
+
///
324
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
325
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
326
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
327
+
///
328
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
329
+
///
330
+
/// [*currently mapped*]: #currently-mapped-memory
331
+
/// [*fit*]: #memory-fitting
332
+
///
333
+
/// # Errors
334
+
///
335
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
336
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
337
+
pub unsafe fn grow_in_place(
338
+
&mut self,
339
+
ptr: NonNull<u8>,
340
+
old_layout: Layout,
341
+
new_layout: Layout,
342
+
) -> crate::Result<NonNull<[u8]>> {
343
+
#[cfg(debug_assertions)]
344
+
self.assert_valid("[AddressSpace::grow_in_place]");
345
+
346
+
assert_unsafe_precondition_!(
347
+
"TODO",
348
+
(old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
349
+
old_layout.align().is_multiple_of(page_size)
350
+
}
351
+
);
352
+
353
+
assert_unsafe_precondition_!(
354
+
"TODO",
355
+
(new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
356
+
new_layout.align().is_multiple_of(page_size)
357
+
}
358
+
);
359
+
360
+
if new_layout == old_layout {
361
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
362
+
}
363
+
364
+
assert_unsafe_precondition_!(
365
+
"TODO",
366
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
367
+
new_layout.size() >= old_layout.size()
368
+
}
369
+
);
370
+
371
+
unsafe { self.grow_in_place_inner(ptr, old_layout, new_layout) }
372
+
}
373
+
374
+
/// Attempts to shrink the virtual memory reservation.
375
+
///
376
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
377
+
/// mapped region. The pointer is suitable for holding data described by `new_layout`. To accomplish
378
+
/// this, the address space may shrink the mapping referenced by `ptr` to fit the new layout.
379
+
///
380
+
/// TODO describe how shrinking a file-backed, of DMA-backed mapping works
381
+
///
382
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
383
+
///
384
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
385
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
386
+
/// even if the mapping was shrunk in-place. The newly returned pointer is the only valid pointer
387
+
/// for accessing this region now.
388
+
///
389
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
390
+
/// this address space, and the contents of the region are unaltered.
391
+
///
392
+
/// [*Undefined Behavior*]
393
+
///
394
+
/// # Safety
395
+
///
396
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
397
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
398
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
399
+
///
400
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
401
+
///
402
+
/// [*currently mapped*]: #currently-mapped-memory
403
+
/// [*fit*]: #memory-fitting
404
+
///
405
+
/// # Errors
406
+
///
407
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
408
+
/// constraints, virtual memory is exhausted, or shrinking otherwise fails.
409
+
pub unsafe fn shrink(
410
+
&mut self,
411
+
ptr: NonNull<u8>,
412
+
old_layout: Layout,
413
+
new_layout: Layout,
414
+
) -> crate::Result<NonNull<[u8]>> {
415
+
#[cfg(debug_assertions)]
416
+
self.assert_valid("[AddressSpace::shrink]");
417
+
418
+
assert_unsafe_precondition_!(
419
+
"TODO",
420
+
(old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
421
+
old_layout.align().is_multiple_of(page_size)
422
+
}
423
+
);
424
+
425
+
assert_unsafe_precondition_!(
426
+
"TODO",
427
+
(new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
428
+
new_layout.align().is_multiple_of(page_size)
429
+
}
430
+
);
431
+
432
+
if new_layout == old_layout {
433
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
434
+
}
435
+
436
+
assert_unsafe_precondition_!(
437
+
"TODO",
438
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
439
+
new_layout.size() <= old_layout.size()
440
+
}
441
+
);
442
+
443
+
if let Ok(ptr) = unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) } {
444
+
Ok(ptr)
445
+
} else {
446
+
unsafe { self.reallocate_region(ptr, old_layout, new_layout) }
447
+
}
448
+
}
449
+
450
+
/// Behaves like [`shrink`][AddressSpace::shrink], but *guarantees* that the region will be
451
+
/// shrunk in-place. Both `old_layout` and `new_layout` need to be at least page aligned.
452
+
///
453
+
/// # Safety
454
+
///
455
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
456
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
457
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
458
+
///
459
+
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
460
+
///
461
+
/// [*currently mapped*]: #currently-mapped-memory
462
+
/// [*fit*]: #memory-fitting
463
+
///
464
+
/// # Errors
465
+
///
466
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
467
+
/// constraints, virtual memory is exhausted, or growing otherwise fails.
468
+
pub unsafe fn shrink_in_place(
469
+
&mut self,
470
+
ptr: NonNull<u8>,
471
+
old_layout: Layout,
472
+
new_layout: Layout,
473
+
) -> crate::Result<NonNull<[u8]>> {
474
+
#[cfg(debug_assertions)]
475
+
self.assert_valid("[AddressSpace::shrink_in_place]");
476
+
477
+
assert_unsafe_precondition_!(
478
+
"TODO",
479
+
(old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => {
480
+
old_layout.align().is_multiple_of(page_size)
481
+
}
482
+
);
483
+
484
+
assert_unsafe_precondition_!(
485
+
"TODO",
486
+
(new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => {
487
+
new_layout.align().is_multiple_of(page_size)
488
+
}
489
+
);
490
+
491
+
if new_layout == old_layout {
492
+
return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
493
+
}
494
+
495
+
assert_unsafe_precondition_!(
496
+
"TODO",
497
+
(old_layout: Layout = old_layout, new_layout: Layout = new_layout) => {
498
+
new_layout.size() <= old_layout.size()
499
+
}
500
+
);
501
+
502
+
unsafe { self.shrink_in_place_inner(ptr, old_layout, new_layout) }
503
+
}
504
+
505
+
/// Updates the access rules for the virtual memory region referenced by `ptr`.
506
+
///
507
+
/// If this returns `Ok`, access to this region must obey the new `rules` or cause a hardware fault.
508
+
///
509
+
/// If this method returns `Err`, the access rules of the memory region are unaltered.
510
+
///
511
+
/// # Safety
512
+
///
513
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
514
+
/// * `layout` must [*fit*] that region of memory.
515
+
///
516
+
/// [*currently mapped*]: #currently-mapped-memory
517
+
/// [*fit*]: #memory-fitting
518
+
pub unsafe fn update_access_rules(
519
+
&mut self,
520
+
ptr: NonNull<u8>,
521
+
layout: Layout,
522
+
access_rules: AccessRules,
523
+
) -> crate::Result<()> {
524
+
#[cfg(debug_assertions)]
525
+
self.assert_valid("[AddressSpace::update_access_rules]");
526
+
527
+
// Safety: responsibility of caller
528
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
529
+
530
+
// Safety: responsibility of caller
531
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
532
+
533
+
region.update_access_rules(access_rules, &mut self.batched_raw)?;
534
+
535
+
self.batched_raw.flush_changes(&mut self.raw)?;
536
+
537
+
Ok(())
538
+
}
539
+
540
+
/// Attempts to fill the virtual memory region referenced by `ptr` with zeroes.
541
+
///
542
+
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
543
+
/// mapped region. The pointer is suitable for holding data described by `new_layout` and is
544
+
/// *guaranteed* to be zero-initialized. To accomplish this, the address space may remap the
545
+
/// virtual memory region.
546
+
///
547
+
/// TODO describe how clearing a file-backed, of DMA-backed mapping works
548
+
///
549
+
/// The [`AccessRules`] of the new virtual memory region are *the same* at the old ones.
550
+
///
551
+
/// If this returns `Ok`, then ownership of the memory region referenced by `ptr` has been
552
+
/// transferred to this address space. Any access to the old `ptr` is [*Undefined Behavior*],
553
+
/// even if the mapping was cleared in-place. The newly returned pointer is the only valid pointer
554
+
/// for accessing this region now.
555
+
///
556
+
/// If this method returns `Err`, then ownership of the memory region has not been transferred to
557
+
/// this address space, and the contents of the region are unaltered.
558
+
///
559
+
/// [*Undefined Behavior*]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
560
+
///
561
+
/// # Safety
562
+
///
563
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
564
+
/// * `layout` must [*fit*] that region of memory.
565
+
///
566
+
/// [*currently mapped*]: #currently-mapped-memory
567
+
/// [*fit*]: #memory-fitting
568
+
///
569
+
/// # Errors
570
+
///
571
+
/// Returning `Err` indicates the layout does not meet the address space's size or alignment
572
+
/// constraints, clearing a virtual memory region is not supported by the backing storage, or
573
+
/// clearing otherwise fails.
574
+
pub unsafe fn clear(
575
+
&mut self,
576
+
ptr: NonNull<u8>,
577
+
layout: Layout,
578
+
) -> crate::Result<NonNull<[u8]>> {
579
+
#[cfg(debug_assertions)]
580
+
self.assert_valid("[AddressSpace::clear]");
581
+
582
+
// Safety: responsibility of caller
583
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, layout) };
584
+
585
+
// Safety: responsibility of caller
586
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
587
+
588
+
region.clear(&mut self.batched_raw)?;
589
+
590
+
self.batched_raw.flush_changes(&mut self.raw)?;
591
+
592
+
Ok(region.as_non_null())
593
+
}
594
+
595
+
pub fn assert_valid(&self, msg: &str) {
596
+
let mut regions = self.regions.iter();
597
+
598
+
let Some(first_region) = regions.next() else {
599
+
assert!(
600
+
self.regions.is_empty(),
601
+
"{msg}region iterator is empty but tree is not."
602
+
);
603
+
604
+
return;
605
+
};
606
+
607
+
first_region.assert_valid(msg);
608
+
609
+
let mut seen_range = first_region.range().clone();
610
+
611
+
while let Some(region) = regions.next() {
612
+
assert!(
613
+
!region.range().is_overlapping(&seen_range),
614
+
"{msg}region cannot overlap previous region; region={region:?}"
615
+
);
616
+
assert!(
617
+
region.range().start >= self.max_range.start
618
+
&& region.range().end <= self.max_range.end,
619
+
"{msg}region cannot lie outside of max address space range; region={region:?}"
620
+
);
621
+
622
+
seen_range = seen_range.start..region.range().end;
623
+
624
+
region.assert_valid(msg);
625
+
626
+
// TODO assert validity of of VMO against phys addresses
627
+
// let (_phys, access_rules) = self
628
+
// .batched_raw
629
+
// .raw_address_space()
630
+
// .lookup(region.range().start)
631
+
// .unwrap_or_else(|| {
632
+
// panic!("{msg}region base address is not mapped in raw address space region={region:?}")
633
+
// });
634
+
//
635
+
// assert_eq!(
636
+
// access_rules,
637
+
// region.access_rules(),
638
+
// "{msg}region's access rules do not match access rules in raw address space; region={region:?}, expected={:?}, actual={access_rules:?}",
639
+
// region.access_rules(),
640
+
// );
641
+
}
642
+
}
643
+
644
+
/// Attempts to grow a virtual memory region in-place. This method is shared between [`Self::grow`]
/// and [`Self::grow_in_place`].
646
+
///
647
+
/// # Safety
648
+
///
649
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
650
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
651
+
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
652
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
653
+
unsafe fn grow_in_place_inner(
654
+
&mut self,
655
+
ptr: NonNull<u8>,
656
+
old_layout: Layout,
657
+
new_layout: Layout,
658
+
) -> crate::Result<NonNull<[u8]>> {
659
+
// Safety: responsibility of caller
660
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
661
+
662
+
let next_range = cursor.peek_next().map(|region| region.range().clone());
663
+
664
+
// Safety: responsibility of caller
665
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
666
+
667
+
region.grow_in_place(new_layout, next_range, &mut self.batched_raw)?;
668
+
669
+
self.batched_raw.flush_changes(&mut self.raw)?;
670
+
671
+
Ok(region.as_non_null())
672
+
}
673
+
674
+
/// Attempts to shrink a virtual memory region in-place. This method is shared between [`Self::shrink`]
/// and [`Self::shrink_in_place`].
676
+
///
677
+
/// # Safety
678
+
///
679
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
680
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
681
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
682
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
683
+
unsafe fn shrink_in_place_inner(
684
+
&mut self,
685
+
ptr: NonNull<u8>,
686
+
old_layout: Layout,
687
+
new_layout: Layout,
688
+
) -> crate::Result<NonNull<[u8]>> {
689
+
// Safety: responsibility of caller
690
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
691
+
692
+
// Safety: responsibility of caller
693
+
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
694
+
695
+
region.shrink(new_layout, &mut self.batched_raw)?;
696
+
697
+
self.batched_raw.flush_changes(&mut self.raw)?;
698
+
699
+
Ok(region.as_non_null())
700
+
}
701
+
702
+
/// Reallocates a virtual address region. This will unmap and remove the old region, allocating
703
+
/// a new region that will be backed by the old region's physical memory.
704
+
///
705
+
/// # Safety
706
+
///
707
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space.
708
+
/// * `old_layout` must [*fit*] that region (The `new_layout` argument need not fit it.).
709
+
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
710
+
/// * `new_layout.align()` must be multiple of PAGE_SIZE
711
+
unsafe fn reallocate_region(
712
+
&mut self,
713
+
ptr: NonNull<u8>,
714
+
old_layout: Layout,
715
+
new_layout: Layout,
716
+
) -> crate::Result<NonNull<[u8]>> {
717
+
// Safety: responsibility of caller
718
+
let mut cursor = unsafe { get_region_containing_ptr(&mut self.regions, ptr, old_layout) };
719
+
let mut region = unsafe { cursor.remove().unwrap_unchecked() };
720
+
721
+
region.decommit(.., &mut self.batched_raw).unwrap();
722
+
723
+
let (_, _, access_rules) = Pin::into_inner(region).into_parts();
724
+
725
+
let spot = self.find_spot_for(new_layout).context(format_err!(
726
+
"cannot find free spot for layout {new_layout:?}"
727
+
))?;
728
+
729
+
// TODO resize VMO
730
+
731
+
let region = AddressSpaceRegion::new(spot, new_layout, access_rules);
732
+
let region = self.regions.insert(Box::pin(region));
733
+
734
+
// TODO replicate old regions committed frames?
735
+
736
+
self.batched_raw.flush_changes(&mut self.raw)?;
737
+
738
+
Ok(region.as_non_null())
739
+
}
740
+
741
+
/// Find a spot in the address space that satisfies the given `layout` requirements.
742
+
///
743
+
/// If a spot suitable for holding data described by `layout` is found, the base address of the
744
+
/// address range is returned in `Some`. The returned address is already correct aligned to
745
+
/// `layout.align()`.
746
+
///
747
+
/// Returns `None` if no suitable spot was found. This *does not* mean there are no more gaps in
748
+
/// the address space just that the *combination* of `layout.size()` and `layout.align()` cannot
749
+
/// be satisfied *at the moment*. Calls to this method with a different size, alignment, or at a
/// different time might still succeed.
751
+
fn find_spot_for(&mut self, layout: Layout) -> Option<VirtualAddress> {
752
+
// The algorithm we use here - loosely based on Zircon's (Fuchsia's) implementation - is
753
+
// guaranteed to find a spot (if any even exist) with max 2 attempts. Additionally, it works
754
+
// elegantly *with* AND *without* ASLR, picking a random spot or the lowest free spot respectively.
755
+
// Here is how it works:
756
+
// 1. We set up two counters: (see the GapVisitor)
757
+
// - `candidate_spot_count` which we initialize to zero
758
+
// - `target_index` which we either set to a random value between 0..<the maximum number of
759
+
// possible addresses in the address space> if ASLR is requested OR to zero otherwise.
760
+
// 2. We then iterate over all `AddressSpaceRegion`s from lowest to highest looking at the
761
+
// gaps between regions. We count the number of addresses in each gap that satisfy the
762
+
// requested `Layout`s size and alignment and add that to the `candidate_spot_count`.
763
+
// IF the number of spots in the gap is greater than our chosen target index, we pick the
764
+
// spot at the target index and finish. ELSE we *decrement* the target index by the number
765
+
// of spots and continue to the next gap.
766
+
// 3. After we have processed all the gaps, we have EITHER found a suitable spot OR our original
767
+
// guess for `target_index` was too big, in which case we need to retry.
768
+
// 4. When retrying we iterate over all `AddressSpaceRegion`s *again*, but this time we know
769
+
// the *actual* number of possible spots in the address space since we just counted them
770
+
// during the first attempt. We initialize `target_index` to `0..candidate_spot_count`
771
+
// which is guaranteed to return us a spot.
772
+
// IF `candidate_spot_count` is ZERO after the first attempt, there is no point in
773
+
// retrying since we cannot fulfill the requested layout.
774
+
//
775
+
// Note that in practice, we use a binary tree to keep track of regions, and we use binary search
776
+
// to optimize the search for a suitable gap instead of linear iteration.
777
+
778
+
let layout = layout.pad_to_align();
779
+
780
+
// First attempt: guess a random target index
781
+
let max_candidate_spots = self.max_range.size();
782
+
783
+
let target_index: usize = self
784
+
.rng
785
+
.as_mut()
786
+
.map(|prng| prng.sample(Uniform::new(0, max_candidate_spots).unwrap()))
787
+
.unwrap_or_default();
788
+
789
+
// First attempt: visit the binary search tree to find a gap
790
+
let mut v = GapVisitor::new(layout, target_index);
791
+
self.visit_gaps(&mut v);
792
+
793
+
// if we found a spot already we're done
794
+
if let Some(chosen) = v.chosen {
795
+
return Some(chosen);
796
+
}
797
+
798
+
// otherwise, Second attempt: we need to retry with the correct candidate spot count
799
+
// but if we counted no suitable candidate spots during the first attempt, we cannot fulfill
800
+
// the request.
801
+
if v.candidate_spots == 0 {
802
+
return None;
803
+
}
804
+
805
+
// Second attempt: pick a new target_index that's actually fulfillable
806
+
let target_index: usize = self
807
+
.rng
808
+
.as_mut()
809
+
.map(|prng| prng.sample(Uniform::new(0, v.candidate_spots).unwrap()))
810
+
.unwrap_or_default();
811
+
812
+
// Second attempt: visit the binary search tree to find a gap
813
+
let mut v = GapVisitor::new(layout, target_index);
814
+
self.visit_gaps(&mut v);
815
+
816
+
let chosen = v
817
+
.chosen
818
+
.expect("There must be a chosen spot after the first attempt. This is a bug!");
819
+
820
+
// assert!(chosen.is_canonical());
821
+
822
+
Some(chosen)
823
+
}
824
+
825
+
/// Visit all gaps (address ranges not covered by an [`AddressSpaceRegion`]) in this address space
826
+
/// from lowest to highest addresses.
827
+
fn visit_gaps(&self, v: &mut GapVisitor) {
828
+
let Some(root) = self.regions.root().get() else {
829
+
// if the tree is empty, we treat the entire max_range as the gap
830
+
// note that we do not care about the returned ControlFlow, as there is nothing else we
831
+
// could try to find a spot anyway
832
+
let _ = v.visit(self.max_range.clone());
833
+
834
+
return;
835
+
};
836
+
837
+
// see if there is a suitable gap between BEFORE the first address space region
838
+
if v.visit(self.max_range.start..root.subtree_range().start)
839
+
.is_break()
840
+
{
841
+
return;
842
+
}
843
+
844
+
// now comes the main part of the search. we start at the WAVLTree root node and do a
845
+
// binary search for a suitable gap. We use special metadata on each `AddressSpaceRegion`
846
+
// to speed up this search. See `AddressSpaceRegion` for details on how this works.
847
+
848
+
let mut maybe_current = self.regions.root().get();
849
+
let mut already_visited = VirtualAddress::MIN;
850
+
851
+
while let Some(current) = maybe_current {
852
+
// If there is no suitable gap in this entire
853
+
if current.suitable_gap_in_subtree(v.layout()) {
854
+
// First, look at the left subtree
855
+
if let Some(left) = current.left_child() {
856
+
if left.suitable_gap_in_subtree(v.layout())
857
+
&& left.subtree_range().end > already_visited
858
+
{
859
+
maybe_current = Some(left);
860
+
continue;
861
+
}
862
+
863
+
if v.visit(left.subtree_range().end..current.range().start)
864
+
.is_break()
865
+
{
866
+
return;
867
+
}
868
+
}
869
+
870
+
if let Some(right) = current.right_child() {
871
+
if v.visit(current.range().end..right.subtree_range().start)
872
+
.is_break()
873
+
{
874
+
return;
875
+
}
876
+
877
+
if right.suitable_gap_in_subtree(v.layout())
878
+
&& right.subtree_range().end > already_visited
879
+
{
880
+
maybe_current = Some(right);
881
+
continue;
882
+
}
883
+
}
884
+
}
885
+
886
+
already_visited = current.subtree_range().end;
887
+
maybe_current = current.parent();
888
+
}
889
+
890
+
// see if there is a suitable gap between AFTER the last address space region
891
+
if v.visit(root.subtree_range().end..self.max_range.end)
892
+
.is_break()
893
+
{
894
+
return;
895
+
}
896
+
}
897
+
}
898
+
899
+
/// # Safety
900
+
///
901
+
/// * `ptr` must denote a region of memory [*currently mapped*] in this address space, and
902
+
/// * `layout` must [*fit*] that region of memory.
903
+
///
904
+
/// [*currently mapped*]: #currently-mapped-memory
905
+
/// [*fit*]: #memory-fitting
906
+
unsafe fn get_region_containing_ptr(
907
+
regions: &mut WAVLTree<AddressSpaceRegion>,
908
+
ptr: NonNull<u8>,
909
+
layout: Layout,
910
+
) -> CursorMut<'_, AddressSpaceRegion> {
911
+
let addr = VirtualAddress::from_non_null(ptr);
912
+
913
+
let cursor = regions.lower_bound_mut(Bound::Included(&addr));
914
+
915
+
assert_unsafe_precondition_!(
916
+
"TODO",
917
+
(cursor: &CursorMut<AddressSpaceRegion> = &cursor) => cursor.get().is_some()
918
+
);
919
+
920
+
// Safety: The caller guarantees the pointer is currently mapped which means we must have
921
+
// a corresponding address space region for it
922
+
let region = unsafe { cursor.get().unwrap_unchecked() };
923
+
924
+
assert_unsafe_precondition_!(
925
+
"TODO",
926
+
(region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => {
927
+
let range = region.range();
928
+
929
+
range.start.get() <= addr.get() && addr.get() < range.end.get()
930
+
}
931
+
);
932
+
933
+
assert_unsafe_precondition_!(
934
+
"`layout` does not fit memory region",
935
+
(layout: Layout = layout, region: &AddressSpaceRegion = ®ion) => region.layout_fits_region(layout)
936
+
);
937
+
938
+
cursor
939
+
}
940
+
941
+
pub(crate) struct GapVisitor {
942
+
layout: Layout,
943
+
target_index: usize,
944
+
candidate_spots: usize,
945
+
chosen: Option<VirtualAddress>,
946
+
}
947
+
948
+
impl GapVisitor {
949
+
fn new(layout: Layout, target_index: usize) -> Self {
950
+
Self {
951
+
layout,
952
+
target_index,
953
+
candidate_spots: 0,
954
+
chosen: None,
955
+
}
956
+
}
957
+
958
+
pub fn layout(&self) -> Layout {
959
+
self.layout
960
+
}
961
+
962
+
/// Returns the number of spots in the given range that satisfy the layout we require
963
+
fn spots_in_range(&self, range: &Range<VirtualAddress>) -> usize {
964
+
debug_assert!(
965
+
range.start.is_aligned_to(self.layout.align())
966
+
&& range.end.is_aligned_to(self.layout.align())
967
+
);
968
+
969
+
// ranges passed in here can become empty for a number of reasons (aligning might produce ranges
970
+
// where end > start, or the range might be empty to begin with) in either case an empty
971
+
// range means no spots are available
972
+
if range.is_empty() {
973
+
return 0;
974
+
}
975
+
976
+
let range_size = range.size();
977
+
if range_size >= self.layout.size() {
978
+
((range_size - self.layout.size()) >> self.layout.align().ilog2()) + 1
979
+
} else {
980
+
0
981
+
}
982
+
}
983
+
984
+
pub fn visit(&mut self, gap: Range<VirtualAddress>) -> ControlFlow<()> {
985
+
// if we have already chosen a spot, signal the caller to stop
986
+
if self.chosen.is_some() {
987
+
return ControlFlow::Break(());
988
+
}
989
+
990
+
let aligned_gap = gap.checked_align_in(self.layout.align()).unwrap();
991
+
992
+
let spot_count = self.spots_in_range(&aligned_gap);
993
+
994
+
self.candidate_spots += spot_count;
995
+
996
+
if self.target_index < spot_count {
997
+
self.chosen = Some(
998
+
aligned_gap
999
+
.start
1000
+
.checked_add(self.target_index << self.layout.align().ilog2())
1001
+
.unwrap(),
1002
+
);
1003
+
1004
+
ControlFlow::Break(())
1005
+
} else {
1006
+
self.target_index -= spot_count;
1007
+
1008
+
ControlFlow::Continue(())
1009
+
}
1010
+
}
1011
+
}
+336
libs/mem/src/address_space/batch.rs
+336
libs/mem/src/address_space/batch.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::cmp;
9
+
use core::num::{NonZero, NonZeroUsize};
10
+
11
+
use smallvec::SmallVec;
12
+
13
+
use crate::address_space::{Flush, RawAddressSpace};
14
+
use crate::{AccessRules, PhysicalAddress, VirtualAddress};
15
+
16
+
/// [`Batch`] maintains an *unordered* set of batched operations over an `RawAddressSpace`.
17
+
///
18
+
/// Operations are "enqueued" (but unordered) into the batch and executed against the raw address space
19
+
/// when [`Self::flush_changes`] is called. This helps to reduce the number and size of (expensive) TLB
20
+
/// flushes we need to perform. Internally, `Batch` will merge operations if possible to further reduce
21
+
/// this number.
22
+
pub struct Batch {
23
+
ops: SmallVec<[BatchOperation; 4]>,
24
+
}
25
+
26
+
enum BatchOperation {
27
+
Map(MapOperation),
28
+
Unmap(UnmapOperation),
29
+
SetAccessRules(SetAccessRulesOperation),
30
+
}
31
+
32
+
struct MapOperation {
33
+
virt: VirtualAddress,
34
+
phys: PhysicalAddress,
35
+
len: NonZeroUsize,
36
+
access_rules: AccessRules,
37
+
}
38
+
39
+
struct UnmapOperation {
40
+
virt: VirtualAddress,
41
+
len: NonZeroUsize,
42
+
}
43
+
44
+
struct SetAccessRulesOperation {
45
+
virt: VirtualAddress,
46
+
len: NonZeroUsize,
47
+
access_rules: AccessRules,
48
+
}
49
+
50
+
// ===== impl Batch =====
51
+
52
+
impl Batch {
53
+
/// Construct a new empty [`Batch`].
54
+
pub fn new() -> Self {
55
+
Self {
56
+
ops: SmallVec::new(),
57
+
}
58
+
}
59
+
60
+
/// Add a [`map`] operation to the set of batched operations.
61
+
///
62
+
/// # Safety
63
+
///
64
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
65
+
/// - `phys` must be aligned to `Self::PAGE_SIZE`
66
+
/// - `len` must be an integer multiple of `Self::PAGE_SIZE`
67
+
///
68
+
/// [`map`]: RawAddressSpace::map
69
+
pub fn map(
70
+
&mut self,
71
+
virt: VirtualAddress,
72
+
phys: PhysicalAddress,
73
+
len: NonZeroUsize,
74
+
access_rules: AccessRules,
75
+
) {
76
+
let mut new = MapOperation {
77
+
virt,
78
+
phys,
79
+
len,
80
+
access_rules,
81
+
};
82
+
83
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
84
+
BatchOperation::Map(op) => Some(op),
85
+
_ => None,
86
+
});
87
+
88
+
for op in ops {
89
+
match op.try_merge_with(new) {
90
+
Ok(()) => return,
91
+
Err(new_) => new = new_,
92
+
}
93
+
}
94
+
95
+
self.ops.push(BatchOperation::Map(new));
96
+
}
97
+
98
+
/// Add an [`unmap`] operation to the set of batched operations.
99
+
///
100
+
/// # Safety
101
+
///
102
+
/// - virt..virt+len must be mapped
103
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
104
+
/// - `len` must be an integer multiple of `Self::PAGE_SIZE`
106
+
///
107
+
/// [`unmap`]: RawAddressSpace::unmap
108
+
pub unsafe fn unmap(&mut self, virt: VirtualAddress, len: NonZeroUsize) {
109
+
let mut new = UnmapOperation { virt, len };
110
+
111
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
112
+
BatchOperation::Unmap(op) => Some(op),
113
+
_ => None,
114
+
});
115
+
116
+
for op in ops {
117
+
match op.try_merge_with(new) {
118
+
Ok(()) => return,
119
+
Err(new_) => new = new_,
120
+
}
121
+
}
122
+
123
+
self.ops.push(BatchOperation::Unmap(new));
124
+
}
125
+
126
+
/// Add a [`set_access_rules`] operation to the set of batched operations.
127
+
///
128
+
/// # Safety
129
+
///
130
+
/// - virt..virt+len must be mapped
131
+
/// - `virt` must be aligned to `Self::PAGE_SIZE`
132
+
/// - `len` must be an integer multiple of `Self::PAGE_SIZE`
134
+
///
135
+
/// [`set_access_rules`]: RawAddressSpace::set_access_rules
136
+
pub fn set_access_rules(
137
+
&mut self,
138
+
virt: VirtualAddress,
139
+
len: NonZeroUsize,
140
+
access_rules: AccessRules,
141
+
) {
142
+
let mut new = SetAccessRulesOperation {
143
+
virt,
144
+
len,
145
+
access_rules,
146
+
};
147
+
148
+
let ops = self.ops.iter_mut().filter_map(|op| match op {
149
+
BatchOperation::SetAccessRules(op) => Some(op),
150
+
_ => None,
151
+
});
152
+
153
+
for op in ops {
154
+
match op.try_merge_with(new) {
155
+
Ok(()) => return,
156
+
Err(new_) => new = new_,
157
+
}
158
+
}
159
+
160
+
self.ops.push(BatchOperation::SetAccessRules(new));
161
+
}
162
+
163
+
/// Flushes the `Batch` ensuring all changes are materialized into the raw address space.
164
+
pub fn flush_changes<A: RawAddressSpace>(&mut self, raw_aspace: &mut A) -> crate::Result<()> {
165
+
let mut flush = raw_aspace.flush();
166
+
for op in self.ops.drain(..) {
167
+
match op {
168
+
BatchOperation::Map(op) => {
169
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
170
+
debug_assert!(op.phys.is_aligned_to(A::PAGE_SIZE));
171
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
172
+
173
+
// Safety: the caller promised the correctness of the values on construction of
174
+
// the operation.
175
+
unsafe {
176
+
raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
177
+
}
178
+
}
179
+
BatchOperation::Unmap(op) => {
180
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
181
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
182
+
183
+
// Safety: the caller promised the correctness of the values on construction of
184
+
// the operation.
185
+
unsafe {
186
+
raw_aspace.unmap(op.virt, op.len, &mut flush);
187
+
}
188
+
}
189
+
BatchOperation::SetAccessRules(op) => {
190
+
debug_assert!(op.virt.is_aligned_to(A::PAGE_SIZE));
191
+
debug_assert!(op.len.get().is_multiple_of(A::PAGE_SIZE));
192
+
193
+
// Safety: the caller promised the correctness of the values on construction of
194
+
// the operation.
195
+
unsafe {
196
+
raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
197
+
}
198
+
}
199
+
};
200
+
}
201
+
flush.flush()
202
+
}
203
+
}
204
+
205
+
// ===== impl MapOperation =====
206
+
207
+
impl MapOperation {
208
+
/// Returns true if this operation can be merged with `other`.
209
+
///
210
+
/// Map operations can be merged if:
211
+
/// - their [`AccessRules`] are the same
212
+
/// - their virtual address ranges are contiguous (no gap between self and other)
213
+
/// - their physical address ranges are contiguous
214
+
/// - the resulting virtual address range still has the same size as the resulting
215
+
/// physical address range
216
+
const fn can_merge_with(&self, other: &Self) -> bool {
217
+
// the access rules need to be the same
218
+
let same_rules = self.access_rules.bits() == other.access_rules.bits();
219
+
220
+
let overlap_virt = self.virt.get() <= other.len.get()
221
+
&& other.virt.get() <= self.virt.get() + self.len.get();
222
+
223
+
let overlap_phys = self.phys.get() <= other.len.get()
224
+
&& other.phys.get() <= self.phys.get() + self.len.get();
225
+
226
+
let offset_virt = self.virt.get().wrapping_sub(other.virt.get());
227
+
let offset_phys = self.virt.get().wrapping_sub(other.virt.get());
228
+
let same_offset = offset_virt == offset_phys;
229
+
230
+
same_rules && overlap_virt && overlap_phys && same_offset
231
+
}
232
+
233
+
/// Attempt to merge this operation with `other`.
234
+
///
235
+
/// If this returns `Ok`, `other` has been merged into `self`.
236
+
///
237
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
238
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
239
+
if self.can_merge_with(&other) {
240
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
241
+
let len = self
242
+
.len
243
+
.get()
244
+
.checked_add(other.len.get())
245
+
.unwrap()
246
+
.wrapping_add(offset);
247
+
248
+
self.virt = cmp::min(self.virt, other.virt);
249
+
self.phys = cmp::min(self.phys, other.phys);
250
+
self.len = NonZero::new(len).ok_or(other)?;
251
+
252
+
Ok(())
253
+
} else {
254
+
Err(other)
255
+
}
256
+
}
257
+
}
258
+
259
+
// ===== impl UnmapOperation =====
260
+
261
+
impl UnmapOperation {
262
+
/// Returns true if this operation can be merged with `other`.
263
+
///
264
+
/// Unmap operations can be merged if:
265
+
/// - their virtual address ranges are contiguous (no gap between self and other)
266
+
const fn can_merge_with(&self, other: &Self) -> bool {
267
+
self.virt.get() <= other.len.get() && other.virt.get() <= self.virt.get() + self.len.get()
268
+
}
269
+
270
+
/// Attempt to merge this operation with `other`.
271
+
///
272
+
/// If this returns `Ok`, `other` has been merged into `self`.
273
+
///
274
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
275
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
276
+
if self.can_merge_with(&other) {
277
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
278
+
let len = self
279
+
.len
280
+
.get()
281
+
.checked_add(other.len.get())
282
+
.unwrap()
283
+
.wrapping_add(offset);
284
+
285
+
self.virt = cmp::min(self.virt, other.virt);
286
+
self.len = NonZero::new(len).ok_or(other)?;
287
+
288
+
Ok(())
289
+
} else {
290
+
Err(other)
291
+
}
292
+
}
293
+
}
294
+
295
+
// ===== impl SetAccessRulesOperation =====
296
+
297
+
impl SetAccessRulesOperation {
298
+
/// Returns true if this operation can be merged with `other`.
299
+
///
300
+
/// Protect operations can be merged if:
301
+
/// - their [`AccessRules`] are the same
302
+
/// - their virtual address ranges are contiguous (no gap between self and other)
303
+
const fn can_merge_with(&self, other: &Self) -> bool {
304
+
// the access rules need to be the same
305
+
let same_rules = self.access_rules.bits() == other.access_rules.bits();
306
+
307
+
let overlap = self.virt.get() <= other.len.get()
308
+
&& other.virt.get() <= self.virt.get() + self.len.get();
309
+
310
+
same_rules && overlap
311
+
}
312
+
313
+
/// Attempt to merge this operation with `other`.
314
+
///
315
+
/// If this returns `Ok`, `other` has been merged into `self`.
316
+
///
317
+
/// If this returns `Err`, `other` cannot be merged and is returned in the `Err` variant.
318
+
fn try_merge_with(&mut self, other: Self) -> Result<(), Self> {
319
+
if self.can_merge_with(&other) {
320
+
let offset = self.virt.get().wrapping_sub(other.virt.get());
321
+
let len = self
322
+
.len
323
+
.get()
324
+
.checked_add(other.len.get())
325
+
.unwrap()
326
+
.wrapping_add(offset);
327
+
328
+
self.virt = cmp::min(self.virt, other.virt);
329
+
self.len = NonZero::new(len).ok_or(other)?;
330
+
331
+
Ok(())
332
+
} else {
333
+
Err(other)
334
+
}
335
+
}
336
+
}
+376
libs/mem/src/address_space/region.rs
+376
libs/mem/src/address_space/region.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use alloc::boxed::Box;
9
+
use core::alloc::Layout;
10
+
use core::mem::offset_of;
11
+
use core::ops::{Range, RangeBounds};
12
+
use core::pin::Pin;
13
+
use core::ptr::NonNull;
14
+
use core::{cmp, mem, slice};
15
+
16
+
use anyhow::bail;
17
+
use pin_project::pin_project;
18
+
19
+
use crate::address_space::batch::Batch;
20
+
use crate::{AccessRules, AddressRangeExt, VirtualAddress};
21
+
22
+
#[pin_project]
23
+
#[derive(Debug)]
24
+
pub struct AddressSpaceRegion {
25
+
access_rules: AccessRules,
26
+
layout: Layout,
27
+
range: Range<VirtualAddress>,
28
+
/// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
29
+
subtree_range: Range<VirtualAddress>,
30
+
/// The largest gap in this subtree, used when allocating new regions
31
+
max_gap: usize,
32
+
/// Links to other regions in the WAVL tree
33
+
links: wavltree::Links<AddressSpaceRegion>,
34
+
}
35
+
36
+
impl AddressSpaceRegion {
37
+
pub const fn new(spot: VirtualAddress, layout: Layout, access_rules: AccessRules) -> Self {
38
+
Self {
39
+
range: spot..spot.checked_add(layout.size()).unwrap(),
40
+
access_rules,
41
+
layout,
42
+
43
+
max_gap: 0,
44
+
subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
45
+
links: wavltree::Links::new(),
46
+
}
47
+
}
48
+
49
+
pub const fn into_parts(self) -> (VirtualAddress, Layout, AccessRules) {
50
+
debug_assert!(!self.links.is_linked());
51
+
52
+
// TODO add VMO here
53
+
(self.range.start, self.layout, self.access_rules)
54
+
}
55
+
56
+
pub const fn range(&self) -> &Range<VirtualAddress> {
57
+
&self.range
58
+
}
59
+
60
+
pub const fn subtree_range(&self) -> &Range<VirtualAddress> {
61
+
&self.subtree_range
62
+
}
63
+
64
+
pub const fn access_rules(&self) -> AccessRules {
65
+
self.access_rules
66
+
}
67
+
68
+
pub fn as_slice(&self) -> &[u8] {
69
+
let ptr = self.range.start.as_ptr();
70
+
let len = self.range.size();
71
+
72
+
unsafe { slice::from_raw_parts(ptr, len) }
73
+
}
74
+
75
+
pub fn as_slice_mut(&mut self) -> &mut [u8] {
76
+
let ptr = self.range.start.as_mut_ptr();
77
+
let len = self.range.size();
78
+
79
+
unsafe { slice::from_raw_parts_mut(ptr, len) }
80
+
}
81
+
82
+
pub fn as_non_null(&self) -> NonNull<[u8]> {
83
+
let ptr = self.range.start.as_non_null().unwrap();
84
+
NonNull::slice_from_raw_parts(ptr, self.range.size())
85
+
}
86
+
87
+
pub const fn layout_fits_region(&self, layout: Layout) -> bool {
88
+
self.range.start.is_aligned_to(layout.align())
89
+
&& layout.size() >= self.layout.size()
90
+
&& layout.size() <= self.range.end.get() - self.range.start.get()
91
+
}
92
+
93
+
/// grow region to `new_len`, attempting to grow the VMO accordingly
94
+
/// `new_layout.size()` must be greater than or equal to `self.layout.size()`
95
+
pub fn grow_in_place(
96
+
&mut self,
97
+
new_layout: Layout,
98
+
next_range: Option<Range<VirtualAddress>>,
99
+
_batch: &mut Batch,
100
+
) -> crate::Result<()> {
101
+
if new_layout.align() > self.layout.align() {
102
+
bail!("cannot grow in-place: New alignment greater than current");
103
+
}
104
+
105
+
let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
106
+
107
+
if let Some(next_range) = next_range
108
+
&& next_range.is_overlapping(&new_range)
109
+
{
110
+
bail!("cannot grow in-place: New overlapping with next range");
111
+
}
112
+
113
+
// TODO attempt to resize VMO
114
+
self.update_range(new_range);
115
+
116
+
Ok(())
117
+
}
118
+
119
+
/// shrink region to the first `len` bytes, dropping the rest frames.
120
+
/// `new_layout.size()` mut be smaller than or equal to `self.layout.size()`
121
+
pub fn shrink(&mut self, new_layout: Layout, _batch: &mut Batch) -> crate::Result<()> {
122
+
if new_layout.align() > self.layout.align() {
123
+
bail!("cannot grow in-place: New alignment greater than current");
124
+
}
125
+
126
+
let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
127
+
128
+
// TODO drop rest pages in VMO if possible (add unmaps to batch)
129
+
self.update_range(new_range);
130
+
131
+
Ok(())
132
+
}
133
+
134
+
/// move the entire region to the new base address, remapping any already mapped frames
135
+
pub fn move_to(&mut self, base: VirtualAddress, batch: &mut Batch) -> crate::Result<()> {
136
+
// TODO
137
+
// - attempt to resize VMO
138
+
// - update self range
139
+
// - for every frame in VMO
140
+
// - attempt to map at new offset (add maps to batch)
141
+
142
+
todo!()
143
+
}
144
+
145
+
pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
146
+
where
147
+
R: RangeBounds<VirtualAddress>,
148
+
{
149
+
// TODO
150
+
// - for every *uncommited* frame in range
151
+
// - request frame from VMO (add map to batch)
152
+
153
+
todo!()
154
+
}
155
+
156
+
pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
157
+
where
158
+
R: RangeBounds<VirtualAddress>,
159
+
{
160
+
// TODO
161
+
// - for every *committed* frame in range
162
+
// - drop pages in VMO if possible (add unmaps to batch)
163
+
164
+
todo!()
165
+
}
166
+
167
+
/// updates the access rules fo this region
168
+
pub fn update_access_rules(
169
+
&mut self,
170
+
access_rules: AccessRules,
171
+
batch: &mut Batch,
172
+
) -> crate::Result<()> {
173
+
// TODO
174
+
// - for every frame in VMO
175
+
// - update access rules (add protects to batch)
176
+
// - update self access rules
177
+
178
+
todo!()
179
+
}
180
+
181
+
/// Clears the region's contents by resetting its backing to zeroed memory.
///
/// Unimplemented stub; the parameter is underscore-prefixed to avoid an
/// `unused_variables` warning.
pub fn clear(&mut self, _batch: &mut Batch) -> crate::Result<()> {
    // TODO
    // - replace VMO with "zeroed" VMO
    // - drop pages in VMO if possible (add unmaps to batch)

    todo!()
}
188
+
189
+
/// Asserts this region's structural invariants, panicking with `msg` as a
/// prefix on violation. Intended for tests/debugging.
pub fn assert_valid(&self, msg: &str) {
    assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
    // The subtree range must fully enclose this node's own range.
    assert!(
        self.subtree_range.start <= self.range.start
            && self.range.end <= self.subtree_range.end,
        "{msg}region range cannot be bigger than its subtree range; region={self:?}"
    );
    // Strict `<`: a gap lies *between* nodes, so it can never cover the whole
    // subtree range (which includes at least this non-empty node).
    assert!(
        self.max_gap < self.subtree_range.size(),
        "{msg}region's subtree max_gap cannot be bigger than its subtree range; region={self:?}"
    );
    assert!(
        self.range.start.is_aligned_to(self.layout.align()),
        "{msg}region range is not aligned to its layout; region={self:?}"
    );
    assert!(
        self.range.size() >= self.layout.size(),
        "{msg}region range is smaller than its layout; region={self:?}"
    );

    self.links.assert_valid();
}
211
+
212
+
/// Returns `true` if this nodes subtree contains a gap suitable for the given `layout`, used
/// during gap-searching.
pub fn suitable_gap_in_subtree(&self, layout: Layout) -> bool {
    // we need the layout to be padded to alignment
    debug_assert!(layout.size().is_multiple_of(layout.align()));

    // `max_gap` is the largest inter-region hole anywhere in this subtree.
    self.max_gap >= layout.size()
}
220
+
221
+
/// Returns the left child node in the search tree of regions, used during gap-searching.
pub fn left_child(&self) -> Option<&Self> {
    // SAFETY: assumes link pointers always reference live nodes owned by the
    // tree (wavltree invariant) — TODO confirm the borrowed lifetime is sound.
    Some(unsafe { self.links.left()?.as_ref() })
}
225
+
226
+
/// Returns the right child node in the search tree of regions, used during gap-searching.
pub fn right_child(&self) -> Option<&Self> {
    // SAFETY: assumes link pointers always reference live nodes owned by the
    // tree (wavltree invariant) — TODO confirm the borrowed lifetime is sound.
    Some(unsafe { self.links.right()?.as_ref() })
}
230
+
231
+
/// Returns the parent node in the search tree of regions, used during gap-searching.
pub fn parent(&self) -> Option<&Self> {
    // SAFETY: assumes link pointers always reference live nodes owned by the
    // tree (wavltree invariant) — TODO confirm the borrowed lifetime is sound.
    Some(unsafe { self.links.parent()?.as_ref() })
}
235
+
236
+
/// Replaces this region's range and re-propagates gap-search metadata
/// (subtree ranges / max gaps) up through the ancestors.
fn update_range(&mut self, new_range: Range<VirtualAddress>) {
    self.range = new_range;
    // We also must propagate the information about our changed range to the rest of the tree
    // so searching for a free spot returns the correct results.
    Self::propagate_update_to_parent(Some(NonNull::from(self)));
}
242
+
243
+
/// Update the gap search metadata of this region. This method is called in the [`wavltree::Linked`]
/// implementation below after each tree mutation that impacted this node or its subtree in some way
/// (insertion, rotation, deletion).
///
/// Returns `true` if this nodes metadata changed.
#[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
fn update_gap_metadata(
    mut node: NonNull<Self>,
    left: Option<NonNull<Self>>,
    right: Option<NonNull<Self>>,
) -> bool {
    // Byte distance between the end of one range and the start of the next;
    // clamps to 0 if the operands are out of order.
    fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize {
        right_first_byte
            .checked_sub_addr(left_last_byte)
            .unwrap_or_default() // TODO use saturating_sub_addr
    }

    let node = unsafe { node.as_mut() };
    let mut left_max_gap = 0;
    let mut right_max_gap = 0;

    // recalculate the subtree_range start
    // (`mem::replace` stores the new value and hands back the old one so we
    // can detect changes at the end)
    let old_subtree_range_start = if let Some(left) = left {
        let left = unsafe { left.as_ref() };
        // gap between the left subtree and this node itself
        let left_gap = gap(left.subtree_range.end, node.range.start);
        left_max_gap = cmp::max(left_gap, left.max_gap);
        mem::replace(&mut node.subtree_range.start, left.subtree_range.start)
    } else {
        mem::replace(&mut node.subtree_range.start, node.range.start)
    };

    // recalculate the subtree range end
    let old_subtree_range_end = if let Some(right) = right {
        let right = unsafe { right.as_ref() };
        // gap between this node and its right subtree
        let right_gap = gap(node.range.end, right.subtree_range.start);
        right_max_gap = cmp::max(right_gap, right.max_gap);
        mem::replace(&mut node.subtree_range.end, right.subtree_range.end)
    } else {
        mem::replace(&mut node.subtree_range.end, node.range.end)
    };

    // recalculate the max_gap
    let old_max_gap = mem::replace(&mut node.max_gap, cmp::max(left_max_gap, right_max_gap));

    // report whether anything actually changed so callers can stop propagating
    old_max_gap != node.max_gap
        || old_subtree_range_start != node.subtree_range.start
        || old_subtree_range_end != node.subtree_range.end
}
291
+
292
+
// Propagate metadata updates to this regions parent in the search tree. If we had to update
// our metadata the parent must update its metadata too.
#[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
fn propagate_update_to_parent(mut maybe_node: Option<NonNull<Self>>) {
    // Walk up towards the root, recomputing metadata at each level.
    while let Some(node) = maybe_node {
        let links = unsafe { &node.as_ref().links };
        let changed = Self::update_gap_metadata(node, links.left(), links.right());

        // if the metadata didn't actually change, we don't need to recalculate parents
        if !changed {
            return;
        }

        maybe_node = links.parent();
    }
}
308
+
}
309
+
310
+
// SAFETY: `AddressSpaceRegion` is only handled through `Pin<Box<Self>>`
// handles, so elements never move while linked (see `into_ptr`/`from_ptr`).
unsafe impl wavltree::Linked for AddressSpaceRegion {
    /// Any heap-allocated type that owns an element may be used.
    ///
    /// An element *must not* move while part of an intrusive data
    /// structure. In many cases, `Pin` may be used to enforce this.
    type Handle = Pin<Box<Self>>; // TODO better handle type

    // Regions are keyed (and ordered) by their start address.
    type Key = VirtualAddress;

    /// Convert an owned `Handle` into a raw pointer
    fn into_ptr(handle: Self::Handle) -> NonNull<Self> {
        // Safety: wavltree treats the ptr as pinned
        unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
    }

    /// Convert a raw pointer back into an owned `Handle`.
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        // Safety: `NonNull` *must* be constructed from a pinned reference
        // which the tree implementation upholds.
        unsafe { Pin::new_unchecked(Box::from_raw(ptr.as_ptr())) }
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<wavltree::Links<Self>> {
        // Compute a pointer to the `links` field without creating an
        // intermediate reference (avoids aliasing a possibly-borrowed node).
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }

    fn get_key(&self) -> &Self::Key {
        &self.range.start
    }

    fn after_insert(self: Pin<&mut Self>) {
        // A freshly inserted leaf must carry leaf metadata (set by `new`).
        debug_assert_eq!(self.subtree_range.start, self.range.start);
        debug_assert_eq!(self.subtree_range.end, self.range.end);
        debug_assert_eq!(self.max_gap, 0);
        Self::propagate_update_to_parent(self.links.parent());
    }

    fn after_remove(self: Pin<&mut Self>, parent: Option<NonNull<Self>>) {
        // The removed node's ancestors must recompute their gap metadata.
        Self::propagate_update_to_parent(parent);
    }

    fn after_rotate(
        self: Pin<&mut Self>,
        parent: NonNull<Self>,
        sibling: Option<NonNull<Self>>,
        lr_child: Option<NonNull<Self>>,
        side: wavltree::Side,
    ) {
        let this = self.project();
        // Safety: caller ensures ptr is valid
        let _parent = unsafe { parent.as_ref() };

        // After a rotation this node takes over the old subtree summary of the
        // node it was rotated above (its former parent)...
        this.subtree_range.start = _parent.subtree_range.start;
        this.subtree_range.end = _parent.subtree_range.end;
        *this.max_gap = _parent.max_gap;

        // ...while the demoted parent recomputes from its new children.
        if side == wavltree::Side::Left {
            Self::update_gap_metadata(parent, sibling, lr_child);
        } else {
            Self::update_gap_metadata(parent, lr_child, sibling);
        }
    }
}
+414
libs/mem/src/addresses.rs
+414
libs/mem/src/addresses.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::{Layout, LayoutError};
9
+
use core::ops::Range;
10
+
11
+
use crate::address_space::RawAddressSpace;
12
+
13
+
macro_rules! impl_address {
14
+
($address_ty:ident) => {
15
+
impl $address_ty {
16
+
pub const MAX: Self = Self(usize::MAX);
17
+
pub const MIN: Self = Self(0);
18
+
pub const ZERO: Self = Self(0);
19
+
pub const BITS: u32 = usize::BITS;
20
+
21
+
#[inline]
22
+
pub const fn get(&self) -> usize {
23
+
self.0
24
+
}
25
+
26
+
#[must_use]
27
+
#[inline]
28
+
pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
29
+
Self(ptr.expose_provenance())
30
+
}
31
+
32
+
#[must_use]
33
+
#[inline]
34
+
pub fn from_mut_ptr<T: ?Sized>(ptr: *mut T) -> Self {
35
+
Self(ptr.expose_provenance())
36
+
}
37
+
38
+
#[must_use]
39
+
#[inline]
40
+
pub fn from_non_null<T: ?Sized>(ptr: ::core::ptr::NonNull<T>) -> Self {
41
+
Self(ptr.addr().get())
42
+
}
43
+
44
+
#[inline]
45
+
pub fn as_ptr(self) -> *const u8 {
46
+
::core::ptr::with_exposed_provenance(self.0)
47
+
}
48
+
49
+
#[inline]
50
+
pub fn as_mut_ptr(self) -> *mut u8 {
51
+
::core::ptr::with_exposed_provenance_mut(self.0)
52
+
}
53
+
54
+
#[inline]
55
+
pub fn as_non_null(self) -> Option<::core::ptr::NonNull<u8>> {
56
+
::core::num::NonZeroUsize::new(self.0)
57
+
.map(::core::ptr::NonNull::with_exposed_provenance)
58
+
}
59
+
60
+
#[must_use]
61
+
#[inline]
62
+
pub const fn checked_add(self, rhs: usize) -> Option<Self> {
63
+
if let Some(out) = self.0.checked_add(rhs) {
64
+
Some(Self(out))
65
+
} else {
66
+
None
67
+
}
68
+
}
69
+
70
+
#[must_use]
71
+
#[inline]
72
+
pub const fn checked_add_signed(self, rhs: isize) -> Option<Self> {
73
+
if let Some(out) = self.0.checked_add_signed(rhs) {
74
+
Some(Self(out))
75
+
} else {
76
+
None
77
+
}
78
+
}
79
+
80
+
#[must_use]
81
+
#[inline]
82
+
pub const fn checked_sub(self, rhs: usize) -> Option<Self> {
83
+
if let Some(out) = self.0.checked_sub(rhs) {
84
+
Some(Self(out))
85
+
} else {
86
+
None
87
+
}
88
+
}
89
+
#[must_use]
90
+
#[inline]
91
+
pub const fn checked_div(self, rhs: usize) -> Option<Self> {
92
+
if let Some(out) = self.0.checked_div(rhs) {
93
+
Some(Self(out))
94
+
} else {
95
+
None
96
+
}
97
+
}
98
+
#[must_use]
99
+
#[inline]
100
+
pub const fn checked_mul(self, rhs: usize) -> Option<Self> {
101
+
if let Some(out) = self.0.checked_mul(rhs) {
102
+
Some(Self(out))
103
+
} else {
104
+
None
105
+
}
106
+
}
107
+
#[must_use]
108
+
#[inline]
109
+
pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
110
+
if let Some(out) = self.0.checked_shl(rhs) {
111
+
Some(Self(out))
112
+
} else {
113
+
None
114
+
}
115
+
}
116
+
#[must_use]
117
+
#[inline]
118
+
pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
119
+
if let Some(out) = self.0.checked_shr(rhs) {
120
+
Some(Self(out))
121
+
} else {
122
+
None
123
+
}
124
+
}
125
+
// #[must_use]
126
+
// #[inline]
127
+
// pub const fn saturating_add(self, rhs: usize) -> Self {
128
+
// Self(self.0.saturating_add(rhs))
129
+
// }
130
+
// #[must_use]
131
+
// #[inline]
132
+
// pub const fn saturating_add_signed(self, rhs: isize) -> Self {
133
+
// Self(self.0.saturating_add_signed(rhs))
134
+
// }
135
+
// #[must_use]
136
+
// #[inline]
137
+
// pub const fn saturating_div(self, rhs: usize) -> Self {
138
+
// Self(self.0.saturating_div(rhs))
139
+
// }
140
+
// #[must_use]
141
+
// #[inline]
142
+
// pub const fn saturating_sub(self, rhs: usize) -> Self {
143
+
// Self(self.0.saturating_sub(rhs))
144
+
// }
145
+
// #[must_use]
146
+
// #[inline]
147
+
// pub const fn saturating_mul(self, rhs: usize) -> Self {
148
+
// Self(self.0.saturating_mul(rhs))
149
+
// }
150
+
#[must_use]
151
+
#[inline]
152
+
pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
153
+
let (a, b) = self.0.overflowing_shl(rhs);
154
+
(Self(a), b)
155
+
}
156
+
#[must_use]
157
+
#[inline]
158
+
pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
159
+
let (a, b) = self.0.overflowing_shr(rhs);
160
+
(Self(a), b)
161
+
}
162
+
163
+
#[must_use]
164
+
#[inline]
165
+
pub const fn checked_sub_addr(self, rhs: Self) -> Option<usize> {
166
+
self.0.checked_sub(rhs.0)
167
+
}
168
+
169
+
// #[must_use]
170
+
// #[inline]
171
+
// pub const fn saturating_sub_addr(self, rhs: Self) -> usize {
172
+
// self.0.saturating_sub(rhs.0)
173
+
// }
174
+
175
+
#[must_use]
176
+
#[inline]
177
+
pub const fn is_aligned_to(&self, align: usize) -> bool {
178
+
assert!(
179
+
align.is_power_of_two(),
180
+
"is_aligned_to: align is not a power-of-two"
181
+
);
182
+
183
+
self.0 & (align - 1) == 0
184
+
}
185
+
186
+
#[must_use]
187
+
#[inline]
188
+
pub const fn checked_align_up(self, align: usize) -> Option<Self> {
189
+
if !align.is_power_of_two() {
190
+
panic!("checked_align_up: align is not a power-of-two");
191
+
}
192
+
193
+
// SAFETY: `align` has been checked to be a power of 2 above
194
+
let align_minus_one = unsafe { align.unchecked_sub(1) };
195
+
196
+
// addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
197
+
if let Some(addr_plus_align) = self.0.checked_add(align_minus_one) {
198
+
let aligned = Self(addr_plus_align & 0usize.wrapping_sub(align));
199
+
debug_assert!(aligned.is_aligned_to(align));
200
+
debug_assert!(aligned.0 >= self.0);
201
+
Some(aligned)
202
+
} else {
203
+
None
204
+
}
205
+
}
206
+
207
+
// #[must_use]
208
+
// #[inline]
209
+
// pub const fn wrapping_align_up(self, align: usize) -> Self {
210
+
// if !align.is_power_of_two() {
211
+
// panic!("checked_align_up: align is not a power-of-two");
212
+
// }
213
+
//
214
+
// // SAFETY: `align` has been checked to be a power of 2 above
215
+
// let align_minus_one = unsafe { align.unchecked_sub(1) };
216
+
//
217
+
// // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
218
+
// let out = addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align);
219
+
// debug_assert!(out.is_aligned_to(align));
220
+
// out
221
+
// }
222
+
223
+
#[inline]
224
+
pub const fn alignment(&self) -> usize {
225
+
self.0 & (!self.0 + 1)
226
+
}
227
+
228
+
#[must_use]
229
+
#[inline]
230
+
pub const fn align_down(self, align: usize) -> Self {
231
+
if !align.is_power_of_two() {
232
+
panic!("checked_align_up: align is not a power-of-two");
233
+
}
234
+
235
+
let aligned = Self(self.0 & 0usize.wrapping_sub(align));
236
+
debug_assert!(aligned.is_aligned_to(align));
237
+
debug_assert!(aligned.0 <= self.0);
238
+
aligned
239
+
}
240
+
}
241
+
242
+
impl ::core::fmt::Display for $address_ty {
243
+
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
244
+
f.write_fmt(format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x
245
+
}
246
+
}
247
+
248
+
impl ::core::fmt::Debug for $address_ty {
249
+
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
250
+
f.debug_tuple(stringify!($address_ty))
251
+
.field(&format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x
252
+
.finish()
253
+
}
254
+
}
255
+
};
256
+
}
257
+
258
+
/// A virtual (MMU-translated) memory address.
///
/// Thin `usize` newtype; the arithmetic/alignment/conversion surface is
/// generated by `impl_address!`.
#[repr(transparent)]
#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VirtualAddress(usize);
impl_address!(VirtualAddress);
262
+
263
+
impl VirtualAddress {
    /// Creates a new `VirtualAddress` from a raw `usize`.
    #[must_use]
    pub const fn new(n: usize) -> Self {
        Self(n)
    }

    /// Returns whether this address is canonical for address space `A`.
    ///
    /// Branchless check: the bits selected by `CANONICAL_ADDRESS_MASK` must be
    /// either all zero or all one. `masked.wrapping_sub(1)` wraps to
    /// `usize::MAX` when `masked == 0` and equals `MASK - 1` when
    /// `masked == MASK`; any other value lands strictly below `MASK - 1`.
    pub const fn is_canonical<A: RawAddressSpace>(self) -> bool {
        (self.0 & A::CANONICAL_ADDRESS_MASK).wrapping_sub(1) >= A::CANONICAL_ADDRESS_MASK - 1
    }

    #[inline]
    pub const fn is_user_accessible<A: RawAddressSpace>(self) -> bool {
        // This address refers to userspace if it is in the lower half of the
        // canonical addresses. IOW - if all of the bits in the canonical address
        // mask are zero.
        (self.0 & A::CANONICAL_ADDRESS_MASK) == 0
    }
}
281
+
282
+
/// A physical memory address.
///
/// Thin `usize` newtype; the arithmetic/alignment/conversion surface is
/// generated by `impl_address!`.
#[repr(transparent)]
#[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PhysicalAddress(usize);
impl_address!(PhysicalAddress);

impl PhysicalAddress {
    /// Creates a new `PhysicalAddress` from a raw `usize`.
    pub const fn new(n: usize) -> Self {
        Self(n)
    }
}
292
+
293
+
/// Shared `AddressRangeExt` method bodies for `Range<VirtualAddress>` and
/// `Range<PhysicalAddress>` (everything except `is_user_accessible`).
macro_rules! address_range_impl {
    () => {
        /// Size of the range in bytes; `0` for empty ranges.
        fn size(&self) -> usize {
            debug_assert!(self.start <= self.end);
            let is = self.end.checked_sub_addr(self.start).unwrap_or_default();
            // Cross-check against the naive computation in debug builds only.
            let should = if self.is_empty() {
                0
            } else {
                self.end.get() - self.start.get()
            };
            debug_assert_eq!(is, should);
            is
        }
        /// Shifts both endpoints up by `offset`; `None` on overflow.
        fn checked_add(self, offset: usize) -> Option<Self> {
            Some(Range::from(
                self.start.checked_add(offset)?..self.end.checked_add(offset)?,
            ))
        }
        fn as_ptr_range(&self) -> Range<*const u8> {
            Range::from(self.start.as_ptr()..self.end.as_ptr())
        }
        fn as_mut_ptr_range(&self) -> Range<*mut u8> {
            Range::from(self.start.as_mut_ptr()..self.end.as_mut_ptr())
        }
        /// Aligns both endpoints inwards (shrinking the range).
        fn checked_align_in(self, align: usize) -> Option<Self>
        where
            Self: Sized,
        {
            // NOTE(review): for ranges smaller than `align` this can produce
            // start > end (an inverted-but-empty `Range`) — confirm callers
            // tolerate that, unlike `checked_align_out` which asserts order.
            let res = Range::from(self.start.checked_align_up(align)?..self.end.align_down(align));
            Some(res)
        }
        /// Aligns both endpoints outwards (growing the range).
        fn checked_align_out(self, align: usize) -> Option<Self>
        where
            Self: Sized,
        {
            let res = Range::from(self.start.align_down(align)..self.end.checked_align_up(align)?);
            // aligning outwards can only increase the size
            debug_assert!(res.start.0 <= res.end.0);
            Some(res)
        }
        // fn saturating_align_in(self, align: usize) -> Self {
        //     self.start.saturating_align_up(align)..self.end.saturating_align_down(align)
        // }
        // fn saturating_align_out(self, align: usize) -> Self {
        //     self.start.saturating_align_down(align)..self.end.saturating_align_up(align)
        // }

        // TODO test
        /// Alignment of the range's start address.
        fn alignment(&self) -> usize {
            self.start.alignment()
        }
        fn into_layout(self) -> core::result::Result<Layout, core::alloc::LayoutError> {
            Layout::from_size_align(self.size(), self.alignment())
        }
        /// Returns `true` if the two ranges share at least one byte.
        fn is_overlapping(&self, other: &Self) -> bool {
            (self.start < other.end) & (other.start < self.end)
        }
        /// Returns the parts of `self` before and after `other`.
        /// Debug-asserts that the ranges overlap.
        fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) {
            debug_assert!(self.is_overlapping(&other));
            let a = Range::from(self.start..other.start);
            let b = Range::from(other.end..self.end);
            ((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b))
        }
        /// Clamps `self` so it lies entirely within `range`.
        fn clamp(&self, range: Self) -> Self {
            Range::from(self.start.max(range.start)..self.end.min(range.end))
        }
    };
}
361
+
362
+
/// Extension methods for `Range<VirtualAddress>` / `Range<PhysicalAddress>`.
pub trait AddressRangeExt {
    /// Size of the range in bytes.
    fn size(&self) -> usize;
    /// Shifts both endpoints up by `offset`; `None` on overflow.
    #[must_use]
    fn checked_add(self, offset: usize) -> Option<Self>
    where
        Self: Sized;
    #[must_use]
    fn as_ptr_range(&self) -> Range<*const u8>;
    #[must_use]
    fn as_mut_ptr_range(&self) -> Range<*mut u8>;
    /// Aligns both endpoints inwards (shrinking).
    #[must_use]
    fn checked_align_in(self, align: usize) -> Option<Self>
    where
        Self: Sized;
    /// Aligns both endpoints outwards (growing).
    #[must_use]
    fn checked_align_out(self, align: usize) -> Option<Self>
    where
        Self: Sized;
    // #[must_use]
    // fn saturating_align_in(self, align: usize) -> Self;
    // #[must_use]
    // fn saturating_align_out(self, align: usize) -> Self;
    /// Alignment of the range's start address.
    fn alignment(&self) -> usize;
    /// Converts the range into a `Layout` of its size and alignment.
    fn into_layout(self) -> Result<Layout, LayoutError>;
    fn is_overlapping(&self, other: &Self) -> bool;
    /// Parts of `self` before and after an overlapping `other`.
    fn difference(&self, other: Self) -> (Option<Self>, Option<Self>)
    where
        Self: Sized;
    fn clamp(&self, range: Self) -> Self;
    /// Whether every address in the range is user accessible in address space `A`.
    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool;
}
393
+
394
+
impl AddressRangeExt for Range<PhysicalAddress> {
    address_range_impl!();
    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
        // Physical ranges have no user-mode mapping semantics; calling this
        // is a programmer error.
        unimplemented!("PhysicalAddress is never user accessible")
    }
}
400
+
401
+
impl AddressRangeExt for Range<VirtualAddress> {
    address_range_impl!();

    fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
        // An empty range covers no memory and is never considered accessible.
        if self.is_empty() {
            return false;
        }
        // `end` is exclusive, so check the last byte actually inside the range.
        let Some(end_minus_one) = self.end.checked_sub(1) else {
            return false;
        };

        self.start.is_user_accessible::<A>() && end_minus_one.is_user_accessible::<A>()
    }
}
+178
libs/mem/src/frame.rs
+178
libs/mem/src/frame.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::Layout;
9
+
use core::cmp::PartialEq;
10
+
use core::fmt::Debug;
11
+
use core::mem::offset_of;
12
+
use core::ptr::NonNull;
13
+
use core::sync::atomic;
14
+
use core::sync::atomic::{AtomicUsize, Ordering};
15
+
16
+
use cordyceps::{list, Linked};
17
+
use pin_project::pin_project;
18
+
19
+
use crate::frame_alloc::FrameAllocator;
20
+
use crate::PhysicalAddress;
21
+
22
+
/// Soft limit on the amount of references that may be made to a `Frame`.
const MAX_REFCOUNT: usize = isize::MAX as usize;

/// A reference-counted handle to a [`Frame`], analogous to `Arc`: cloning
/// bumps the frame's refcount, dropping the last handle returns the frame to
/// its allocator.
pub struct FrameRef<A: FrameAllocator> {
    // Pointer to the shared, intrusively-linked frame record.
    frame: NonNull<Frame>,
    // Allocator the frame is returned to when the last reference drops.
    alloc: A,
}
29
+
30
+
/// Bookkeeping record for one physical memory frame.
///
/// `!Unpin` because the intrusive `links` field makes the record
/// address-sensitive while linked into a list.
#[pin_project(!Unpin)]
#[derive(Debug)]
pub struct Frame {
    // Physical base address of the frame this record describes.
    addr: PhysicalAddress,
    // Number of live `FrameRef`s; see the `Clone`/`Drop` impls below.
    refcount: AtomicUsize,
    // Intrusive list links (e.g. for the CPU-local free cache).
    #[pin]
    links: list::Links<Self>,
}
38
+
39
+
// ===== impl FrameRef =====

// Safety: `Frame` holds only a `PhysicalAddress`, an atomic refcount and
// intrusive links. NOTE(review): this comment previously referenced an
// `assert_impl_all!` that does not exist in this file — restore the static
// assertion or restate the soundness argument explicitly.
unsafe impl Send for Frame {}

// Safety: all shared mutation of `Frame` goes through the `AtomicUsize`
// refcount. NOTE(review): same stale `assert_impl_all!` reference as above.
unsafe impl Sync for Frame {}
46
+
47
+
impl<A: FrameAllocator + Clone> Clone for FrameRef<A> {
    /// Makes a clone of the `Frame`.
    ///
    /// This creates reference to the same `FrameInfo`, increasing the reference count by one.
    fn clone(&self) -> Self {
        // Increase the reference count by one. Using relaxed ordering, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // Again, restating what the `Arc` implementation quotes from the
        // [Boost documentation][1]:
        //
        // > Increasing the reference counter can always be done with memory_order_relaxed: New
        // > references to an object can only be formed from an existing
        // > reference, and passing an existing reference from one thread to
        // > another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.frame().refcount.fetch_add(1, Ordering::Relaxed);
        debug_assert_ne!(old_size, 0);

        // Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
        // `Frame`s somewhere (which we really shouldn't but just in case). Overflowing the refcount
        // would be *really* bad as it would treat the frame as free and potentially cause a use-after-free
        // scenario. Realistically this branch should never be taken.
        //
        // Also worth noting: Just like `Arc`, the refcount could still overflow when in between
        // the load above and this check some other cpu increased the refcount from `isize::MAX` to
        // `usize::MAX` but that seems unlikely. The other option, doing the comparison and update in
        // one conditional atomic operation produces much worse code, so if its good enough for the
        // standard library, it is good enough for us.
        assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow");

        // SAFETY: `self.frame` is live (we hold a reference) and the count
        // was just incremented on the new handle's behalf.
        unsafe { Self::from_raw_parts(self.frame, self.alloc.clone()) }
    }
}
83
+
84
+
impl<A: FrameAllocator> Drop for FrameRef<A> {
    /// Drops the `Frame`.
    ///
    /// This will decrement the reference count. If the reference count reaches zero
    /// then this frame will be marked as free and returned to the frame allocator.
    fn drop(&mut self) {
        // Fast path: we were not the last reference.
        if self.frame().refcount.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // Ensure uses of `FrameInfo` happen before freeing it.
        // Because it is marked `Release`, the decreasing of the reference count synchronizes
        // with this `Acquire` fence. This means that use of `FrameInfo` happens before decreasing
        // the reference count, which happens before this fence, which happens before freeing `FrameInfo`.
        //
        // This section of the [Boost documentation][1] as quoted in Rusts `Arc` implementation and
        // may explain further:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        // Last reference gone: return the frame to the allocator (out of line).
        self.drop_slow();
    }
}
115
+
116
+
impl<A: FrameAllocator> FrameRef<A> {
    /// Assembles a handle from its raw parts without touching the refcount.
    ///
    /// # Safety
    /// `frame` must point to a live `Frame` whose refcount already accounts
    /// for this new handle, and `alloc` must be the allocator it came from.
    unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: A) -> Self {
        Self { frame, alloc }
    }

    /// Shared access to the underlying frame record.
    fn frame(&self) -> &Frame {
        // SAFETY: `self.frame` is live for as long as this handle exists
        // (the refcount keeps the record from being freed).
        unsafe { self.frame.as_ref() }
    }

    // Out-of-line slow path for `Drop`: return the frame to its allocator.
    // `inline(never)` keeps the common non-final drop path small.
    #[inline(never)]
    fn drop_slow(&mut self) {
        // SAFETY: FRAME_SIZE is assumed to be a valid (power-of-two) size and
        // alignment — TODO confirm the `FrameAllocator` contract guarantees it.
        let layout = unsafe { Layout::from_size_align_unchecked(A::FRAME_SIZE, A::FRAME_SIZE) };
        // SAFETY: the refcount reached zero, so no other handle references
        // this frame; it originated from `self.alloc` per `from_raw_parts`.
        unsafe {
            self.alloc.deallocate(self.frame, layout);
        }
    }
}
133
+
134
+
// ===== impl Frame =====
135
+
136
+
impl PartialEq<Frame> for &Frame {
    // NOTE(review): equality includes the *current* refcount, which is read
    // with two relaxed atomic loads and can change concurrently — two
    // comparisons of the same pair of frames may disagree. Confirm this is
    // intended (it looks test-oriented) rather than identity-by-address.
    fn eq(&self, other: &Frame) -> bool {
        self.refcount() == other.refcount() && self.addr == other.addr
    }
}
141
+
142
+
impl Frame {
    /// Creates a new, unlinked frame record for the frame at `addr` with the
    /// given initial reference count.
    pub fn new(addr: PhysicalAddress, initial_refcount: usize) -> Self {
        Self {
            addr,
            refcount: AtomicUsize::new(initial_refcount),
            links: list::Links::new(),
        }
    }

    /// Returns the current reference count (relaxed load — a snapshot only,
    /// it may be stale by the time the caller uses it).
    pub fn refcount(&self) -> usize {
        self.refcount.load(Ordering::Relaxed)
    }

    /// Returns the physical base address of this frame.
    pub fn addr(&self) -> PhysicalAddress {
        self.addr
    }
}
159
+
160
+
// SAFETY: handles are raw `NonNull` pointers that are passed through
// unchanged, and `links` resolves the intrusive links field without creating
// intermediate references.
unsafe impl Linked<list::Links<Self>> for Frame {
    // Frames are owned elsewhere; the list only stores raw pointers to them.
    type Handle = NonNull<Self>;

    fn into_ptr(r: Self::Handle) -> NonNull<Self> {
        r
    }

    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
        ptr
    }

    unsafe fn links(ptr: NonNull<Self>) -> NonNull<list::Links<Self>> {
        // Compute the field address via `map_addr` to avoid materialising a
        // `&Frame` (which could alias a mutably-borrowed node).
        ptr.map_addr(|addr| {
            let offset = offset_of!(Self, links);
            addr.checked_add(offset).unwrap()
        })
        .cast()
    }
}
+135
libs/mem/src/frame_alloc.rs
+135
libs/mem/src/frame_alloc.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
mod area;
9
+
mod area_selection;
10
+
11
+
use core::alloc::Layout;
12
+
use core::cell::RefCell;
13
+
use core::cmp;
14
+
use core::ops::Range;
15
+
use core::ptr::NonNull;
16
+
use core::sync::atomic::{AtomicUsize, Ordering};
17
+
18
+
use cordyceps::List;
19
+
use cpu_local::collection::CpuLocal;
20
+
use fallible_iterator::FallibleIterator;
21
+
use lock_api::Mutex;
22
+
use smallvec::SmallVec;
23
+
24
+
use crate::address_space::RawAddressSpace;
25
+
use crate::frame_alloc::area::Area;
26
+
use crate::frame_alloc::area_selection::select_areas;
27
+
use crate::{Frame, PhysicalAddress};
28
+
29
+
/// Error returned when a frame allocation request cannot be satisfied.
#[derive(Debug)]
pub struct AllocError;
31
+
32
+
/// An allocator handing out physical frames tracked by [`Frame`] records.
///
/// # Safety
/// NOTE(review): the contract implementors (and `unsafe` users) must uphold
/// is not written down here. From usage elsewhere in this crate it appears to
/// be: `allocate` returns records that stay valid until passed back to
/// `deallocate`, and `deallocate` is only called with a block/layout pair
/// previously produced by `allocate` — confirm and document precisely.
pub unsafe trait FrameAllocator: Send + Sync + 'static {
    /// Size in bytes of one frame handed out by this allocator.
    const FRAME_SIZE: usize;
    /// Allocates frames for `layout`.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
    /// Returns frames to the allocator.
    ///
    /// # Safety
    /// `block` must have been returned by `allocate` on this allocator with
    /// the same `layout`, and must not be used afterwards.
    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
}
37
+
38
+
// Upper bound on frames kept in each CPU-local free cache before overflowing
// back into the global areas.
const MAX_FRAMES_IN_CACHE: usize = 256;

/// Global frame allocator: a set of physical memory areas behind a mutex,
/// fronted by per-CPU single-frame free caches.
pub struct FrameAlloc<L: lock_api::RawMutex, A: RawAddressSpace> {
    // The physical areas frames are carved from; guarded by mutex `L`.
    areas: Mutex<L, SmallVec<[Area<A>; 4]>>,
    // Per-CPU cache of recently-freed single frames (lock-free fast path).
    cpu_local_cache: CpuLocal<RefCell<List<Frame>>>,
    // Largest alignment any area can currently serve; advisory only.
    max_alignment_hint: AtomicUsize,
}
45
+
46
+
impl<L: lock_api::RawMutex, A: RawAddressSpace> FrameAlloc<L, A> {
    /// Builds an allocator managing the given allocatable physical regions.
    ///
    /// # Errors
    /// Propagates any error produced while selecting/initialising areas.
    pub fn new(allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>) -> crate::Result<Self> {
        let mut max_alignment_hint = 0;
        let mut areas = SmallVec::new();

        // Carve the raw regions into areas plus their bookkeeping storage.
        let mut selections = select_areas::<A>(allocatable_regions);
        while let Some(selection) = selections.next()? {
            let area = Area::new(selection.area, selection.bookkeeping);
            max_alignment_hint = cmp::max(max_alignment_hint, area.max_alignment_hint());
            areas.push(area);
        }

        Ok(Self {
            areas: Mutex::new(areas),
            cpu_local_cache: CpuLocal::new(),
            max_alignment_hint: AtomicUsize::new(max_alignment_hint),
        })
    }

    /// Advisory: the largest alignment any area reported being able to serve
    /// (relaxed load; may be stale).
    pub fn max_alignment_hint(&self) -> usize {
        self.max_alignment_hint.load(Ordering::Relaxed)
    }

    // Fast path: pop a single page-sized, page-aligned frame from this CPU's
    // cache. Returns `None` for any other layout or an empty cache.
    fn allocate_local(&self, layout: Layout) -> Option<NonNull<Frame>> {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();
            cache.pop_back()
        } else {
            None
        }
    }

    // Fast path: stash a single page-sized frame in this CPU's cache.
    // Returns `true` if the frame was absorbed, `false` if the caller must
    // return it to the global areas (wrong layout or cache full).
    fn deallocate_local(&self, block: NonNull<Frame>, layout: Layout) -> bool {
        if layout.size() == A::PAGE_SIZE && layout.align() == A::PAGE_SIZE {
            let mut cache = self.cpu_local_cache.get_or_default().borrow_mut();

            if cache.len() < MAX_FRAMES_IN_CACHE {
                cache.push_back(block);
                return true;
            }
        }

        false
    }
}
91
+
92
+
// SAFETY: blocks are produced exclusively by the buddy areas (the CPU-local
// cache only holds frames previously allocated from them), so `allocate`
// returns valid, unaliased frames; `deallocate`'s contract is forwarded to
// the caller.
unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
    for &'static FrameAlloc<L, A>
{
    const FRAME_SIZE: usize = A::PAGE_SIZE;

    fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
        // attempt to allocate from the CPU-local cache first (lock-free path)
        if let Some(frame) = self.allocate_local(layout) {
            return Ok(NonNull::slice_from_raw_parts(frame.cast(), 1));
        }

        // slow path: first-fit scan over all areas under the mutex
        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            if let Ok(frames) = area.allocate(layout) {
                return Ok(frames);
            }
        }

        Err(AllocError)
    }

    unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout) {
        // attempt to place the frame into the CPU-local cache first
        if self.deallocate_local(block, layout) {
            return;
        }

        let mut areas = self.areas.lock();
        for area in areas.iter_mut() {
            // SAFETY: caller guarantees `block` came from `allocate` and is
            // therefore a live, initialized frame.
            let block_ = unsafe { block.as_ref() };

            if area.contains_frame(block_.addr()) {
                // SAFETY: containment identifies the owning area; caller
                // guarantees `layout` matches the original allocation.
                unsafe { area.deallocate(block, layout) };

                // merging may have produced a larger free block, so ratchet
                // the alignment hint up (it is never lowered here)
                self.max_alignment_hint
                    .fetch_max(area.max_alignment_hint(), Ordering::Relaxed);

                return;
            }
        }

        // a block owned by no area violates `deallocate`'s contract
        unreachable!();
    }
}
+444
libs/mem/src/frame_alloc/area.rs
+444
libs/mem/src/frame_alloc/area.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use core::alloc::Layout;
9
+
use core::marker::PhantomData;
10
+
use core::mem::MaybeUninit;
11
+
use core::ops::Range;
12
+
use core::ptr::NonNull;
13
+
use core::{cmp, fmt};
14
+
15
+
use cordyceps::List;
16
+
17
+
use crate::address_space::RawAddressSpace;
18
+
use crate::frame_alloc::AllocError;
19
+
use crate::{AddressRangeExt, Frame, PhysicalAddress};
20
+
21
+
/// Number of buddy size classes. The largest block an area can hand out is
/// `PAGE_SIZE << (MAX_ORDER - 1)` bytes.
const MAX_ORDER: usize = 11;

/// A contiguous physical memory region managed by a binary-buddy allocator.
pub struct Area<A: RawAddressSpace> {
    // Physical range covered by this area.
    area: Range<PhysicalAddress>,
    // Per-frame bookkeeping slots; slot index for an address is
    // (addr - area.start) / PAGE_SIZE. Entries are initialized lazily.
    frames: &'static mut [MaybeUninit<Frame>],

    // free_lists[order] holds free blocks of 2^order contiguous frames,
    // threaded through the blocks' first-frame bookkeeping entries.
    free_lists: [List<Frame>; MAX_ORDER],

    // Largest order currently known to have been present in a free list;
    // used as an alignment hint.
    max_order: usize,
    // Total number of frames managed by this area.
    total_frames: usize,
    // Number of frames currently allocated.
    used_frames: usize,

    // Ties the area to the address-space parameters (PAGE_SIZE etc.)
    // without storing an address space.
    _aspace: PhantomData<A>,
}
35
+
36
+
// SAFETY: `Area` exclusively owns its bookkeeping slice and the intrusive
// free lists threaded through it; the raw pointers inside are only touched
// through `&mut self` methods. NOTE(review): soundness also relies on
// `List<Frame>` nodes never being aliased outside this struct — confirm.
unsafe impl<A: RawAddressSpace + Send> Send for Area<A> {}
unsafe impl<A: RawAddressSpace + Sync> Sync for Area<A> {}
38
+
39
+
impl<A: RawAddressSpace> fmt::Debug for Area<A> {
    /// Formats the area for diagnostics. The bookkeeping slice is summarized
    /// by its length rather than dumped, since its entries may be
    /// uninitialized and must not be read.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Area");
        dbg.field("area", &self.area);
        dbg.field(
            "frames",
            &format_args!("&[MaybeUninit<FrameInner>; {}]", self.frames.len()),
        );
        dbg.field("free_lists", &self.free_lists);
        dbg.field("max_order", &self.max_order);
        dbg.field("total_frames", &self.total_frames);
        dbg.field("used_frames", &self.used_frames);
        dbg.finish()
    }
}
54
+
55
+
impl<A: RawAddressSpace> Area<A> {
56
+
pub fn new(area: Range<PhysicalAddress>, frames: &'static mut [MaybeUninit<Frame>]) -> Self {
57
+
let mut free_lists = [const { List::new() }; MAX_ORDER];
58
+
let mut total_frames = 0;
59
+
let mut max_order = 0;
60
+
61
+
let mut remaining_bytes = area.size();
62
+
let mut addr = area.start;
63
+
64
+
// This is the main area initialization loop. We loop through the `area` "chopping off" the
65
+
// largest possible min_block_size-aligned block from the area and add that to its corresponding
66
+
// free list.
67
+
//
68
+
// Note: Remember that for buddy allocators `size == align`. That means we both need to check
69
+
// the alignment and size of our remaining area and can only chop off whatever is smaller.
70
+
while remaining_bytes > 0 {
71
+
// println!("processing next chunk. remaining_bytes={remaining_bytes};addr={addr:?}");
72
+
73
+
// the largest size we can chop off given the alignment of the remaining area
74
+
let max_align = if addr == PhysicalAddress::ZERO {
75
+
// if area happens to start exactly at address 0x0 our calculation below doesn't work.
76
+
// address 0x0 actually supports *any* alignment so we special-case it and return `MAX`
77
+
usize::MAX
78
+
} else {
79
+
// otherwise mask out the least significant bit of the address to figure out its alignment
80
+
addr.get() & (!addr.get() + 1)
81
+
};
82
+
// the largest size we can chop off given the size of the remaining area
83
+
// which is the next smaller power of two
84
+
let max_size = 1 << remaining_bytes.ilog2();
85
+
86
+
// our chosen size will be the smallest of
87
+
// - the maximum size by remaining areas alignment
88
+
// - the maximum size by remaining areas size
89
+
// - the maximum block size supported by this allocator
90
+
let size = cmp::min(
91
+
cmp::min(max_align, max_size),
92
+
A::PAGE_SIZE << (MAX_ORDER - 1),
93
+
);
94
+
debug_assert!(size.is_multiple_of(A::PAGE_SIZE));
95
+
96
+
let order = (size.trailing_zeros() as u8 - A::PAGE_SIZE_LOG_2) as usize;
97
+
98
+
{
99
+
let frame = frames[total_frames].write(Frame::new(addr, 0));
100
+
101
+
free_lists[order].push_back(NonNull::from(frame));
102
+
}
103
+
104
+
total_frames += 1 << order;
105
+
max_order = cmp::max(max_order, order);
106
+
addr = addr.checked_add(size).unwrap();
107
+
remaining_bytes -= size;
108
+
}
109
+
110
+
// Make sure we've accounted for all frames
111
+
debug_assert_eq!(total_frames, area.size() / A::PAGE_SIZE);
112
+
113
+
Self {
114
+
area,
115
+
frames,
116
+
117
+
free_lists,
118
+
119
+
max_order,
120
+
total_frames,
121
+
used_frames: 0,
122
+
123
+
_aspace: PhantomData,
124
+
}
125
+
}
126
+
127
+
pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
128
+
#[cfg(debug_assertions)]
129
+
self.assert_valid();
130
+
131
+
let min_order = self.allocation_order(layout)?;
132
+
133
+
// Starting at the smallest sufficient size class, search for a free block. If we find one in
134
+
// a free list, return it and its order.
135
+
let (block, block_order) = self.free_lists[min_order..]
136
+
.iter_mut()
137
+
.enumerate()
138
+
.find_map(|(i, list)| list.pop_back().map(|block| (block, i + min_order)))
139
+
.ok_or(AllocError)?;
140
+
141
+
// if the block we found is larger than the `min_order` we need, we repeatedly split off
142
+
// the upper half (of decreasing size) until we reach the desired size. The split off blocks
143
+
// are returned to their appropriate free lists.
144
+
for order in (min_order..block_order).rev() {
145
+
let block = unsafe { block.as_ref() };
146
+
let buddy_addr = block.addr().checked_add(A::PAGE_SIZE << order).unwrap();
147
+
let buddy = self.frame_for_addr(buddy_addr).unwrap();
148
+
149
+
let buddy = buddy.write(Frame::new(buddy_addr, 0));
150
+
let buddy = NonNull::from(buddy);
151
+
152
+
self.free_lists[order].push_back(buddy);
153
+
}
154
+
155
+
let alloc_size_frames = 1 << min_order;
156
+
157
+
// lazily initialize all frames
158
+
for idx in 0..alloc_size_frames {
159
+
let block = unsafe { block.as_ref() };
160
+
let addr = block.addr().checked_add(A::PAGE_SIZE * idx).unwrap();
161
+
162
+
let frame = self.frame_for_addr(addr).unwrap();
163
+
frame.write(Frame::new(addr, 1));
164
+
}
165
+
166
+
self.used_frames += alloc_size_frames;
167
+
168
+
#[cfg(debug_assertions)]
169
+
self.assert_valid();
170
+
171
+
Ok(NonNull::slice_from_raw_parts(block, alloc_size_frames))
172
+
}
173
+
174
+
pub unsafe fn deallocate(&mut self, mut block: NonNull<Frame>, layout: Layout) {
175
+
#[cfg(debug_assertions)]
176
+
self.assert_valid();
177
+
178
+
let initial_order = self.allocation_order(layout).unwrap();
179
+
let mut order = initial_order;
180
+
181
+
while order < self.free_lists.len() - 1 {
182
+
let block_ = unsafe { block.as_ref() };
183
+
if let Some(buddy) = self.buddy_addr(order, block_.addr())
184
+
&& cmp::min(block_.addr(), buddy).is_aligned_to(A::PAGE_SIZE << (order + 1))
185
+
&& self.remove_from_free_list(order, buddy)
186
+
{
187
+
let buddy: NonNull<Frame> =
188
+
NonNull::from(self.frame_for_addr(buddy).unwrap()).cast();
189
+
block = cmp::min(buddy, block);
190
+
order += 1;
191
+
} else {
192
+
break;
193
+
}
194
+
}
195
+
196
+
self.free_lists[order].push_back(block);
197
+
self.used_frames -= 1 << initial_order;
198
+
self.max_order = cmp::max(self.max_order, order);
199
+
200
+
#[cfg(debug_assertions)]
201
+
self.assert_valid();
202
+
}
203
+
204
+
pub fn max_alignment_hint(&self) -> usize {
205
+
self.order_size(self.max_order)
206
+
}
207
+
208
+
fn frame_for_addr(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<Frame>> {
209
+
let relative = addr.checked_sub_addr(self.area.start).unwrap();
210
+
let idx = relative >> A::PAGE_SIZE_LOG_2;
211
+
Some(&mut self.frames[idx])
212
+
}
213
+
214
+
pub(crate) fn contains_frame(&self, addr: PhysicalAddress) -> bool {
215
+
self.area.contains(&addr)
216
+
}
217
+
218
+
fn buddy_addr(&self, order: usize, block: PhysicalAddress) -> Option<PhysicalAddress> {
219
+
assert!(block >= self.area.start);
220
+
assert!(block.is_aligned_to(A::PAGE_SIZE << order));
221
+
222
+
let relative = block.checked_sub_addr(self.area.start).unwrap();
223
+
let size = self.order_size(order);
224
+
if size >= self.area.size() {
225
+
// MAX_ORDER blocks do not have buddies
226
+
None
227
+
} else {
228
+
// Fun: We can find our buddy by xoring the right bit in our
229
+
// offset from the base of the heap.
230
+
Some(self.area.start.checked_add(relative ^ size).unwrap())
231
+
}
232
+
}
233
+
234
+
fn remove_from_free_list(&mut self, order: usize, to_remove: PhysicalAddress) -> bool {
235
+
let mut c = self.free_lists[order].cursor_front_mut();
236
+
237
+
while let Some(candidate) = c.current() {
238
+
if candidate.addr() == to_remove {
239
+
c.remove_current().unwrap();
240
+
return true;
241
+
}
242
+
243
+
c.move_next();
244
+
}
245
+
246
+
false
247
+
}
248
+
249
+
// The size of the blocks we allocate for a given order.
250
+
const fn order_size(&self, order: usize) -> usize {
251
+
1 << (A::PAGE_SIZE_LOG_2 as usize + order)
252
+
}
253
+
254
+
const fn allocation_size(&self, layout: Layout) -> Result<usize, AllocError> {
255
+
// We can only allocate blocks that are at least one page
256
+
if !layout.size().is_multiple_of(A::PAGE_SIZE) {
257
+
return Err(AllocError);
258
+
}
259
+
260
+
// We can only allocate blocks that are at least page aligned
261
+
if !layout.align().is_multiple_of(A::PAGE_SIZE) {
262
+
return Err(AllocError);
263
+
}
264
+
265
+
let size = layout.size().next_power_of_two();
266
+
267
+
// We cannot allocate blocks larger than our largest size class
268
+
if size > self.order_size(self.free_lists.len()) {
269
+
return Err(AllocError);
270
+
}
271
+
272
+
Ok(size)
273
+
}
274
+
275
+
const fn allocation_order(&self, layout: Layout) -> Result<usize, AllocError> {
276
+
if let Ok(size) = self.allocation_size(layout) {
277
+
Ok((size.ilog2() as u8 - A::PAGE_SIZE_LOG_2) as usize)
278
+
} else {
279
+
Err(AllocError)
280
+
}
281
+
}
282
+
283
+
fn assert_valid(&self) {
284
+
for (order, l) in self.free_lists.iter().enumerate() {
285
+
l.assert_valid();
286
+
287
+
for f in l {
288
+
assert!(
289
+
f.addr().is_aligned_to(A::PAGE_SIZE << order),
290
+
"frame {f:?} is not aligned to order {order}"
291
+
);
292
+
}
293
+
}
294
+
295
+
assert_eq!(frames_in_area(self) + self.used_frames, self.total_frames);
296
+
}
297
+
}
298
+
299
+
/// Counts the free frames currently sitting in `area`'s free lists
/// (each order-`o` block contributes `2^o` frames).
fn frames_in_area<A: RawAddressSpace>(area: &Area<A>) -> usize {
    area.free_lists
        .iter()
        .enumerate()
        .map(|(order, list)| list.len() << order)
        .sum()
}
306
+
307
+
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use proptest::{prop_assert, prop_assert_eq, prop_assume, prop_compose, proptest};

    use super::*;
    use crate::test_utils::TestAddressSpace;

    const PAGE_SIZE: usize = 4096;

    prop_compose! {
        // Generate arbitrary frame counts and scale them by PAGE_SIZE, thus
        // producing only page-aligned values in the desired range.
        fn page_aligned(max: usize)(base in 0..max/PAGE_SIZE) -> usize { base * PAGE_SIZE }
    }

    proptest! {
        #[test]
        fn new_fixed_base(num_frames in 0..50_000usize) {
            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                PhysicalAddress::ZERO..PhysicalAddress::new(num_frames * PAGE_SIZE),
                {
                    // leaked so the bookkeeping slice can be &'static mut
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            // let's check whether the area correctly initialized itself
            //
            // since we start on an aligned base address (0x0) we expect it have split off chunks
            // largest-to-smallest. We replicate the process here, but take a block from its free list.
            let mut frames_remaining = num_frames;
            while frames_remaining > 0 {
                // clamp the order we calculate at the max possible order
                let chunk_order = cmp::min(frames_remaining.ilog2() as usize, MAX_ORDER - 1);

                let chunk = area.free_lists[chunk_order].pop_back();
                prop_assert!(chunk.is_some(), "expected chunk of order {chunk_order}");

                frames_remaining -= 1 << chunk_order;
            }
            // At the end of this process we expect all free lists to be empty
            prop_assert!(area.free_lists.iter().all(|list| list.is_empty()));
        }

        #[test]
        fn new_arbitrary_base(num_frames in 0..50_000usize, area_start in page_aligned(usize::MAX)) {

            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            // only the internal invariants are checked here
            area.assert_valid();

            // TODO figure out if we can test the free lists in a sensible way
        }

        #[test]
        fn alloc_exhaustion(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX)) {
            // NOTE(review): despite the name, this test performs no
            // allocations yet — it only checks that a freshly built area
            // accounts for every frame in its free lists.
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            let mut area: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area.assert_valid();

            debug_assert_eq!(frames_in_area(&mut area), num_frames);
        }

        #[test]
        fn alloc_dealloc(num_frames in 0..5_000usize, area_start in page_aligned(usize::MAX), alloc_frames in 1..500usize) {
            let area = {
                let area_end = area_start.checked_add(num_frames * PAGE_SIZE);
                prop_assume!(area_end.is_some());
                PhysicalAddress::new(area_start)..PhysicalAddress::new(area_end.unwrap())
            };

            // `area1` stays untouched as the reference; `area2` performs one
            // allocate/deallocate round-trip and must end up identical.
            let area1: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area.clone(),
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area1.assert_valid();

            let mut area2: Area<TestAddressSpace<PAGE_SIZE, 38>> = Area::new(
                area,
                {
                    let mut frames: Vec<MaybeUninit<Frame>> = Vec::with_capacity(num_frames);
                    frames.resize_with(num_frames, || MaybeUninit::uninit());
                    frames.leak()
                }
            );
            area2.assert_valid();

            // we can only allocate contiguous blocks of the largest order available
            prop_assume!(alloc_frames < (area2.max_alignment_hint() / PAGE_SIZE));

            let layout = Layout::from_size_align(alloc_frames * PAGE_SIZE, PAGE_SIZE).unwrap();

            let block = area2.allocate(layout).unwrap();
            prop_assert!(block.len() >= alloc_frames);

            unsafe { area2.deallocate(block.cast(), layout); }

            // every frame must be back on a free list ...
            assert_eq!(frames_in_area(&area2), num_frames);

            // ... and the free-list structure must match the untouched area
            for (order, (f1, f2)) in area1.free_lists.iter().zip(area2.free_lists.iter()).enumerate() {
                prop_assert_eq!(f1.len(), f2.len(), "free lists at order {} have different lengths {} vs {}", order, f1.len(), f2.len());
            }
        }
    }
}
+133
libs/mem/src/frame_alloc/area_selection.rs
+133
libs/mem/src/frame_alloc/area_selection.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
use alloc::slice;
9
+
use core::fmt::Formatter;
10
+
use core::marker::PhantomData;
11
+
use core::mem;
12
+
use core::mem::MaybeUninit;
13
+
use core::ops::Range;
14
+
15
+
use fallible_iterator::FallibleIterator;
16
+
use smallvec::SmallVec;
17
+
18
+
use crate::address_space::RawAddressSpace;
19
+
use crate::{AddressRangeExt, Frame, PhysicalAddress};
20
+
21
+
/// Budget for bookkeeping bytes "wasted" on unusable hole frames when
/// merging disjoint regions into a single area.
const MAX_WASTED_AREA_BYTES: usize = 0x8_4000; // 528 KiB

/// A usable allocator area plus the storage carved out for its bookkeeping.
#[derive(Debug)]
pub struct AreaSelection {
    /// Page-aligned physical range the allocator may hand frames out of.
    pub area: Range<PhysicalAddress>,
    /// Uninitialized per-frame bookkeeping slots, carved off the area's tail.
    pub bookkeeping: &'static mut [MaybeUninit<Frame>],
    /// Bookkeeping bytes wasted on holes since the previous selection.
    pub wasted_bytes: usize,
}

/// A physical region that could not be turned into a usable area
/// (empty after alignment, or too small to hold its own bookkeeping).
#[derive(Debug)]
pub struct SelectionError {
    pub range: Range<PhysicalAddress>,
}

/// Fallible iterator yielding [`AreaSelection`]s; created by [`select_areas`].
pub struct ArenaSelections<A: RawAddressSpace> {
    // Regions not yet consumed; popped from the back.
    allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
    // Waste accumulated while building the in-progress selection.
    wasted_bytes: usize,

    _aspace: PhantomData<A>,
}
41
+
42
+
pub fn select_areas<A: RawAddressSpace>(
43
+
allocatable_regions: SmallVec<[Range<PhysicalAddress>; 4]>,
44
+
) -> ArenaSelections<A> {
45
+
ArenaSelections {
46
+
allocatable_regions,
47
+
wasted_bytes: 0,
48
+
49
+
_aspace: PhantomData,
50
+
}
51
+
}
52
+
53
+
impl<A: RawAddressSpace> FallibleIterator for ArenaSelections<A> {
    type Item = AreaSelection;
    type Error = SelectionError;

    /// Produces the next area selection.
    ///
    /// Pops a region and greedily merges further regions into it — bridging
    /// the holes between them — as long as the bookkeeping that would be
    /// wasted on hole frames stays within `MAX_WASTED_AREA_BYTES`. The
    /// resulting area is page-aligned and its tail is carved off to hold its
    /// own per-frame bookkeeping array.
    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        let Some(mut area) = self.allocatable_regions.pop() else {
            return Ok(None);
        };

        while let Some(region) = self.allocatable_regions.pop() {
            debug_assert!(!area.is_overlapping(&region));

            // number of page frames in the gap between `area` and `region`
            let pages_in_hole = if area.end <= region.start {
                // the region is higher than the current area
                region.start.checked_sub_addr(area.end).unwrap() / A::PAGE_SIZE
            } else {
                debug_assert!(region.end <= area.start);
                // the region is lower than the current area
                area.start.checked_sub_addr(region.end).unwrap() / A::PAGE_SIZE
            };

            // bookkeeping bytes the merged area would spend on hole frames
            let waste_from_hole = size_of::<Frame>() * pages_in_hole;

            if self.wasted_bytes + waste_from_hole > MAX_WASTED_AREA_BYTES {
                // over budget: put the region back and finish this selection
                self.allocatable_regions.push(region);
                break;
            } else {
                self.wasted_bytes += waste_from_hole;

                // extend the area across the hole to swallow the region
                if area.end <= region.start {
                    area.end = region.end;
                } else {
                    area.start = region.start;
                }
            }
        }

        let mut aligned = area.checked_align_in(A::PAGE_SIZE).unwrap();
        // We can't use empty areas anyway
        if aligned.is_empty() {
            return Err(SelectionError { range: aligned });
        }

        // one bookkeeping slot per page in the area
        let bookkeeping_size_frames = aligned.size() / A::PAGE_SIZE;

        let bookkeeping_start = aligned
            .end
            .checked_sub(bookkeeping_size_frames * size_of::<Frame>())
            .unwrap()
            .align_down(A::PAGE_SIZE);

        // The area has no space to hold its own bookkeeping
        if bookkeeping_start < aligned.start {
            return Err(SelectionError { range: aligned });
        }

        // SAFETY: the bookkeeping range is carved out of the area and never
        // handed to the allocator (the area's end is truncated below), so
        // this slice is exclusively owned. NOTE(review): dereferencing a
        // physical address directly assumes an identity (or fixed-offset)
        // mapping is in effect — confirm against the paging setup.
        let bookkeeping = unsafe {
            slice::from_raw_parts_mut(
                bookkeeping_start.as_mut_ptr().cast(),
                bookkeeping_size_frames,
            )
        };
        aligned.end = bookkeeping_start;

        Ok(Some(AreaSelection {
            area: aligned,
            bookkeeping,
            // report (and reset) the waste attributed to this selection
            wasted_bytes: mem::take(&mut self.wasted_bytes),
        }))
    }
}
124
+
125
+
// ===== impl SelectionError =====
126
+
127
+
impl core::fmt::Display for SelectionError {
128
+
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
129
+
todo!()
130
+
}
131
+
}
132
+
133
+
impl core::error::Error for SelectionError {}
+15
libs/mem/src/lib.rs
+15
libs/mem/src/lib.rs
···
1
+
#![cfg_attr(not(test), no_std)]
2
+
extern crate alloc;
3
+
4
+
mod access_rules;
5
+
mod addresses;
6
+
mod frame;
7
+
mod test_utils;
8
+
pub mod address_space;
9
+
pub mod frame_alloc;
10
+
mod utils;
11
+
12
+
pub type Result<T> = anyhow::Result<T>;
13
+
pub use frame::{FrameRef, Frame};
14
+
pub use addresses::{PhysicalAddress, VirtualAddress, AddressRangeExt};
15
+
pub use access_rules::{AccessRules, WriteOrExecute};
+169
libs/mem/src/test_utils.rs
+169
libs/mem/src/test_utils.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
extern crate std;
9
+
10
+
use alloc::collections::BTreeMap;
11
+
use core::marker::PhantomData;
12
+
use core::num::NonZeroUsize;
13
+
14
+
use crate::address_space::{Flush, RawAddressSpace};
15
+
use crate::{AccessRules, PhysicalAddress, VirtualAddress};
16
+
17
+
/// In-memory mock of `RawAddressSpace` for tests, backed by a `BTreeMap`
/// keyed by each mapping's *inclusive* end address so that
/// `range(addr..).next()` finds the candidate mapping for an address.
#[derive(Debug)]
pub struct TestAddressSpace<const PAGE_SIZE: usize, const ADDR_BITS: u32> {
    // key = mapping.virt + mapping.len - 1 (last mapped byte)
    mappings: BTreeMap<VirtualAddress, Mapping>,
}

/// A single recorded virtual-to-physical mapping.
#[derive(Debug)]
pub struct Mapping {
    // first mapped virtual address
    pub virt: VirtualAddress,
    // backing physical base address
    pub phys: PhysicalAddress,
    // length of the mapping in bytes
    pub len: NonZeroUsize,
    // protection flags for the whole mapping
    pub access_rules: AccessRules,
}

/// No-op TLB-flush token — tests have nothing to flush.
pub struct TestFlush {
    _priv: PhantomData<()>,
}
33
+
34
+
impl<const PAGE_SIZE: usize, const ADDR_BITS: u32> TestAddressSpace<PAGE_SIZE, ADDR_BITS> {
35
+
pub const fn new() -> Self {
36
+
Self {
37
+
mappings: BTreeMap::new(),
38
+
}
39
+
}
40
+
41
+
pub fn get_mapping_containing(&self, addr: VirtualAddress) -> Option<&Mapping> {
42
+
let (end, mapping) = self.mappings.range(addr..).next()?;
43
+
44
+
if addr > *end { None } else { Some(mapping) }
45
+
}
46
+
47
+
pub fn get_mapping_mut_containing(&mut self, addr: VirtualAddress) -> Option<&mut Mapping> {
48
+
let (end, mapping) = self.mappings.range_mut(addr..).next()?;
49
+
50
+
if addr > *end { None } else { Some(mapping) }
51
+
}
52
+
53
+
pub fn remove_mapping_containing(&mut self, addr: VirtualAddress) -> Option<Mapping> {
54
+
let (key, _) = self.mappings.range_mut(addr..).next()?;
55
+
let key = *key;
56
+
57
+
Some(self.mappings.remove(&key).unwrap())
58
+
}
59
+
}
60
+
61
+
unsafe impl<const PAGE_SIZE: usize, const ADDR_BITS: u32> RawAddressSpace for TestAddressSpace<PAGE_SIZE, ADDR_BITS> {
62
+
const PAGE_SIZE: usize = PAGE_SIZE;
63
+
const VIRT_ADDR_BITS: u32 = ADDR_BITS;
64
+
65
+
type Flush = TestFlush;
66
+
67
+
fn flush(&self) -> Self::Flush {
68
+
TestFlush { _priv: PhantomData }
69
+
}
70
+
71
+
fn lookup(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, AccessRules)> {
72
+
let mapping = self.get_mapping_containing(virt)?;
73
+
74
+
let offset = virt.checked_sub_addr(mapping.virt).unwrap();
75
+
76
+
Some((
77
+
mapping.phys.checked_add(offset).unwrap(),
78
+
mapping.access_rules,
79
+
))
80
+
}
81
+
82
+
unsafe fn map(
83
+
&mut self,
84
+
virt: VirtualAddress,
85
+
phys: PhysicalAddress,
86
+
len: NonZeroUsize,
87
+
access_rules: AccessRules,
88
+
_flush: &mut Self::Flush,
89
+
) -> crate::Result<()> {
90
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
91
+
assert!(phys.is_aligned_to(Self::PAGE_SIZE));
92
+
assert!(self.get_mapping_containing(virt).is_none());
93
+
94
+
let end_virt = virt.checked_add(len.get() - 1).unwrap();
95
+
assert!(end_virt.is_aligned_to(Self::PAGE_SIZE));
96
+
97
+
let prev = self.mappings.insert(
98
+
end_virt,
99
+
Mapping {
100
+
virt,
101
+
phys,
102
+
len,
103
+
access_rules,
104
+
},
105
+
);
106
+
assert!(prev.is_none());
107
+
108
+
Ok(())
109
+
}
110
+
111
+
unsafe fn unmap(
112
+
&mut self,
113
+
mut virt: VirtualAddress,
114
+
len: NonZeroUsize,
115
+
_flush: &mut Self::Flush,
116
+
) {
117
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
118
+
assert!(
119
+
virt.checked_add(len.get())
120
+
.unwrap()
121
+
.is_aligned_to(Self::PAGE_SIZE)
122
+
);
123
+
124
+
let mut bytes_remaining = len.get();
125
+
126
+
while bytes_remaining > 0 {
127
+
let mapping = self.remove_mapping_containing(virt).unwrap();
128
+
assert_eq!(mapping.virt, virt);
129
+
130
+
bytes_remaining -= mapping.len.get();
131
+
virt = virt.checked_sub(mapping.len.get()).unwrap();
132
+
}
133
+
}
134
+
135
+
unsafe fn set_access_rules(
136
+
&mut self,
137
+
mut virt: VirtualAddress,
138
+
len: NonZeroUsize,
139
+
access_rules: AccessRules,
140
+
_flush: &mut Self::Flush,
141
+
) {
142
+
assert!(virt.is_aligned_to(Self::PAGE_SIZE));
143
+
assert!(
144
+
virt.checked_add(len.get())
145
+
.unwrap()
146
+
.is_aligned_to(Self::PAGE_SIZE)
147
+
);
148
+
149
+
let mut bytes_remaining = len.get();
150
+
151
+
while bytes_remaining > 0 {
152
+
let mapping = self.get_mapping_mut_containing(virt).unwrap();
153
+
assert_eq!(mapping.virt, virt);
154
+
155
+
mapping.access_rules = access_rules;
156
+
157
+
bytes_remaining -= mapping.len.get();
158
+
virt = virt.checked_sub(mapping.len.get()).unwrap();
159
+
}
160
+
}
161
+
}
162
+
163
+
// ===== impl TestFlush =====

impl Flush for TestFlush {
    /// Tests have no TLB to flush, so this always succeeds immediately.
    fn flush(self) -> crate::Result<()> {
        Ok(())
    }
}
+31
libs/mem/src/utils.rs
+31
libs/mem/src/utils.rs
···
1
+
// Copyright 2025. Jonas Kruckenberg
2
+
//
3
+
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
+
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
+
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
+
// copied, modified, or distributed except according to those terms.
7
+
8
+
/// Debug-build-only check of an `unsafe` function's preconditions, modeled
/// on `core`'s internal `assert_unsafe_precondition`. The named arguments
/// are captured into a nested `const fn` so the check disappears entirely
/// from release builds.
macro_rules! assert_unsafe_precondition_ {
    ($message:expr, ($($name:ident:$ty:ty = $arg:expr),*$(,)?) => $e:expr $(,)?) => {
        {
            // This check is inlineable, but not by the MIR inliner.
            // The reason for this is that the MIR inliner is in an exceptionally bad position
            // to think about whether or not to inline this. In MIR, this call is gated behind `debug_assertions`,
            // which will codegen to `false` in release builds. Inlining the check would be wasted work in that case and
            // would be bad for compile times.
            //
            // LLVM on the other hand sees the constant branch, so if it's `false`, it can immediately delete it without
            // inlining the check. If it's `true`, it can inline it and get significantly better performance.
            #[inline]
            const fn precondition_check($($name:$ty),*) {
                assert!($e, concat!("unsafe precondition(s) violated: ", $message,
                    "\n\nThis indicates a bug in the program. \
                    This Undefined Behavior check is optional, and cannot be relied on for safety."))
            }

            #[cfg(debug_assertions)]
            precondition_check($($arg,)*);
        }
    };
}
pub(crate) use assert_unsafe_precondition_;
+6
-2
libs/wavltree/src/cursor.rs
+6
-2
libs/wavltree/src/cursor.rs
···
88
88
pub unsafe fn get_ptr(&self) -> Link<T> {
89
89
self.current
90
90
}
91
-
pub fn get(&self) -> Option<&'a T> {
92
-
unsafe { self.current.map(|ptr| ptr.as_ref()) }
91
+
pub const fn get(&self) -> Option<&'a T> {
92
+
if let Some(ptr) = self.current {
93
+
Some(unsafe { ptr.as_ref() })
94
+
} else {
95
+
None
96
+
}
93
97
}
94
98
pub fn get_mut(&mut self) -> Option<Pin<&'a mut T>> {
95
99
unsafe { self.current.map(|mut ptr| Pin::new_unchecked(ptr.as_mut())) }
+1
-1
libs/wavltree/src/lib.rs
+1
-1
libs/wavltree/src/lib.rs
···
1510
1510
}
1511
1511
1512
1512
/// Returns `true` if this node is currently linked to a [WAVLTree].
1513
-
pub fn is_linked(&self) -> bool {
1513
+
pub const fn is_linked(&self) -> bool {
1514
1514
let inner = unsafe { &*self.inner.get() };
1515
1515
inner.up.is_some() || inner.left.is_some() || inner.right.is_some()
1516
1516
}
History
5 rounds
0 comments
jonaskruckenberg.de
submitted
#4
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
merge conflicts detected
expand
collapse
expand
collapse
- Cargo.lock:135
- libs/wavltree/src/cursor.rs:88
expand 0 comments
jonaskruckenberg.de
submitted
#3
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#2
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#1
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#0
1 commit
expand
collapse
refactor: separate memory subsystem into own crate