Cargo.lock
Cargo.lock
This patch was likely rebased, as context lines do not match.
+1
libs/mem/Cargo.toml
+1
libs/mem/Cargo.toml
libs/mem/proptest-regressions/frame.txt
libs/mem/proptest-regressions/frame.txt
This file has not been changed.
libs/mem/src/access_rules.rs
libs/mem/src/access_rules.rs
This file has not been changed.
+15
-17
libs/mem/src/address_space.rs
+15
-17
libs/mem/src/address_space.rs
···
124
124
125
125
pub struct AddressSpace<R: RawAddressSpace> {
126
126
raw: R,
127
-
regions: WAVLTree<AddressSpaceRegion>,
128
-
batched_raw: Batch,
127
+
regions: WAVLTree<AddressSpaceRegion<R>>,
128
+
batch: Batch,
129
129
max_range: Range<VirtualAddress>,
130
130
rng: Option<ChaCha20Rng>,
131
131
}
···
135
135
Self {
136
136
raw,
137
137
regions: WAVLTree::new(),
138
-
batched_raw: Batch::new(),
138
+
batch: Batch::new(),
139
139
max_range: VirtualAddress::MIN..VirtualAddress::MAX,
140
140
rng,
141
141
}
···
179
179
180
180
// TODO OPTIONAL eagerly commit a few pages
181
181
182
-
self.batched_raw.flush_changes(&mut self.raw)?;
182
+
self.batch.flush_changes(&mut self.raw)?;
183
183
184
184
Ok(region.as_non_null())
185
185
}
···
212
212
213
213
// TODO OPTIONAL eagerly commit a few pages
214
214
215
-
self.batched_raw.flush_changes(&mut self.raw)?;
215
+
self.batch.flush_changes(&mut self.raw)?;
216
216
217
217
Ok(region.as_non_null())
218
218
}
···
236
236
// Safety: responsibility of caller
237
237
let mut region = unsafe { cursor.remove().unwrap_unchecked() };
238
238
239
-
region.decommit(.., &mut self.batched_raw).unwrap();
240
-
241
-
self.batched_raw.flush_changes(&mut self.raw).unwrap();
239
+
region.decommit(.., &mut self.batch, &mut self.raw).unwrap();
242
240
}
243
241
244
242
/// Attempts to extend the virtual memory reservation.
···
530
528
// Safety: responsibility of caller
531
529
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
532
530
533
-
region.update_access_rules(access_rules, &mut self.batched_raw)?;
531
+
region.update_access_rules(access_rules, &mut self.batch)?;
534
532
535
-
self.batched_raw.flush_changes(&mut self.raw)?;
533
+
self.batch.flush_changes(&mut self.raw)?;
536
534
537
535
Ok(())
538
536
}
···
585
583
// Safety: responsibility of caller
586
584
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
587
585
588
-
region.clear(&mut self.batched_raw)?;
586
+
region.clear(&mut self.batch)?;
589
587
590
-
self.batched_raw.flush_changes(&mut self.raw)?;
588
+
self.batch.flush_changes(&mut self.raw)?;
591
589
592
590
Ok(region.as_non_null())
593
591
}
···
664
662
// Safety: responsibility of caller
665
663
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
666
664
667
-
region.grow_in_place(new_layout, next_range, &mut self.batched_raw)?;
665
+
region.grow_in_place(new_layout, next_range, &mut self.batch)?;
668
666
669
-
self.batched_raw.flush_changes(&mut self.raw)?;
667
+
self.batch.flush_changes(&mut self.raw)?;
670
668
671
669
Ok(region.as_non_null())
672
670
}
···
692
690
// Safety: responsibility of caller
693
691
let mut region = unsafe { cursor.get_mut().unwrap_unchecked() };
694
692
695
-
region.shrink(new_layout, &mut self.batched_raw)?;
693
+
region.shrink(new_layout, &mut self.batch)?;
696
694
697
-
self.batched_raw.flush_changes(&mut self.raw)?;
695
+
self.batch.flush_changes(&mut self.raw)?;
698
696
699
697
Ok(region.as_non_null())
700
698
}
···
722
720
"cannot find free spot for layout {new_layout:?}"
723
721
))?;
724
722
725
-
region.move_to(spot, new_layout, &mut self.batched_raw)?;
723
+
region.move_to(spot, new_layout, &mut self.batch)?;
726
724
727
725
Ok(region.as_non_null())
728
726
}
+14
-3
libs/mem/src/address_space/batch.rs
+14
-3
libs/mem/src/address_space/batch.rs
···
66
66
/// - `len` must an integer multiple of `Self::PAGE_SIZE`
67
67
///
68
68
/// [`map`]: RawAddressSpace::map
69
-
pub fn map(
69
+
pub unsafe fn map(
70
70
&mut self,
71
71
virt: VirtualAddress,
72
72
phys: PhysicalAddress,
···
173
173
// Safety: the caller promised the correctness of the values on construction of
174
174
// the operation.
175
175
unsafe {
176
-
raw_aspace.map(op.virt, op.phys, op.len, op.access_rules, &mut flush)?;
176
+
raw_aspace.map(
177
+
op.virt,
178
+
op.phys,
179
+
op.len,
180
+
op.access_rules,
181
+
&mut flush,
182
+
)?;
177
183
}
178
184
}
179
185
BatchOperation::Unmap(op) => {
···
193
199
// Safety: the caller promised the correctness of the values on construction of
194
200
// the operation.
195
201
unsafe {
196
-
raw_aspace.set_access_rules(op.virt, op.len, op.access_rules, &mut flush);
202
+
raw_aspace.set_access_rules(
203
+
op.virt,
204
+
op.len,
205
+
op.access_rules,
206
+
&mut flush,
207
+
);
197
208
}
198
209
}
199
210
};
+252
-79
libs/mem/src/address_space/region.rs
+252
-79
libs/mem/src/address_space/region.rs
···
7
7
8
8
use alloc::boxed::Box;
9
9
use core::alloc::Layout;
10
+
use core::marker::PhantomData;
10
11
use core::mem::offset_of;
11
-
use core::ops::{Range, RangeBounds};
12
+
use core::num::NonZeroUsize;
13
+
use core::ops::{Bound, Range, RangeBounds};
12
14
use core::pin::Pin;
13
15
use core::ptr::NonNull;
14
-
use core::{cmp, mem, slice};
16
+
use core::{cmp, fmt, mem, slice};
15
17
16
-
use anyhow::bail;
18
+
use fallible_iterator::FallibleIterator;
17
19
use pin_project::pin_project;
18
20
19
-
use crate::address_space::batch::Batch;
21
+
use crate::address_space::{Batch, RawAddressSpace};
22
+
use crate::vmo::Vmo;
20
23
use crate::{AccessRules, AddressRangeExt, VirtualAddress};
21
24
22
25
#[pin_project]
23
26
#[derive(Debug)]
24
-
pub struct AddressSpaceRegion {
27
+
pub struct AddressSpaceRegion<R> {
28
+
range: Range<VirtualAddress>,
25
29
access_rules: AccessRules,
26
30
layout: Layout,
27
-
range: Range<VirtualAddress>,
31
+
vmo: Vmo,
32
+
vmo_offset: usize,
33
+
28
34
/// The address range covered by this region and its WAVL tree subtree, used when allocating new regions
29
35
subtree_range: Range<VirtualAddress>,
30
36
/// The largest gap in this subtree, used when allocating new regions
31
37
max_gap: usize,
32
38
/// Links to other regions in the WAVL tree
33
-
links: wavltree::Links<AddressSpaceRegion>,
39
+
links: wavltree::Links<AddressSpaceRegion<R>>,
40
+
41
+
_raw_aspace: PhantomData<R>,
34
42
}
35
43
36
-
impl AddressSpaceRegion {
37
-
pub const fn new(spot: VirtualAddress, layout: Layout, access_rules: AccessRules) -> Self {
44
+
impl<R: RawAddressSpace> AddressSpaceRegion<R> {
45
+
pub const fn new(
46
+
spot: VirtualAddress,
47
+
layout: Layout,
48
+
access_rules: AccessRules,
49
+
vmo: Vmo,
50
+
vmo_offset: usize,
51
+
) -> Self {
38
52
Self {
39
53
range: spot..spot.checked_add(layout.size()).unwrap(),
40
54
access_rules,
41
55
layout,
56
+
vmo,
57
+
vmo_offset,
42
58
43
59
max_gap: 0,
44
60
subtree_range: spot..spot.checked_add(layout.size()).unwrap(),
45
61
links: wavltree::Links::new(),
62
+
63
+
_raw_aspace: PhantomData,
46
64
}
47
65
}
48
-
66
+
49
67
pub const fn range(&self) -> &Range<VirtualAddress> {
50
68
&self.range
51
69
}
···
83
101
&& layout.size() <= self.range.end.get() - self.range.start.get()
84
102
}
85
103
86
-
/// grow region to `new_len`, attempting to grow the VMO accordingly
87
-
/// `new_layout.size()` mut be greater than or equal to `self.layout.size()`
88
-
pub fn grow_in_place(
89
-
&mut self,
90
-
new_layout: Layout,
91
-
next_range: Option<Range<VirtualAddress>>,
92
-
_batch: &mut Batch,
104
+
/// Find physical memory frames to back the given `range`.
105
+
/// After this call succeeds, accesses that align with the given `access_rules` are guaranteed to
106
+
/// not page fault. The provided `access_rules` MUST be a subset of or equal to this region's access rules.
107
+
///
108
+
/// # Errors
109
+
///
110
+
/// - `range` is out of bounds
111
+
/// - `access_rules` is NOT a subset of self.access_rules
112
+
pub fn commit(
113
+
&self,
114
+
range: impl RangeBounds<VirtualAddress>,
115
+
access_rules: AccessRules,
116
+
batch: &mut Batch,
117
+
raw_aspace: &mut R
93
118
) -> crate::Result<()> {
94
-
if new_layout.align() > self.layout.align() {
95
-
bail!("cannot grow in-place: New alignment greater than current");
96
-
}
119
+
let vmo_relative = self.bounds_to_vmo_relative(range);
97
120
98
-
let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
121
+
let mut acquired_frames = self.vmo.acquire(vmo_relative).enumerate();
122
+
while let Some((idx, frame)) = acquired_frames.next()? {
123
+
let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();
99
124
100
-
if let Some(next_range) = next_range
101
-
&& next_range.is_overlapping(&new_range)
102
-
{
103
-
bail!("cannot grow in-place: New overlapping with next range");
125
+
unsafe {
126
+
batch.map(
127
+
virt,
128
+
frame.addr(),
129
+
NonZeroUsize::new(R::PAGE_SIZE).unwrap(),
130
+
access_rules,
131
+
);
132
+
}
133
+
134
+
if self.vmo.has_content_source() {
135
+
// TODO add virt addr to coalescer
136
+
}
104
137
}
105
138
106
-
// TODO attempt to resize VMO
107
-
self.update_range(new_range);
139
+
// materialize changes
140
+
batch.flush_changes(raw_aspace)?;
108
141
142
+
// initialize patched holes if necessary
143
+
if self.vmo.has_content_source() {
144
+
// for every region in coalescer
145
+
// figure out content source offset
146
+
// read from content source at offset into region
147
+
}
148
+
109
149
Ok(())
110
150
}
111
151
112
-
/// shrink region to the first `len` bytes, dropping the rest frames.
113
-
/// `new_layout.size()` mut be smaller than or equal to `self.layout.size()`
114
-
pub fn shrink(&mut self, new_layout: Layout, _batch: &mut Batch) -> crate::Result<()> {
115
-
if new_layout.align() > self.layout.align() {
116
-
bail!("cannot grow in-place: New alignment greater than current");
152
+
/// Release physical memory frames backing the given `range`.
153
+
/// After this call succeeds, accesses will page fault.
154
+
///
155
+
/// # Errors
156
+
///
157
+
/// - `range` is out of bounds
158
+
pub fn decommit(
159
+
&self,
160
+
range: impl RangeBounds<VirtualAddress>,
161
+
batch: &mut Batch,
162
+
raw_aspace: &mut R
163
+
) -> crate::Result<()> {
164
+
let vmo_relative = self.bounds_to_vmo_relative(range);
165
+
166
+
let mut released_frames = self.vmo.release(vmo_relative).enumerate();
167
+
while let Some((idx, _frame)) = released_frames.next()? {
168
+
let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap();
169
+
unsafe { batch.unmap(virt, NonZeroUsize::new(R::PAGE_SIZE).unwrap()) };
170
+
171
+
// if VMO has content source && frame is dirty
172
+
// add virt addr to coalescer
117
173
}
118
174
119
-
let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
175
+
// for every region in coalescer
176
+
// figure out content source offset
177
+
// write region to content source at offset
120
178
121
-
// TODO drop rest pages in VMO if possible (add unmaps to batch)
122
-
self.update_range(new_range);
179
+
// materialize changes
180
+
batch.flush_changes(raw_aspace)?;
123
181
124
182
Ok(())
125
183
}
126
184
127
-
/// move the entire region to the new base address, remapping any already mapped frames
128
-
pub fn move_to(&mut self, new_base: VirtualAddress, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
129
-
let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();
130
-
131
-
// TODO
132
-
// - attempt to resize VMO
133
-
// - update self range
134
-
// - for every frame in VMO
135
-
// - attempt to map at new offset (add maps to batch)
136
-
185
+
/// Zero out the memory in the given `range`.
186
+
/// This MAY release physical memory frames backing the `range`.
187
+
///
188
+
/// # Errors
189
+
///
190
+
/// - `range` is out of bounds
191
+
pub fn clear(&self, range: impl RangeBounds<VirtualAddress>) -> crate::Result<()> {
137
192
todo!()
138
193
}
139
194
140
-
pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
141
-
where
142
-
R: RangeBounds<VirtualAddress>,
143
-
{
144
-
// TODO
145
-
// - for every *uncommited* frame in range
146
-
// - request frame from VMO (add map to batch)
147
-
195
+
/// Update the access rules of this `AddressSpaceRegion`.
196
+
pub fn update_access_rules(&mut self, access_rules: AccessRules, batch: &mut Batch) -> crate::Result<()> {
148
197
todo!()
149
198
}
150
199
151
-
pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
152
-
where
153
-
R: RangeBounds<VirtualAddress>,
154
-
{
155
-
// TODO
156
-
// - for every *committed* frame in range
157
-
// - drop pages in VMO if possible (add unmaps to batch)
158
-
200
+
/// Fetches content in the given `range`. This operates logically equivalent to
201
+
/// a read, write, or instruction fetch (depending on `access_rules`) so that future accesses
202
+
/// are quicker. The provided `access_rules` MUST be a subset of or equal to this region's access rules.
203
+
///
204
+
/// # Errors
205
+
///
206
+
/// - `range` is out of bounds
207
+
/// - `access_rules` is NOT a subset of self.access_rules
208
+
pub fn prefetch(
209
+
&self,
210
+
range: impl RangeBounds<VirtualAddress>,
211
+
access_rules: AccessRules,
212
+
) -> crate::Result<()> {
159
213
todo!()
160
214
}
161
215
162
-
/// updates the access rules fo this region
163
-
pub fn update_access_rules(
164
-
&mut self,
165
-
access_rules: AccessRules,
166
-
batch: &mut Batch,
167
-
) -> crate::Result<()> {
168
-
// TODO
169
-
// - for every frame in VMO
170
-
// - update access rules (add protects to batch)
171
-
// - update self access rules
172
-
216
+
/// Attempts to grow the address space region to `new_len`.
217
+
/// `new_len` MUST be larger than or equal to the current length.
218
+
pub fn grow(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
173
219
todo!()
174
220
}
175
221
176
-
pub fn clear(&mut self, batch: &mut Batch) -> crate::Result<()> {
177
-
// TODO
178
-
// - replace VMO with "zeroed" VMO
179
-
// - drop pages in VMO if possible (add unmaps to batch)
180
-
222
+
/// Attempts to shrink the address space region to `new_len`.
223
+
/// `new_len` MUST be smaller than or equal to the current length.
224
+
pub fn shrink(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> {
181
225
todo!()
182
226
}
183
227
184
-
pub fn assert_valid(&self, msg: &str) {
228
+
// /// grow region to `new_len`, attempting to grow the VMO accordingly
229
+
// /// `new_layout.size()` must be greater than or equal to `self.layout.size()`
230
+
// pub fn grow_in_place(
231
+
// &mut self,
232
+
// new_layout: Layout,
233
+
// next_range: Option<Range<VirtualAddress>>,
234
+
// batch: &mut Batch,
235
+
// ) -> crate::Result<()> {
236
+
// if new_layout.align() > self.layout.align() {
237
+
// bail!("cannot grow in-place: New alignment greater than current");
238
+
// }
239
+
//
240
+
// let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
241
+
//
242
+
// if let Some(next_range) = next_range
243
+
// && next_range.is_overlapping(&new_range)
244
+
// {
245
+
// bail!("cannot grow in-place: New overlapping with next range");
246
+
// }
247
+
//
248
+
// self.vmo.resize(new_range.size(), batch)?;
249
+
//
250
+
// self.update_range(new_range);
251
+
//
252
+
// Ok(())
253
+
// }
254
+
//
255
+
// /// shrink region to the first `len` bytes, dropping the rest frames.
256
+
// /// `new_layout.size()` must be smaller than or equal to `self.layout.size()`
257
+
// pub fn shrink(&mut self, new_layout: Layout, batch: &mut Batch) -> crate::Result<()> {
258
+
// if new_layout.align() > self.layout.align() {
259
+
// bail!("cannot grow in-place: New alignment greater than current");
260
+
// }
261
+
//
262
+
// let new_range = self.range.start..self.range.start.checked_add(new_layout.size()).unwrap();
263
+
//
264
+
// self.vmo.resize(new_range.size(), batch)?;
265
+
//
266
+
// self.update_range(new_range);
267
+
//
268
+
// Ok(())
269
+
// }
270
+
//
271
+
// /// move the entire region to the new base address, remapping any already mapped frames
272
+
// pub fn move_to(
273
+
// &mut self,
274
+
// new_base: VirtualAddress,
275
+
// new_layout: Layout,
276
+
// batch: &mut Batch,
277
+
// ) -> crate::Result<()> {
278
+
// let new_range = new_base..new_base.checked_add(new_layout.size()).unwrap();
279
+
//
280
+
// self.vmo.resize(new_range.size(), batch)?;
281
+
// self.update_range(new_range);
282
+
//
283
+
// // - for every frame in VMO
284
+
// // - attempt to map at new offset (add maps to batch)
285
+
//
286
+
// todo!()
287
+
// }
288
+
//
289
+
// pub fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
290
+
// where
291
+
// R: RangeBounds<VirtualAddress>,
292
+
// {
293
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
294
+
//
295
+
// self.vmo.commit(bounds, will_write, batch)
296
+
// }
297
+
//
298
+
// pub fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
299
+
// where
300
+
// R: RangeBounds<VirtualAddress>,
301
+
// {
302
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
303
+
//
304
+
// self.vmo.decommit(bounds, batch)
305
+
// }
306
+
//
307
+
// /// updates the access rules fo this region
308
+
// pub fn update_access_rules(
309
+
// &mut self,
310
+
// access_rules: AccessRules,
311
+
// batch: &mut Batch,
312
+
// ) -> crate::Result<()> {
313
+
// // TODO
314
+
// // - for every frame in VMO
315
+
// // - update access rules (add protects to batch)
316
+
// // - update self access rules
317
+
//
318
+
// todo!()
319
+
// }
320
+
//
321
+
// pub fn clear<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
322
+
// where
323
+
// R: RangeBounds<VirtualAddress>,
324
+
// {
325
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
326
+
//
327
+
// self.vmo.clear(bounds, batch)
328
+
// }
329
+
//
330
+
// pub fn prefetch<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
331
+
// where
332
+
// R: RangeBounds<VirtualAddress>,
333
+
// {
334
+
// let bounds = self.bounds_to_vmo_relative(range.start_bound(), range.end_bound());
335
+
//
336
+
// self.vmo.prefetch(bounds, batch)
337
+
// }
338
+
339
+
pub fn assert_valid(&self, msg: &str)
340
+
where
341
+
R: fmt::Debug,
342
+
{
185
343
assert!(!self.range.is_empty(), "{msg}region range cannot be empty");
186
344
assert!(
187
345
self.subtree_range.start <= self.range.start
···
228
386
Some(unsafe { self.links.parent()?.as_ref() })
229
387
}
230
388
389
+
#[inline]
390
+
fn bounds_to_vmo_relative(
391
+
&self,
392
+
bounds: impl RangeBounds<VirtualAddress>,
393
+
) -> (Bound<usize>, Bound<usize>) {
394
+
let start = bounds.start_bound().map(|addr| {
395
+
(addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset
396
+
});
397
+
let end = bounds.end_bound().map(|addr| {
398
+
(addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset
399
+
});
400
+
401
+
(start, end)
402
+
}
403
+
231
404
fn update_range(&mut self, new_range: Range<VirtualAddress>) {
232
405
self.range = new_range;
233
406
// We also must propagate the information about our changed range to the rest of the tree
···
302
475
}
303
476
}
304
477
305
-
unsafe impl wavltree::Linked for AddressSpaceRegion {
478
+
unsafe impl<A: RawAddressSpace> wavltree::Linked for AddressSpaceRegion<A> {
306
479
/// Any heap-allocated type that owns an element may be used.
307
480
///
308
481
/// An element *must not* move while part of an intrusive data
libs/mem/src/addresses.rs
libs/mem/src/addresses.rs
This file has not been changed.
+33
-17
libs/mem/src/frame.rs
+33
-17
libs/mem/src/frame.rs
···
7
7
8
8
use core::alloc::Layout;
9
9
use core::cmp::PartialEq;
10
+
use core::fmt;
10
11
use core::fmt::Debug;
11
12
use core::mem::offset_of;
13
+
use core::ops::Deref;
12
14
use core::ptr::NonNull;
13
15
use core::sync::atomic;
14
16
use core::sync::atomic::{AtomicUsize, Ordering};
···
22
24
/// Soft limit on the amount of references that may be made to a `Frame`.
23
25
const MAX_REFCOUNT: usize = isize::MAX as usize;
24
26
25
-
pub struct FrameRef<A: FrameAllocator> {
27
+
pub struct FrameRef {
26
28
frame: NonNull<Frame>,
27
-
alloc: A,
29
+
alloc: &'static dyn FrameAllocator,
28
30
}
29
31
30
32
#[pin_project(!Unpin)]
···
38
40
39
41
// ===== impl FrameRef =====
40
42
41
-
// Safety: assert_impl_all! above ensures that `FrameInfo` is `Send`
42
-
unsafe impl Send for Frame {}
43
-
44
-
// Safety: assert_impl_all! above ensures that `FrameInfo` is `Sync`
45
-
unsafe impl Sync for Frame {}
46
-
47
-
impl<A: FrameAllocator + Clone> Clone for FrameRef<A> {
43
+
impl Clone for FrameRef {
48
44
/// Makes a clone of the `Frame`.
49
45
///
50
46
/// This creates reference to the same `FrameInfo`, increasing the reference count by one.
···
62
58
// > another must already provide any required synchronization.
63
59
//
64
60
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
65
-
let old_size = self.frame().refcount.fetch_add(1, Ordering::Relaxed);
61
+
let old_size = self.refcount.fetch_add(1, Ordering::Relaxed);
66
62
debug_assert_ne!(old_size, 0);
67
63
68
64
// Just like with `Arc` we want to prevent excessive refcounts in the case that we are leaking
···
81
77
}
82
78
}
83
79
84
-
impl<A: FrameAllocator> Drop for FrameRef<A> {
80
+
impl Drop for FrameRef {
85
81
/// Drops the `Frame`.
86
82
///
87
83
/// This will decrement the reference count. If the reference count reaches zero
88
84
/// then this frame will be marked as free and returned to the frame allocator.
89
85
fn drop(&mut self) {
90
-
if self.frame().refcount.fetch_sub(1, Ordering::Release) != 1 {
86
+
if self.refcount.fetch_sub(1, Ordering::Release) != 1 {
91
87
return;
92
88
}
93
89
···
113
109
}
114
110
}
115
111
116
-
impl<A: FrameAllocator> FrameRef<A> {
117
-
unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: A) -> Self {
118
-
Self { frame, alloc }
112
+
impl Debug for FrameRef {
113
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
114
+
f.debug_struct("FrameRef")
115
+
.field("ptr", &self.frame)
116
+
.finish_non_exhaustive()
119
117
}
118
+
}
120
119
121
-
fn frame(&self) -> &Frame {
120
+
impl Deref for FrameRef {
121
+
type Target = Frame;
122
+
123
+
fn deref(&self) -> &Self::Target {
122
124
unsafe { self.frame.as_ref() }
123
125
}
126
+
}
124
127
128
+
impl FrameRef {
129
+
unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self {
130
+
Self { frame, alloc }
131
+
}
132
+
125
133
#[inline(never)]
126
134
fn drop_slow(&mut self) {
127
-
let layout = unsafe { Layout::from_size_align_unchecked(A::FRAME_SIZE, A::FRAME_SIZE) };
135
+
let layout = unsafe {
136
+
Layout::from_size_align_unchecked(self.alloc.page_size(), self.alloc.page_size())
137
+
};
128
138
unsafe {
129
139
self.alloc.deallocate(self.frame, layout);
130
140
}
···
132
142
}
133
143
134
144
// ===== impl Frame =====
145
+
146
+
// Safety: assert_impl_all! above ensures that `FrameInfo` is `Send`
147
+
unsafe impl Send for Frame {}
148
+
149
+
// Safety: assert_impl_all! above ensures that `FrameInfo` is `Sync`
150
+
unsafe impl Sync for Frame {}
135
151
136
152
impl PartialEq<Frame> for &Frame {
137
153
fn eq(&self, other: &Frame) -> bool {
+5
-3
libs/mem/src/frame_alloc.rs
+5
-3
libs/mem/src/frame_alloc.rs
···
30
30
pub struct AllocError;
31
31
32
32
pub unsafe trait FrameAllocator: Send + Sync + 'static {
33
-
const FRAME_SIZE: usize;
34
33
fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError>;
35
34
unsafe fn deallocate(&self, block: NonNull<Frame>, layout: Layout);
35
+
fn page_size(&self) -> usize;
36
36
}
37
37
38
38
const MAX_FRAMES_IN_CACHE: usize = 256;
···
92
92
unsafe impl<L: lock_api::RawMutex + Send + Sync, A: RawAddressSpace + Send + Sync> FrameAllocator
93
93
for &'static FrameAlloc<L, A>
94
94
{
95
-
const FRAME_SIZE: usize = A::PAGE_SIZE;
96
-
97
95
fn allocate(&self, layout: Layout) -> Result<NonNull<[Frame]>, AllocError> {
98
96
// attempt to allocate from the CPU-local cache first
99
97
if let Some(frame) = self.allocate_local(layout) {
···
131
129
}
132
130
133
131
unreachable!();
132
+
}
133
+
134
+
fn page_size(&self) -> usize {
135
+
A::PAGE_SIZE
134
136
}
135
137
}
libs/mem/src/frame_alloc/area.rs
libs/mem/src/frame_alloc/area.rs
This file has not been changed.
libs/mem/src/frame_alloc/area_selection.rs
libs/mem/src/frame_alloc/area_selection.rs
This file has not been changed.
-19
libs/mem/src/lib.rs
-19
libs/mem/src/lib.rs
···
16
16
pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
17
17
pub use frame::{Frame, FrameRef};
18
18
19
-
// For every region we need to track 3 pieces of information:
20
-
// 1. The virtual memory region it occupies.
21
-
// - required to know which virtual memory regions are free to use
22
-
// - required to know when resolving page faults
23
-
// 2. The physical memory region(s) it occupies.
24
-
// - required to know when resolving page faults
25
-
// - required for swap
26
-
// - either
27
-
// - PAGED (general purpose, lazy, paged physical memory. Can be committed, decommitted, swapped, or compressed)
28
-
// - MMIO (physmem is MMIO instead of regular RAM. Can NOT be swapped, or compressed, but CAN be committed and decomitted)
29
-
// - WIRED (the mapping was set up during boot. Can NEITHER be committed, decomitted, swapped, nor compressed)
30
-
// 3. The content of the memory region.
31
-
// - required for resolving page faults
32
-
// - when first committing physical memory we need to know what to fill the memory with
33
-
// - needs writeback hooks so changes can be flushed
34
-
// - either
35
-
// - ZERO FRAME (the special zero frame, filled only with zeroes)
36
-
// - USERSPACE provider (for file system, swap, etc.)
37
-
libs/mem/src/test_utils.rs
libs/mem/src/test_utils.rs
This file has not been changed.
libs/mem/src/utils.rs
libs/mem/src/utils.rs
This file has not been changed.
+356
-206
libs/mem/src/vmo.rs
+356
-206
libs/mem/src/vmo.rs
···
6
6
// copied, modified, or distributed except according to those terms.
7
7
8
8
use alloc::sync::Arc;
9
-
use core::convert::Infallible;
10
-
use core::fmt;
11
-
use core::ops::RangeBounds;
9
+
use core::ops::{Bound, Range, RangeBounds};
10
+
use core::{fmt, ptr};
12
11
13
-
use kasync::io::{Read, Write};
12
+
use anyhow::ensure;
13
+
use fallible_iterator::FallibleIterator;
14
+
use lock_api::RwLock;
15
+
use smallvec::SmallVec;
16
+
use crate::frame_list::FrameList;
17
+
use crate::{FrameRef, PhysicalAddress};
14
18
15
-
use crate::address_space::Batch;
19
+
pub struct Vmo {
20
+
name: &'static str,
21
+
vmo: RawVmo,
22
+
}
16
23
17
-
pub trait VirtualMemoryObject: fmt::Debug {
18
-
type Err;
24
+
#[derive(Debug)]
25
+
struct RawVmo {
26
+
data: *const (),
27
+
vtable: &'static RawVmoVTable,
28
+
}
19
29
20
-
// attempt to resize the vmo to `new_size`
21
-
async fn resize(&mut self, new_size: usize) -> Result<(), Self::Err>;
30
+
#[derive(Copy, Clone, Debug)]
31
+
struct RawVmoVTable {
32
+
clone: unsafe fn(*const ()) -> RawVmo,
33
+
acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
34
+
release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
35
+
clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
36
+
len: unsafe fn(*const ()) -> usize,
37
+
resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
38
+
drop: unsafe fn(*const ()),
39
+
}
22
40
23
-
// find physical pages to back the range of the object
24
-
async fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> Result<(), Self::Err>
25
-
where
26
-
R: RangeBounds<usize>;
41
+
// ===== impl Vmo =====
27
42
28
-
// free a range of the vmo back to the default state
29
-
async fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
30
-
where
31
-
R: RangeBounds<usize>;
43
+
impl Unpin for Vmo {}
32
44
33
-
// Zero a range of the VMO. May release physical pages in the process.
34
-
async fn clear<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
35
-
where
36
-
R: RangeBounds<usize>;
45
+
// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Send
46
+
// therefore Vmo is Send too
47
+
unsafe impl Send for Vmo {}
48
+
// Safety: As part of the safety contract for RawVmoVTable, the caller promised RawVmo is Sync
49
+
// therefore Vmo is Sync too
50
+
unsafe impl Sync for Vmo {}
37
51
38
-
// Fetches content in the given range of the object. This should operate logically equivalent to
39
-
// reading such that future reads are quicker.
40
-
async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
41
-
where
42
-
R: RangeBounds<usize>;
52
+
impl Clone for Vmo {
53
+
#[inline]
54
+
fn clone(&self) -> Self {
55
+
Self {
56
+
vmo: unsafe { (self.vmo.vtable.clone)(self.vmo.data) },
57
+
name: self.name,
58
+
}
59
+
}
43
60
}
44
61
45
-
#[derive(Debug)]
46
-
pub struct WiredVmo(());
62
+
impl Drop for Vmo {
63
+
#[inline]
64
+
fn drop(&mut self) {
65
+
unsafe { (self.vmo.vtable.drop)(self.vmo.data) }
66
+
}
67
+
}
47
68
48
-
impl VirtualMemoryObject for WiredVmo {
49
-
type Err = Infallible;
69
+
impl fmt::Debug for Vmo {
70
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
71
+
let vtable_ptr = self.vmo.vtable as *const RawVmoVTable;
72
+
f.debug_struct("Vmo")
73
+
.field("name", &self.name)
74
+
.field("data", &self.vmo.data)
75
+
.field("vtable", &vtable_ptr)
76
+
.finish()
77
+
}
78
+
}
50
79
51
-
async fn resize(&mut self, _new_size: usize) -> Result<(), Self::Err> {
52
-
unreachable!("cannot resize WIRED memory object");
80
+
impl Vmo {
81
+
/// Creates a new `Vmo` from the provided `len`, `data` pointer and `vtable`.
82
+
///
83
+
/// TODO
84
+
///
85
+
/// The `data` pointer can be used to store arbitrary data as required by the vmo implementation.
86
+
/// This could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
87
+
/// The value of this pointer will get passed to all functions that are part
88
+
/// of the `vtable` as the first parameter.
89
+
///
90
+
/// It is important to consider that the `data` pointer must point to a
91
+
/// thread safe type such as an `Arc`.
92
+
///
93
+
/// The `vtable` customizes the behavior of a `Vmo`. For each operation
94
+
/// on the `Vmo`, the associated function in the `vtable` will be called.
95
+
///
96
+
/// # Safety
97
+
///
98
+
/// The behavior of the returned `Vmo` is undefined if the contract defined
99
+
/// in [`RawVmoVTable`]'s documentation is not upheld.
100
+
#[inline]
101
+
#[must_use]
102
+
pub const unsafe fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
103
+
// Safety: ensured by caller
104
+
unsafe { Self::from_raw(RawVmo { data, vtable }) }
53
105
}
54
106
55
-
async fn commit<R>(
56
-
&mut self,
57
-
_range: R,
58
-
_will_write: bool,
59
-
_batch: &mut Batch,
60
-
) -> Result<(), Self::Err>
107
+
/// Creates a new `Vmo` from a [`RawVmo`].
108
+
///
109
+
/// # Safety
110
+
///
111
+
/// The behavior of the returned `Vmo` is undefined if the contract defined
112
+
/// in [`RawVmo`]'s and [`RawVmoVTable`]'s documentation is not upheld.
113
+
#[inline]
114
+
#[must_use]
115
+
pub const unsafe fn from_raw(vmo: RawVmo) -> Self {
116
+
Self {
117
+
vmo,
118
+
name: "<unnamed mystery VMO>",
119
+
}
120
+
}
121
+
122
+
/// Add an arbitrary user-defined name to this `Vmo`.
123
+
pub fn named(mut self, name: &'static str) -> Self {
124
+
self.name = name;
125
+
self
126
+
}
127
+
128
+
/// Returns this `Vmo`'s name, if it was given one using the [`Vmo::named`]
129
+
/// method.
130
+
pub fn name(&self) -> &'static str {
131
+
self.name
132
+
}
133
+
134
+
pub fn len(&self) -> usize {
135
+
unsafe { (self.vmo.vtable.len)(self.vmo.data) }
136
+
}
137
+
138
+
pub fn has_content_source(&self) -> bool {
139
+
self.content_source().is_some()
140
+
}
141
+
142
+
pub fn content_source(&self) -> Option<()> {
143
+
todo!()
144
+
}
145
+
146
+
/// Gets the `data` pointer used to create this `Vmo`.
147
+
#[inline]
148
+
#[must_use]
149
+
pub fn data(&self) -> *const () {
150
+
self.vmo.data
151
+
}
152
+
153
+
/// Gets the `vtable` pointer used to create this `Vmo`.
154
+
#[inline]
155
+
#[must_use]
156
+
pub fn vtable(&self) -> &'static RawVmoVTable {
157
+
self.vmo.vtable
158
+
}
159
+
160
+
// Release the frame at the given `index`. After this call succeeds, all accessed following the
161
+
// given `access_rules` MUST NOT fault.
162
+
// UNIT: frames
163
+
pub fn acquire<R>(
164
+
&self,
165
+
range: R,
166
+
) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
61
167
where
62
168
R: RangeBounds<usize>,
63
169
{
64
-
unreachable!("cannot commit WIRED memory object. Wired memory is always committed.");
170
+
let range = self.bound_check(range);
171
+
172
+
let i = range
173
+
.into_iter()
174
+
.flat_map(|r| r)
175
+
.filter_map(|idx| unsafe { (self.vmo.vtable.acquire)(self.vmo.data, idx).transpose() });
176
+
177
+
fallible_iterator::convert(i)
65
178
}
66
179
67
-
async fn decommit<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
180
+
// Release the frame at the given `index`. After this call succeeds, all accessed to the frame
181
+
// MUST fault. Returns the base physical address of the release frame.
182
+
// UNIT: frames
183
+
pub fn release<R>(
184
+
&self,
185
+
range: R,
186
+
) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
68
187
where
69
188
R: RangeBounds<usize>,
70
189
{
71
-
unreachable!("cannot decommit WIRED memory object. Wired memory is always committed.");
190
+
let range = self.bound_check(range);
191
+
192
+
let i = range
193
+
.into_iter()
194
+
.flat_map(|r| r)
195
+
.filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
196
+
197
+
fallible_iterator::convert(i)
72
198
}
73
199
74
-
async fn clear<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
200
+
// Release the frame at the given `index`. After this call succeeds, all accessed to the frame
201
+
// MUST fault. Returns the base physical address of the release frame.
202
+
// UNIT: frames
203
+
pub fn clear<R>(
204
+
&self,
205
+
range: R,
206
+
) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error>
75
207
where
76
208
R: RangeBounds<usize>,
77
209
{
78
-
todo!()
210
+
let range = self.bound_check(range);
211
+
212
+
let i = range
213
+
.into_iter()
214
+
.flat_map(|r| r)
215
+
.filter_map(|idx| unsafe { (self.vmo.vtable.clear)(self.vmo.data, idx).transpose() });
216
+
217
+
fallible_iterator::convert(i)
79
218
}
80
219
81
-
async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
220
+
// Grow the VMO to `new_size` (guaranteed to be larger than or equal to the current size).
221
+
fn grow(&self, new_len: usize) -> crate::Result<()> {
222
+
debug_assert!(new_len >= self.len());
223
+
224
+
unsafe { (self.vmo.vtable.resize)(self.vmo.data, new_len)? };
225
+
226
+
Ok(())
227
+
}
228
+
229
+
// Shrink the VMO to `new_size` (guaranteed to be smaller than or equal to the current size).
230
+
// After this call succeeds, all accesses outside the new range MUST fault.
231
+
// UNIT: frames
232
+
pub fn shrink(
233
+
&self,
234
+
new_len: usize,
235
+
) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> {
236
+
debug_assert!(new_len <= self.len());
237
+
238
+
let old_len = self.len();
239
+
240
+
unsafe {
241
+
(self.vmo.vtable.resize)(self.vmo.data, new_len)?;
242
+
};
243
+
244
+
let i = (new_len..old_len)
245
+
.into_iter()
246
+
.filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() });
247
+
248
+
fallible_iterator::convert(i)
249
+
}
250
+
251
+
#[inline]
252
+
fn bound_check<R>(&self, range: R) -> crate::Result<Range<usize>>
82
253
where
83
254
R: RangeBounds<usize>,
84
255
{
85
-
todo!()
256
+
let start = match range.start_bound() {
257
+
Bound::Included(b) => *b,
258
+
Bound::Excluded(b) => *b + 1,
259
+
Bound::Unbounded => 0,
260
+
};
261
+
let end = match range.end_bound() {
262
+
Bound::Included(b) => *b + 1,
263
+
Bound::Excluded(b) => *b,
264
+
Bound::Unbounded => self.len(),
265
+
};
266
+
267
+
ensure!(end <= self.len());
268
+
269
+
Ok(start..end)
86
270
}
87
271
}
88
272
89
-
#[derive(Debug)]
90
-
pub struct PagedVmo {
91
-
source: Arc<dyn FrameSource + Send + Sync>,
273
+
// ===== impl RawVmo =====
274
+
275
+
impl RawVmo {
276
+
/// Creates a new `RawVmo` from the provided `data` pointer and `vtable`.
277
+
///
278
+
/// The `data` pointer can be used to store arbitrary data as required by the VMO implementation.
279
+
/// his could be e.g. a type-erased pointer to an `Arc` that holds private implementation-specific state.
280
+
/// The value of this pointer will get passed to all functions that are part
281
+
/// of the `vtable` as the first parameter.
282
+
///
283
+
/// It is important to consider that the `data` pointer must point to a
284
+
/// thread safe type such as an `Arc`.
285
+
///
286
+
/// The `vtable` customizes the behavior of a `Vmo`. For each operation
287
+
/// on the `Vmo`, the associated function in the `vtable` will be called.
288
+
#[inline]
289
+
#[must_use]
290
+
pub const fn new(data: *const (), vtable: &'static RawVmoVTable) -> Self {
291
+
Self { data, vtable }
292
+
}
92
293
}
93
294
94
-
impl VirtualMemoryObject for PagedVmo {
95
-
type Err = anyhow::Error;
295
+
// ===== impl RawVmoVTable =====
96
296
97
-
async fn resize(&mut self, new_size: usize) -> Result<(), Self::Err> {
297
+
impl RawVmoVTable {
298
+
pub const fn new(
299
+
clone: unsafe fn(*const ()) -> RawVmo,
300
+
acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
301
+
release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
302
+
clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>,
303
+
len: unsafe fn(*const ()) -> usize,
304
+
resize: unsafe fn(*const (), new_len: usize) -> crate::Result<()>,
305
+
drop: unsafe fn(*const ()),
306
+
) -> Self {
307
+
Self {
308
+
clone,
309
+
acquire,
310
+
release,
311
+
clear,
312
+
len,
313
+
resize,
314
+
drop,
315
+
}
316
+
}
317
+
}
318
+
319
+
pub fn stub_vmo() -> Vmo {
320
+
const WIRED_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
321
+
stub_clone,
322
+
stub_acquire,
323
+
stub_release,
324
+
stub_clear,
325
+
stub_len,
326
+
stub_resize,
327
+
stub_drop,
328
+
);
329
+
330
+
unsafe fn stub_clone(ptr: *const ()) -> RawVmo {
331
+
debug_assert!(ptr.is_null());
332
+
RawVmo::new(ptr, &WIRED_VMO_VTABLE)
333
+
}
334
+
335
+
unsafe fn stub_acquire(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
336
+
debug_assert!(ptr.is_null());
337
+
unreachable!()
338
+
}
339
+
unsafe fn stub_release(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
340
+
debug_assert!(ptr.is_null());
341
+
unreachable!()
342
+
}
343
+
unsafe fn stub_clear(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> {
344
+
debug_assert!(ptr.is_null());
345
+
unreachable!()
346
+
}
347
+
unsafe fn stub_len(ptr: *const ()) -> usize {
348
+
debug_assert!(ptr.is_null());
349
+
unreachable!()
350
+
}
351
+
unsafe fn stub_resize(ptr: *const (), _new_len: usize) -> crate::Result<()> {
352
+
debug_assert!(ptr.is_null());
353
+
unreachable!()
354
+
}
355
+
unsafe fn stub_drop(ptr: *const ()) {
356
+
debug_assert!(ptr.is_null());
357
+
}
358
+
359
+
unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) }
360
+
}
361
+
362
+
struct PagedVmo<R: lock_api::RawRwLock> {
363
+
list: RwLock<R, SmallVec<[FrameRef; 64]>>,
364
+
}
365
+
366
+
impl<R: lock_api::RawRwLock> PagedVmo<R> {
367
+
pub const fn new(phys: Range<PhysicalAddress>) -> Self {
98
368
todo!()
99
369
}
100
370
101
-
async fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> Result<(), Self::Err>
102
-
where
103
-
R: RangeBounds<usize>,
104
-
{
371
+
const VMO_VTABLE: RawVmoVTable = RawVmoVTable::new(
372
+
Self::clone,
373
+
Self::acquire,
374
+
Self::release,
375
+
Self::clear,
376
+
Self::len,
377
+
Self::resize,
378
+
Self::drop,
379
+
);
380
+
381
+
unsafe fn clone(ptr: *const ()) -> RawVmo {
382
+
unsafe {
383
+
Arc::increment_strong_count(ptr.cast::<Self>());
384
+
}
385
+
RawVmo::new(ptr, &Self::VMO_VTABLE)
386
+
}
387
+
388
+
unsafe fn drop(ptr: *const ()) {
389
+
drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) });
390
+
}
391
+
392
+
unsafe fn acquire(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
393
+
let me = ptr.cast::<Self>().as_ref().unwrap();
394
+
395
+
let mut list = me.list.write();
396
+
397
+
list.entry(index).or_insert_with(|| todo!("allocate frame"));
398
+
399
+
// list
400
+
}
401
+
402
+
unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
105
403
todo!()
106
404
}
107
405
108
-
async fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
109
-
where
110
-
R: RangeBounds<usize>,
111
-
{
406
+
unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> {
112
407
todo!()
113
408
}
114
409
115
-
async fn clear<R>(&mut self, range: R, batch: &mut Batch) -> Result<(), Self::Err>
116
-
where
117
-
R: RangeBounds<usize>,
118
-
{
410
+
unsafe fn len(ptr: *const ()) -> usize {
119
411
todo!()
120
412
}
121
413
122
-
async fn prefetch<R>(&mut self, _range: R, _batch: &mut Batch) -> Result<(), Self::Err>
123
-
where
124
-
R: RangeBounds<usize>,
125
-
{
414
+
unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> {
126
415
todo!()
127
416
}
128
417
}
129
-
130
-
trait FrameSource: Read<Err = anyhow::Error> + Write<Err = anyhow::Error> + fmt::Debug {}
131
-
132
-
// impl<A: RawAddressSpace> VirtualMemoryObject for PhysVmo<A> {
133
-
// fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
134
-
// where
135
-
// R: RangeBounds<usize>,
136
-
// {
137
-
// let range_phys = slice_range(&self.range, range)?;
138
-
//
139
-
// // batch.map(
140
-
// // // range.start,
141
-
// // range_phys.start,
142
-
// // NonZeroUsize::new(range_phys.size()).unwrap(),
143
-
// // // self.permissions.into(),
144
-
// // )?;
145
-
//
146
-
// todo!()
147
-
// }
148
-
//
149
-
// fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
150
-
// where
151
-
// R: RangeBounds<usize>,
152
-
// {
153
-
// todo!()
154
-
// }
155
-
// }
156
-
//
157
-
// #[derive(Debug)]
158
-
// pub struct PagedVmo {}
159
-
//
160
-
// impl VirtualMemoryObject for PagedVmo {
161
-
// fn commit<R>(&mut self, range: R, will_write: bool, batch: &mut Batch) -> crate::Result<()>
162
-
// where
163
-
// R: RangeBounds<usize>,
164
-
// {
165
-
// // let access = slice_range(self.range, range);
166
-
//
167
-
// // if will_write {
168
-
// // let mut vmo = vmo.write();
169
-
// //
170
-
// // for addr in range.iter().step_by(arch::PAGE_SIZE) {
171
-
// // debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE));
172
-
// // let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap();
173
-
// // let frame = vmo.require_owned_frame(vmo_relative_offset)?;
174
-
// // batch.queue_map(
175
-
// // addr,
176
-
// // frame.addr(),
177
-
// // NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
178
-
// // self.permissions.into(),
179
-
// // )?;
180
-
// // }
181
-
// // } else {
182
-
// // let mut vmo = vmo.write();
183
-
// //
184
-
// // for addr in range.iter().step_by(arch::PAGE_SIZE) {
185
-
// // debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE));
186
-
// // let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap();
187
-
// // let frame = vmo.require_read_frame(vmo_relative_offset)?;
188
-
// // batch.queue_map(
189
-
// // addr,
190
-
// // frame.addr(),
191
-
// // NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
192
-
// // self.permissions.difference(Permissions::WRITE).into(),
193
-
// // )?;
194
-
// // }
195
-
// // }
196
-
//
197
-
// todo!()
198
-
// }
199
-
//
200
-
// fn decommit<R>(&mut self, range: R, batch: &mut Batch) -> crate::Result<()>
201
-
// where
202
-
// R: RangeBounds<usize>,
203
-
// {
204
-
// todo!()
205
-
// }
206
-
// }
207
-
//
208
-
// fn slice_range<R: RangeBounds<usize>>(
209
-
// range: &Range<PhysicalAddress>,
210
-
// bounds: R,
211
-
// ) -> crate::Result<Range<PhysicalAddress>> {
212
-
// let start = match bounds.start_bound() {
213
-
// Bound::Included(b) => range.start.checked_add(*b).unwrap(),
214
-
// Bound::Excluded(b) => range.start.checked_add(*b + 1).unwrap(),
215
-
// Bound::Unbounded => range.start,
216
-
// };
217
-
// let end = match bounds.end_bound() {
218
-
// Bound::Included(b) => range.start.checked_add(*b + 1).unwrap(),
219
-
// Bound::Excluded(b) => range.start.checked_add(*b).unwrap(),
220
-
// Bound::Unbounded => range.end,
221
-
// };
222
-
//
223
-
// ensure!(end <= range.end, "requested range {:?} is out of bounds for {range:?}", start..end);
224
-
//
225
-
// Ok(start..end)
226
-
// }
227
-
//
228
-
// #[cfg(test)]
229
-
// mod tests {
230
-
// use core::ops::Bound::{Excluded, Included};
231
-
//
232
-
// use super::*;
233
-
//
234
-
// #[test]
235
-
// fn _subrange() {
236
-
// let range = PhysicalAddress::new(0)..PhysicalAddress::new(10);
237
-
//
238
-
// assert_eq!(
239
-
// slice_range(&range, 0..1).unwrap(),
240
-
// PhysicalAddress::new(0)..PhysicalAddress::new(1)
241
-
// );
242
-
// assert_eq!(
243
-
// slice_range(&range, ..).unwrap(),
244
-
// PhysicalAddress::new(0)..PhysicalAddress::new(10)
245
-
// );
246
-
// assert_eq!(
247
-
// slice_range(&range, 0..).unwrap(),
248
-
// PhysicalAddress::new(0)..PhysicalAddress::new(10)
249
-
// );
250
-
// assert_eq!(
251
-
// slice_range(&range, ..10).unwrap(),
252
-
// PhysicalAddress::new(0)..PhysicalAddress::new(10)
253
-
// );
254
-
// assert_eq!(
255
-
// slice_range(&range, 0..10).unwrap(),
256
-
// PhysicalAddress::new(0)..PhysicalAddress::new(10)
257
-
// );
258
-
// assert_eq!(
259
-
// slice_range(&range, 0..=9).unwrap(),
260
-
// PhysicalAddress::new(0)..PhysicalAddress::new(10)
261
-
// );
262
-
// assert_eq!(
263
-
// slice_range(&range, (Excluded(0), Included(9))).unwrap(),
264
-
// PhysicalAddress::new(1)..PhysicalAddress::new(10)
265
-
// );
266
-
// }
267
-
// }
libs/wavltree/src/cursor.rs
libs/wavltree/src/cursor.rs
This file has not been changed.
libs/wavltree/src/lib.rs
libs/wavltree/src/lib.rs
This file has not been changed.
History
5 rounds
0 comments
jonaskruckenberg.de
submitted
#4
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
merge conflicts detected
expand
collapse
expand
collapse
- Cargo.lock:135
- libs/wavltree/src/cursor.rs:88
expand 0 comments
jonaskruckenberg.de
submitted
#3
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#2
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#1
1 commit
expand
collapse
refactor: separate memory subsystem into own crate
expand 0 comments
jonaskruckenberg.de
submitted
#0
1 commit
expand
collapse
refactor: separate memory subsystem into own crate