Next Generation WASM Microkernel Operating System

refactor: separate memory subsystem into own crate #1

Open — opened by jonaskruckenberg.de, targeting main from jonas/refactor/mem
Labels

None yet.

Participants 1
AT URI
at://did:plc:wur5mmsnhlocanyqtus3oex5/sh.tangled.repo.pull/3lwbnsqcrd622
+347 -115
Interdiff #3 → #4
Cargo.lock

This file has not been changed.

libs/mem/Cargo.toml

This file has not been changed.

libs/mem/proptest-regressions/frame.txt

This file has not been changed.

+29
libs/mem/src/access_rules.rs
··· 41 41 Execute = 0b10, 42 42 } 43 43 44 + // ===== impl AccessRules ===== 45 + 46 + impl AccessRules { 47 + 48 + pub const fn is_read_only(&self) -> bool { 49 + const READ_MASK: u8 = AccessRules::READ.max_value(); 50 + assert!(READ_MASK == 1); 51 + self.0 & READ_MASK == 1 52 + } 53 + 54 + pub fn allows_read(&self) -> bool { 55 + self.get(Self::READ) 56 + } 57 + 58 + pub fn allows_write(&self) -> bool { 59 + match self.get(Self::WRITE_OR_EXECUTE) { 60 + WriteOrExecute::Write => true, 61 + _ => false, 62 + } 63 + } 64 + 65 + pub fn allows_execution(&self) -> bool { 66 + match self.get(Self::WRITE_OR_EXECUTE) { 67 + WriteOrExecute::Execute => true, 68 + _ => false, 69 + } 70 + } 71 + } 72 + 44 73 // ===== impl WriteOrExecute ===== 45 74 46 75 impl mycelium_bitfield::FromBits<u8> for WriteOrExecute {
+54 -44
libs/mem/src/address_space.rs
··· 9 9 mod region; 10 10 11 11 use alloc::boxed::Box; 12 + use alloc::sync::Arc; 12 13 use core::alloc::Layout; 13 14 use core::num::NonZeroUsize; 14 15 use core::ops::{Bound, ControlFlow, Range}; 15 16 use core::ptr::NonNull; 16 17 17 - use anyhow::{Context, format_err}; 18 + use anyhow::{format_err, Context}; 18 19 pub(crate) use batch::Batch; 19 - use rand::Rng; 20 20 use rand::distr::Uniform; 21 + use rand::Rng; 21 22 use rand_chacha::ChaCha20Rng; 22 23 use region::AddressSpaceRegion; 23 24 use wavltree::{CursorMut, WAVLTree}; 24 25 25 26 use crate::access_rules::AccessRules; 27 + use crate::frame_alloc::FrameAllocator; 26 28 use crate::utils::assert_unsafe_precondition_; 29 + use crate::vmo::PagedVmo; 27 30 use crate::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 28 31 29 32 pub unsafe trait RawAddressSpace { ··· 127 130 batch: Batch, 128 131 max_range: Range<VirtualAddress>, 129 132 rng: Option<ChaCha20Rng>, 133 + frame_alloc: &'static dyn FrameAllocator, 130 134 } 131 135 132 - impl<R: RawAddressSpace> AddressSpace<R> { 133 - pub fn new(raw: R, rng: Option<ChaCha20Rng>) -> Self { 136 + impl<A: RawAddressSpace> AddressSpace<A> { 137 + pub fn new(raw: A, rng: Option<ChaCha20Rng>, frame_alloc: &'static dyn FrameAllocator) -> Self { 134 138 Self { 135 139 raw, 136 140 regions: WAVLTree::new(), 137 141 batch: Batch::new(), 138 142 max_range: VirtualAddress::MIN..VirtualAddress::MAX, 139 143 rng, 144 + frame_alloc, 140 145 } 141 146 } 142 147 ··· 157 162 /// 158 163 /// Returning `Err` indicates the layout does not meet the address space's size or alignment 159 164 /// constraints, virtual memory is exhausted, or mapping otherwise fails. 
160 - pub fn map( 165 + pub fn map<R: lock_api::RawRwLock>( 161 166 &mut self, 162 167 layout: Layout, 163 168 access_rules: AccessRules, ··· 165 170 #[cfg(debug_assertions)] 166 171 self.assert_valid("[AddressSpace::map]"); 167 172 168 - let layout = layout.align_to(R::PAGE_SIZE).unwrap(); 173 + let layout = layout.align_to(A::PAGE_SIZE).unwrap(); 169 174 170 175 let spot = self 171 176 .find_spot_for(layout) 172 177 .context(format_err!("cannot find free spot for layout {layout:?}"))?; 173 178 174 179 // TODO "relaxed" frame provider 175 - let region = AddressSpaceRegion::new(spot, layout, access_rules); 180 + let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo(); 181 + let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0); 176 182 177 183 let region = self.regions.insert(Box::pin(region)); 178 184 ··· 190 196 /// 191 197 /// Returning `Err` indicates the layout does not meet the address space's size or alignment 192 198 /// constraints, virtual memory is exhausted, or mapping otherwise fails. 
193 - pub fn map_zeroed( 199 + pub fn map_zeroed<R: lock_api::RawRwLock>( 194 200 &mut self, 195 201 layout: Layout, 196 202 access_rules: AccessRules, ··· 198 204 #[cfg(debug_assertions)] 199 205 self.assert_valid("[AddressSpace::map_zeroed]"); 200 206 201 - let layout = layout.align_to(R::PAGE_SIZE).unwrap(); 207 + let layout = layout.align_to(A::PAGE_SIZE).unwrap(); 202 208 203 209 let spot = self 204 210 .find_spot_for(layout) 205 211 .context(format_err!("cannot find free spot for layout {layout:?}"))?; 206 212 207 213 // TODO "zeroed" frame provider 208 - let region = AddressSpaceRegion::new(spot, layout, access_rules); 214 + let vmo = Arc::new(PagedVmo::<R>::new(self.frame_alloc)).into_vmo(); 215 + let region = AddressSpaceRegion::new(spot, layout, access_rules, vmo, 0); 209 216 210 217 let region = self.regions.insert(Box::pin(region)); 211 218 ··· 284 291 285 292 assert_unsafe_precondition_!( 286 293 "TODO", 287 - (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 294 + (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => { 288 295 old_layout.align().is_multiple_of(page_size) 289 296 } 290 297 ); 291 298 292 299 assert_unsafe_precondition_!( 293 300 "TODO", 294 - (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 301 + (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => { 295 302 new_layout.align().is_multiple_of(page_size) 296 303 } 297 304 ); ··· 342 349 343 350 assert_unsafe_precondition_!( 344 351 "TODO", 345 - (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 352 + (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => { 346 353 old_layout.align().is_multiple_of(page_size) 347 354 } 348 355 ); 349 356 350 357 assert_unsafe_precondition_!( 351 358 "TODO", 352 - (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 359 + (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => { 353 360 
new_layout.align().is_multiple_of(page_size) 354 361 } 355 362 ); ··· 414 421 415 422 assert_unsafe_precondition_!( 416 423 "TODO", 417 - (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 424 + (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => { 418 425 old_layout.align().is_multiple_of(page_size) 419 426 } 420 427 ); 421 428 422 429 assert_unsafe_precondition_!( 423 430 "TODO", 424 - (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 431 + (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => { 425 432 new_layout.align().is_multiple_of(page_size) 426 433 } 427 434 ); ··· 473 480 474 481 assert_unsafe_precondition_!( 475 482 "TODO", 476 - (old_layout: Layout = old_layout, page_size: usize = R::PAGE_SIZE) => { 483 + (old_layout: Layout = old_layout, page_size: usize = A::PAGE_SIZE) => { 477 484 old_layout.align().is_multiple_of(page_size) 478 485 } 479 486 ); 480 487 481 488 assert_unsafe_precondition_!( 482 489 "TODO", 483 - (new_layout: Layout = new_layout, page_size: usize = R::PAGE_SIZE) => { 490 + (new_layout: Layout = new_layout, page_size: usize = A::PAGE_SIZE) => { 484 491 new_layout.align().is_multiple_of(page_size) 485 492 } 486 493 ); ··· 582 589 // Safety: responsibility of caller 583 590 let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 584 591 585 - region.clear(&mut self.batch)?; 592 + region.clear(.., &mut self.batch)?; 586 593 587 594 self.batch.flush_changes(&mut self.raw)?; 588 595 ··· 661 668 // Safety: responsibility of caller 662 669 let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 663 670 664 - region.grow_in_place(new_layout, next_range, &mut self.batch)?; 671 + todo!(); 672 + region.grow(new_layout.size(), &mut self.batch)?; 665 673 666 674 self.batch.flush_changes(&mut self.raw)?; 667 675 ··· 689 697 // Safety: responsibility of caller 690 698 let mut region = unsafe { cursor.get_mut().unwrap_unchecked() }; 691 699 692 - 
region.shrink(new_layout, &mut self.batch)?; 700 + region.shrink(new_layout.size(), &mut self.batch)?; 693 701 694 - self.batch.flush_changes()?; 702 + self.batch.flush_changes(&mut self.raw)?; 695 703 696 704 Ok(region.as_non_null()) 697 705 } ··· 719 727 "cannot find free spot for layout {new_layout:?}" 720 728 ))?; 721 729 722 - region.move_to(spot, new_layout, &mut self.batch)?; 730 + todo!(); 723 731 732 + // region.move_to(spot, new_layout, &mut self.batch)?; 733 + 724 734 Ok(region.as_non_null()) 725 735 } 726 736 ··· 803 813 .chosen 804 814 .expect("There must be a chosen spot after the first attempt. This is a bug!"); 805 815 806 - debug_assert!(chosen.is_canonical::<R>()); 816 + debug_assert!(chosen.is_canonical::<A>()); 807 817 808 818 Some(chosen) 809 819 } ··· 889 899 /// 890 900 /// [*currently mapped*]: #currently-mapped-memory 891 901 /// [*fit*]: #memory-fitting 892 - unsafe fn get_region_containing_ptr( 893 - regions: &mut WAVLTree<AddressSpaceRegion>, 902 + unsafe fn get_region_containing_ptr<A: RawAddressSpace>( 903 + regions: &mut WAVLTree<AddressSpaceRegion<A>>, 894 904 ptr: NonNull<u8>, 895 905 layout: Layout, 896 - ) -> CursorMut<'_, AddressSpaceRegion> { 906 + ) -> CursorMut<'_, AddressSpaceRegion<A>> { 897 907 let addr = VirtualAddress::from_non_null(ptr); 898 908 899 909 let cursor = regions.lower_bound_mut(Bound::Included(&addr)); 900 910 901 - assert_unsafe_precondition_!( 902 - "TODO", 903 - (cursor: &CursorMut<AddressSpaceRegion> = &cursor) => cursor.get().is_some() 904 - ); 911 + // assert_unsafe_precondition_!( 912 + // "TODO", 913 + // (cursor: &CursorMut<AddressSpaceRegion<A>> = &cursor) => cursor.get().is_some() 914 + // ); 905 915 906 916 // Safety: The caller guarantees the pointer is currently mapped which means we must have 907 917 // a corresponding address space region for it 908 918 let region = unsafe { cursor.get().unwrap_unchecked() }; 909 919 910 - assert_unsafe_precondition_!( 911 - "TODO", 912 - (region: 
&AddressSpaceRegion = region, addr: VirtualAddress = addr) => { 913 - let range = region.range(); 914 - 915 - range.start.get() <= addr.get() && addr.get() < range.end.get() 916 - } 917 - ); 918 - 919 - assert_unsafe_precondition_!( 920 - "`layout` does not fit memory region", 921 - (layout: Layout = layout, region: &AddressSpaceRegion = &region) => region.layout_fits_region(layout) 922 - ); 920 + // assert_unsafe_precondition_!( 921 + // "TODO", 922 + // (region: &AddressSpaceRegion = region, addr: VirtualAddress = addr) => { 923 + // let range = region.range(); 924 + // 925 + // range.start.get() <= addr.get() && addr.get() < range.end.get() 926 + // } 927 + // ); 928 + // 929 + // assert_unsafe_precondition_!( 930 + // "`layout` does not fit memory region", 931 + // (layout: Layout = layout, region: &AddressSpaceRegion = &region) => region.layout_fits_region(layout) 932 + // ); 923 933 924 934 cursor 925 935 }
libs/mem/src/address_space/batch.rs

This file has not been changed.

+40 -24
libs/mem/src/address_space/region.rs
··· 7 7 8 8 use alloc::boxed::Box; 9 9 use core::alloc::Layout; 10 + use core::fmt::Formatter; 10 11 use core::marker::PhantomData; 11 12 use core::mem::offset_of; 12 13 use core::num::NonZeroUsize; ··· 23 24 use crate::{AccessRules, AddressRangeExt, VirtualAddress}; 24 25 25 26 #[pin_project] 26 - #[derive(Debug)] 27 - pub struct AddressSpaceRegion<R> { 27 + pub struct AddressSpaceRegion<A> { 28 28 range: Range<VirtualAddress>, 29 29 access_rules: AccessRules, 30 30 layout: Layout, ··· 36 36 /// The largest gap in this subtree, used when allocating new regions 37 37 max_gap: usize, 38 38 /// Links to other regions in the WAVL tree 39 - links: wavltree::Links<AddressSpaceRegion<R>>, 39 + links: wavltree::Links<AddressSpaceRegion<A>>, 40 40 41 - _raw_aspace: PhantomData<R>, 41 + _raw_aspace: PhantomData<A>, 42 42 } 43 43 44 - impl<R: RawAddressSpace> AddressSpaceRegion<R> { 44 + impl<A> fmt::Debug for AddressSpaceRegion<A> { 45 + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 46 + f.debug_struct("AddressSpaceRegion") 47 + .field("range", &self.range) 48 + .field("access_rules", &self.access_rules) 49 + .field("layout", &self.layout) 50 + .field("vmo", &self.vmo) 51 + .field("vmo_offset", &self.vmo_offset) 52 + .field("subtree_range", &self.subtree_range) 53 + .field("max_gap", &self.max_gap) 54 + .field("links", &self.links) 55 + .finish() 56 + } 57 + } 58 + 59 + impl<A: RawAddressSpace> AddressSpaceRegion<A> { 45 60 pub const fn new( 46 61 spot: VirtualAddress, 47 62 layout: Layout, ··· 110 125 /// - `range` is out of bounds 111 126 /// - `access_rules` is NOT a subset of self.access_rules 112 127 pub fn commit( 113 - &self, 128 + &mut self, 114 129 range: impl RangeBounds<VirtualAddress>, 115 130 access_rules: AccessRules, 116 131 batch: &mut Batch, 117 - raw_aspace: &mut R, 132 + raw_aspace: &mut A, 118 133 ) -> crate::Result<()> { 119 134 let vmo_relative = self.bounds_to_vmo_relative(range); 120 135 121 - let mut acquired_frames = 
self.vmo.acquire(vmo_relative).enumerate(); 136 + let mut acquired_frames = self.vmo.acquire(vmo_relative, access_rules).enumerate(); 122 137 while let Some((idx, frame)) = acquired_frames.next()? { 123 - let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap(); 138 + let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap(); 124 139 125 140 unsafe { 126 141 batch.map( 127 142 virt, 128 143 frame.addr(), 129 - NonZeroUsize::new(R::PAGE_SIZE).unwrap(), 144 + NonZeroUsize::new(A::PAGE_SIZE).unwrap(), 130 145 access_rules, 131 146 ); 132 147 } ··· 156 171 /// 157 172 /// - `range` is out of bounds 158 173 pub fn decommit( 159 - &self, 174 + &mut self, 160 175 range: impl RangeBounds<VirtualAddress>, 161 176 batch: &mut Batch, 162 - raw_aspace: &mut R, 177 + raw_aspace: &mut A, 163 178 ) -> crate::Result<()> { 164 179 let vmo_relative = self.bounds_to_vmo_relative(range); 165 180 166 181 let mut released_frames = self.vmo.release(vmo_relative).enumerate(); 167 182 while let Some((idx, _frame)) = released_frames.next()? 
{ 168 - let virt = self.range.start.checked_add(idx * R::PAGE_SIZE).unwrap(); 169 - unsafe { batch.unmap(virt, NonZeroUsize::new(R::PAGE_SIZE).unwrap()) }; 183 + let virt = self.range.start.checked_add(idx * A::PAGE_SIZE).unwrap(); 184 + unsafe { batch.unmap(virt, NonZeroUsize::new(A::PAGE_SIZE).unwrap()) }; 170 185 171 186 // if VMO has content source && frame is dirty 172 187 // add virt addr to coalescer ··· 188 203 /// # Errors 189 204 /// 190 205 /// - `range` is out of bounds 191 - pub fn clear(&self, range: impl RangeBounds<VirtualAddress>) -> crate::Result<()> { 206 + pub fn clear( 207 + &mut self, 208 + range: impl RangeBounds<VirtualAddress>, 209 + batch: &mut Batch, 210 + ) -> crate::Result<()> { 192 211 todo!() 193 212 } 194 213 ··· 210 229 /// - `range` is out of bounds 211 230 /// - `access_rules` is NOT a subset of self.access_rules 212 231 pub fn prefetch( 213 - &self, 232 + &mut self, 214 233 range: impl RangeBounds<VirtualAddress>, 215 234 access_rules: AccessRules, 216 235 ) -> crate::Result<()> { ··· 219 238 220 239 /// Attempts to grow the address space region to `new_len`. 221 240 /// `new_len` MUST be larger than or equal to the current length. 222 - pub fn grow(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 241 + pub fn grow(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 223 242 todo!() 224 243 } 225 244 226 245 /// Attempts to shrink the address space region to `new_len`. 227 246 /// `new_len` MUST be smaller than or equal to the current length. 
228 - pub fn shrink(&self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 247 + pub fn shrink(&mut self, new_len: usize, batch: &mut Batch) -> crate::Result<()> { 229 248 todo!() 230 249 } 231 250 ··· 340 359 // self.vmo.prefetch(bounds, batch) 341 360 // } 342 361 343 - pub fn assert_valid(&self, msg: &str) 344 - where 345 - R: fmt::Debug, 346 - { 362 + pub fn assert_valid(&self, msg: &str) { 347 363 assert!(!self.range.is_empty(), "{msg}region range cannot be empty"); 348 364 assert!( 349 365 self.subtree_range.start <= self.range.start ··· 396 412 bounds: impl RangeBounds<VirtualAddress>, 397 413 ) -> (Bound<usize>, Bound<usize>) { 398 414 let start = bounds.start_bound().map(|addr| { 399 - (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset 415 + (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset 400 416 }); 401 417 let end = bounds.end_bound().map(|addr| { 402 - (addr.checked_sub_addr(self.range.start).unwrap() / R::PAGE_SIZE) + self.vmo_offset 418 + (addr.checked_sub_addr(self.range.start).unwrap() / A::PAGE_SIZE) + self.vmo_offset 403 419 }); 404 420 405 421 (start, end)
libs/mem/src/addresses.rs

This file has not been changed.

+14 -6
libs/mem/src/frame.rs
··· 26 26 27 27 pub struct FrameRef { 28 28 frame: NonNull<Frame>, 29 - alloc: &'static dyn FrameAllocator, 29 + frame_alloc: &'static dyn FrameAllocator, 30 30 } 31 31 32 32 #[pin_project(!Unpin)] ··· 73 73 // standard library, it is good enough for us. 74 74 assert!(old_size <= MAX_REFCOUNT, "Frame refcount overflow"); 75 75 76 - unsafe { Self::from_raw_parts(self.frame, self.alloc.clone()) } 76 + unsafe { Self::from_raw_parts(self.frame, self.frame_alloc.clone()) } 77 77 } 78 78 } 79 79 ··· 126 126 } 127 127 128 128 impl FrameRef { 129 - unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self { 130 - Self { frame, alloc } 129 + pub unsafe fn from_raw_parts(frame: NonNull<Frame>, alloc: &'static dyn FrameAllocator) -> Self { 130 + Self { frame, frame_alloc: alloc } 131 131 } 132 + 133 + pub fn ptr_eq(a: &Self, b: &Self) -> bool { 134 + a.frame == b.frame 135 + } 132 136 133 137 #[inline(never)] 134 138 fn drop_slow(&mut self) { 135 139 let layout = unsafe { 136 - Layout::from_size_align_unchecked(self.alloc.page_size(), self.alloc.page_size()) 140 + Layout::from_size_align_unchecked(self.frame_alloc.page_size(), self.frame_alloc.page_size()) 137 141 }; 138 142 unsafe { 139 - self.alloc.deallocate(self.frame, layout); 143 + self.frame_alloc.deallocate(self.frame, layout); 140 144 } 141 145 } 142 146 } ··· 168 172 self.refcount.load(Ordering::Relaxed) 169 173 } 170 174 175 + pub fn is_unique(&self) -> bool { 176 + self.refcount() == 1 177 + } 178 + 171 179 pub fn addr(&self) -> PhysicalAddress { 172 180 self.addr 173 181 }
libs/mem/src/frame_alloc.rs

This file has not been changed.

libs/mem/src/frame_alloc/area.rs

This file has not been changed.

libs/mem/src/frame_alloc/area_selection.rs

This file has not been changed.

+1
libs/mem/src/lib.rs
··· 10 10 mod test_utils; 11 11 mod utils; 12 12 mod vmo; 13 + mod test; 13 14 14 15 pub type Result<T> = anyhow::Result<T>; 15 16
libs/mem/src/test_utils.rs

This file has not been changed.

libs/mem/src/utils.rs

This file has not been changed.

+152 -41
libs/mem/src/vmo.rs
··· 6 6 // copied, modified, or distributed except according to those terms. 7 7 8 8 use alloc::sync::Arc; 9 + use core::alloc::Layout; 9 10 use core::ops::{Bound, Range, RangeBounds}; 10 11 use core::{fmt, ptr}; 11 12 12 - use anyhow::ensure; 13 + use anyhow::{anyhow, ensure}; 13 14 use fallible_iterator::FallibleIterator; 14 15 use lock_api::RwLock; 15 16 use smallvec::SmallVec; 16 17 17 - use crate::frame_list::FrameList; 18 - use crate::{FrameRef, PhysicalAddress}; 18 + use crate::frame_alloc::FrameAllocator; 19 + use crate::{AccessRules, FrameRef}; 19 20 20 21 pub struct Vmo { 21 22 name: &'static str, ··· 28 29 vtable: &'static RawVmoVTable, 29 30 } 30 31 31 - #[derive(Copy, Clone, Debug)] 32 + #[derive(PartialEq, Copy, Clone, Debug)] 32 33 struct RawVmoVTable { 33 34 clone: unsafe fn(*const ()) -> RawVmo, 34 - acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 35 + acquire: unsafe fn( 36 + *const (), 37 + index: usize, 38 + access_rules: AccessRules, 39 + ) -> crate::Result<Option<FrameRef>>, 35 40 release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 36 41 clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 37 42 len: unsafe fn(*const ()) -> usize, ··· 164 169 pub fn acquire<R>( 165 170 &self, 166 171 range: R, 172 + access_rules: AccessRules, 167 173 ) -> impl FallibleIterator<Item = FrameRef, Error = anyhow::Error> 168 174 where 169 175 R: RangeBounds<usize>, ··· 173 179 let i = range 174 180 .into_iter() 175 181 .flat_map(|r| r) 176 - .filter_map(|idx| unsafe { (self.vmo.vtable.acquire)(self.vmo.data, idx).transpose() }); 182 + .filter_map(move |idx| unsafe { 183 + (self.vmo.vtable.acquire)(self.vmo.data, idx, access_rules).transpose() 184 + }); 177 185 178 186 fallible_iterator::convert(i) 179 187 } ··· 238 246 239 247 let old_len = self.len(); 240 248 241 - unsafe { 242 - (self.vmo.vtable.resize)(self.vmo.data, new_len)?; 243 - }; 244 - 245 - let i = (new_len..old_len) 246 
- .into_iter() 247 - .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() }); 248 - 249 - fallible_iterator::convert(i) 249 + todo!(); 250 + fallible_iterator::empty() 251 + // unsafe { 252 + // (self.vmo.vtable.resize)(self.vmo.data, new_len)?; 253 + // }; 254 + // 255 + // let i = (new_len..old_len) 256 + // .into_iter() 257 + // .filter_map(|idx| unsafe { (self.vmo.vtable.release)(self.vmo.data, idx).transpose() }); 258 + // 259 + // fallible_iterator::convert(i) 250 260 } 251 261 252 262 #[inline] ··· 298 308 impl RawVmoVTable { 299 309 pub const fn new( 300 310 clone: unsafe fn(*const ()) -> RawVmo, 301 - acquire: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 311 + acquire: unsafe fn( 312 + *const (), 313 + index: usize, 314 + access_rules: AccessRules, 315 + ) -> crate::Result<Option<FrameRef>>, 302 316 release: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 303 317 clear: unsafe fn(*const (), index: usize) -> crate::Result<Option<FrameRef>>, 304 318 len: unsafe fn(*const ()) -> usize, ··· 333 347 RawVmo::new(ptr, &WIRED_VMO_VTABLE) 334 348 } 335 349 336 - unsafe fn stub_acquire(ptr: *const (), _index: usize) -> crate::Result<Option<FrameRef>> { 350 + unsafe fn stub_acquire( 351 + ptr: *const (), 352 + _index: usize, 353 + _access_rules: AccessRules, 354 + ) -> crate::Result<Option<FrameRef>> { 337 355 debug_assert!(ptr.is_null()); 338 356 unreachable!() 339 357 } ··· 360 378 unsafe { Vmo::new(ptr::null(), &WIRED_VMO_VTABLE) } 361 379 } 362 380 363 - struct PagedVmo<R: lock_api::RawRwLock> { 364 - list: RwLock<R, SmallVec<[FrameRef; 64]>>, 381 + pub struct PagedVmo<R: lock_api::RawRwLock> { 382 + list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>, 383 + frame_alloc: &'static dyn FrameAllocator, 365 384 } 366 385 367 386 impl<R: lock_api::RawRwLock> PagedVmo<R> { 368 - pub const fn new(phys: Range<PhysicalAddress>) -> Self { 369 - todo!() 370 - } 371 - 372 - const VMO_VTABLE: 
RawVmoVTable = RawVmoVTable::new( 373 - Self::clone, 387 + const RAW_VMO_VTABLE: RawVmoVTable = RawVmoVTable::new( 388 + Self::clone_vmo, 374 389 Self::acquire, 375 390 Self::release, 376 391 Self::clear, 377 392 Self::len, 378 393 Self::resize, 379 - Self::drop, 394 + Self::drop_vmo, 380 395 ); 381 396 382 - unsafe fn clone(ptr: *const ()) -> RawVmo { 383 - unsafe { 384 - Arc::increment_strong_count(ptr.cast::<Self>()); 397 + pub fn new(frame_alloc: &'static dyn FrameAllocator) -> Self { 398 + Self { 399 + list: RwLock::new(SmallVec::new()), 400 + frame_alloc, 385 401 } 386 - RawVmo::new(ptr, &Self::VMO_VTABLE) 387 402 } 388 403 389 - unsafe fn drop(ptr: *const ()) { 390 - drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) }); 404 + #[inline(always)] 405 + pub fn into_vmo(self: Arc<Self>) -> Vmo { 406 + unsafe { Vmo::new(Arc::into_raw(self) as *const (), &Self::RAW_VMO_VTABLE) } 391 407 } 392 408 393 - unsafe fn acquire(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 394 - let me = ptr.cast::<Self>().as_ref().unwrap(); 409 + fn allocate_frame(&self) -> FrameRef { 410 + let layout = 411 + Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size()) 412 + .unwrap(); 395 413 396 - let mut list = me.list.write(); 414 + let frames = self.frame_alloc.allocate(layout).unwrap(); 415 + debug_assert_eq!(frames.len(), 1); 397 416 398 - list.entry(index).or_insert_with(|| todo!("allocate frame")); 417 + unsafe { FrameRef::from_raw_parts(frames.cast(), self.frame_alloc.clone()) } 418 + } 399 419 400 - // list 420 + fn clone_the_zero_frame(&self) -> FrameRef { 421 + todo!() 401 422 } 402 423 403 - unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 424 + #[inline(always)] 425 + unsafe fn clone_vmo(vmo: *const ()) -> RawVmo { 426 + unsafe { 427 + Arc::increment_strong_count(vmo.cast::<Self>()); 428 + } 429 + RawVmo::new(vmo, &Self::RAW_VMO_VTABLE) 430 + } 431 + 432 + unsafe fn drop_vmo(ptr: *const ()) { 433 
+ drop(unsafe { Arc::from_raw(ptr.cast::<Self>()) }); 434 + } 435 + 436 + unsafe fn acquire( 437 + ptr: *const (), 438 + index: usize, 439 + access_rules: AccessRules, 440 + ) -> crate::Result<Option<FrameRef>> { 441 + let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() }; 442 + 404 443 todo!() 405 444 } 406 445 446 + unsafe fn release(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 447 + let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() }; 448 + let mut list = me.list.write(); 449 + 450 + let slot = list 451 + .get_mut(index) 452 + .ok_or(anyhow!("index out of bounds"))? 453 + .take(); 454 + 455 + Ok(slot) 456 + } 457 + 407 458 unsafe fn clear(ptr: *const (), index: usize) -> crate::Result<Option<FrameRef>> { 408 - todo!() 459 + let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() }; 460 + let mut list = me.list.write(); 461 + 462 + let prev_frame = list 463 + .get_mut(index) 464 + .ok_or(anyhow!("index out of bounds"))? 465 + .replace(me.clone_the_zero_frame()); 466 + 467 + Ok(prev_frame) 409 468 } 410 469 411 470 unsafe fn len(ptr: *const ()) -> usize { 412 - todo!() 471 + let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() }; 472 + let list = me.list.read(); 473 + 474 + list.len() 413 475 } 414 476 415 477 unsafe fn resize(ptr: *const (), new_len: usize) -> crate::Result<()> { 416 - todo!() 478 + let me = unsafe { ptr.cast::<Self>().as_ref().unwrap() }; 479 + let mut list = me.list.write(); 480 + 481 + list.resize(new_len, None); 482 + 483 + Ok(()) 417 484 } 485 + } 486 + 487 + struct VVmo<R: lock_api::RawRwLock> { 488 + list: RwLock<R, SmallVec<[Option<FrameRef>; 64]>>, 489 + frame_alloc: &'static dyn FrameAllocator, 490 + the_zero_frame: FrameRef, 491 + } 492 + 493 + struct Batch { 494 + freed: SmallVec<[FrameRef; 4]>, 495 + allocated: SmallVec<[FrameRef; 4]>, 496 + } 497 + 498 + impl<R: lock_api::RawRwLock> VVmo<R> { 499 + fn allocate_one(&self) -> FrameRef { 500 + let layout = 501 + 
Layout::from_size_align(self.frame_alloc.page_size(), self.frame_alloc.page_size()) 502 + .unwrap(); 503 + let frame = self.frame_alloc.allocate(layout).unwrap(); 504 + debug_assert_eq!(frame.len(), 1); 505 + unsafe { FrameRef::from_raw_parts(frame.cast(), self.frame_alloc.clone()) } 506 + } 507 + 508 + pub fn acquire(&self, index: usize, access_rules: AccessRules, batch: &mut Batch) { 509 + let mut list = self.list.write(); 510 + 511 + if let Some(old_frame) = list.get(index).unwrap() { 512 + assert!(!old_frame.is_unique()); 513 + 514 + if access_rules.is_read_only() { 515 + } 516 + 517 + 518 + 519 + 520 + } else { 521 + let new_frame = self.allocate_one(); 522 + list.insert(index, Some(new_frame)); 523 + // TODO report new_frame for mapping 524 + } 525 + } 526 + 527 + pub fn release(&self, range: Range<usize>, batch: &mut Batch) {} 528 + pub fn clear(&self, range: Range<usize>, batch: &mut Batch) {} 418 529 }
libs/wavltree/src/cursor.rs

This file has not been changed.

libs/wavltree/src/lib.rs

This file has not been changed.

+1
libs/kasync/src/task.rs
··· 433 433 } 434 434 } 435 435 436 + #[inline(always)] 436 437 fn into_raw_waker(self) -> RawWaker { 437 438 // Increment the reference count of the arc to clone it. 438 439 //
+56
libs/mem/src/test.rs
··· 1 + // Copyright 2025. Jonas Kruckenberg 2 + // 3 + // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or 4 + // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or 5 + // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 + // copied, modified, or distributed except according to those terms. 7 + 8 + use core::ops::RangeBounds; 9 + use crate::{AccessRules, VirtualAddress}; 10 + use crate::address_space::Batch; 11 + 12 + struct AddressSpaceRegion<A> { 13 + _aspace: A, 14 + } 15 + 16 + impl<A> AddressSpaceRegion<A> { 17 + /// Map physical memory to back the given `range` 18 + /// 19 + /// After this call succeeds, accesses that align with the given `access` are guaranteed to 20 + /// not page fault. The provided `access_rules` MUST be a subset or equal to this regions access rules. 21 + /// 22 + /// # Errors 23 + /// 24 + /// - `range` is out of bounds 25 + /// - `access_rules` is NOT a subset of self.access_rules 26 + pub fn commit( 27 + &mut self, 28 + range: impl RangeBounds<VirtualAddress>, 29 + access_rules: AccessRules, 30 + batch: &mut Batch, 31 + raw_aspace: &mut A, 32 + ) -> crate::Result<()> { 33 + 34 + 35 + 36 + 37 + 38 + todo!() 39 + } 40 + 41 + /// Release physical memory frames backing the given `range`. 42 + /// 43 + /// After this call succeeds, accesses will page fault. 44 + /// 45 + /// # Errors 46 + /// 47 + /// - `range` is out of bounds for this address space region 48 + pub fn decommit( 49 + &mut self, 50 + range: impl RangeBounds<VirtualAddress>, 51 + batch: &mut Batch, 52 + raw_aspace: &mut A, 53 + ) -> crate::Result<()> { 54 + todo!() 55 + } 56 + }

History

5 rounds 0 comments
sign up or login to add to the discussion
1 commit
expand
refactor: separate memory subsystem into own crate
merge conflicts detected
expand
  • Cargo.lock:135
  • libs/wavltree/src/cursor.rs:88
expand 0 comments
1 commit
expand
refactor: separate memory subsystem into own crate
expand 0 comments
1 commit
expand
refactor: separate memory subsystem into own crate
expand 0 comments
1 commit
expand
refactor: separate memory subsystem into own crate
expand 0 comments
1 commit
expand
refactor: separate memory subsystem into own crate
expand 0 comments