···11+use core::alloc::Layout;
12use core::convert::Infallible;
23use core::ops::Range;
44+55+use fallible_iterator::FallibleIterator;
3647use crate::arch::{Arch, PageTableEntry, PageTableLevel};
58use crate::bootstrap::{Bootstrap, BootstrapAllocator};
···7982 &self.arch
8083 }
    /// Returns a reference to the physical memory map used by this address space.
    pub fn physmap(&self) -> &PhysMap {
        &self.physmap
    }
    /// Returns the architecture's granule (smallest page) size in bytes.
    pub const fn granule_size(&self) -> usize {
        A::GRANULE_SIZE
    }
    /// Returns the [`Layout`] describing a single architecture granule
    /// (size and alignment of the smallest page).
    pub const fn granule_layout(&self) -> Layout {
        A::GRANULE_LAYOUT
    }
9696+8297 /// Activate the address space on this CPU (set this CPUs page table).
8398 ///
8499 /// # Safety
···119134 unreachable!()
120135 }
121136137137+ /// Maps the virtual address range `virt` to *possibly discontiguous* chunk(s) of physical memory
138138+ /// `phys` with the specified memory attributes.
139139+ ///
140140+ /// If this returns `Ok`, the mapping is added to the address space.
141141+ ///
142142+ /// Note that this method **does not** establish any ordering between address space modification
143143+ /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the
144144+ /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`].
145145+ ///
146146+ /// After the modifications have been synchronized with current execution, all accesses to the virtual
147147+ /// address range will translate to accesses of the physical address range and adhere to the
148148+ /// access rules established by the `MemoryAttributes`.
149149+ ///
150150+ /// # Safety
151151+ ///
152152+ /// 1. The entire range `virt` must be unmapped.
153153+ /// 2. `virt` must be aligned to at least the smallest architecture block size.
154154+ /// 3. `phys` chunks must be aligned to at least the smallest architecture block size.
155155+ /// 4. `phys` chunks must in-total be at least as large as `virt`.
156156+ ///
157157+ /// # Errors
158158+ ///
159159+ /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain
160160+ /// partially altered. The caller should call *unmap* on the virtual address range upon failure.
161161+ pub unsafe fn map(
162162+ &mut self,
163163+ mut virt: Range<VirtualAddress>,
164164+ mut phys: impl FallibleIterator<Item = Range<PhysicalAddress>, Error = AllocError>,
165165+ attributes: MemoryAttributes,
166166+ frame_allocator: impl FrameAllocator,
167167+ flush: &mut Flush,
168168+ ) -> Result<(), AllocError> {
169169+ while let Some(chunk_phys) = phys.next()? {
170170+ debug_assert!(!virt.is_empty());
171171+172172+ // Safety: ensured by caller
173173+ unsafe {
174174+ self.map_contiguous(
175175+ Range::from_start_len(virt.start, chunk_phys.len()),
176176+ chunk_phys.start,
177177+ attributes,
178178+ frame_allocator.by_ref(),
179179+ flush,
180180+ )?;
181181+ }
182182+183183+ virt.start = virt.start.add(chunk_phys.len());
184184+ }
185185+186186+ Ok(())
187187+ }
188188+122189 /// Maps the virtual address range `virt` to a continuous region of physical memory starting at `phys`
123190 /// with the specified memory attributes.
124191 ///
···137204 /// 1. The entire range `virt` must be unmapped.
138205 /// 2. `virt` must be aligned to at least the smallest architecture block size.
139206 /// 3. `phys` must be aligned to at least the smallest architecture block size.
207207+ /// 4. The region pointed to by `phys` must be at least as large as `virt`.
140208 ///
141209 /// # Errors
142210 ///
143143- /// Returning `Err` indicates the mapping cannot be established and the address space remains
144144- /// unaltered.
211211+ /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain
212212+ /// partially altered. The caller should call *unmap* on the virtual address range upon failure.
145213 pub unsafe fn map_contiguous(
146214 &mut self,
147215 virt: Range<VirtualAddress>,
···207275 Ok(())
208276 }
209277210210- /// Remaps the virtual address range `virt` to a new continuous region of physical memory start
278278+ /// Remaps the virtual address range `virt` to new *possibly discontiguous* chunk(s) of physical
279279+ /// memory `phys`. The old physical memory region is not freed.
280280+ ///
281281+ /// Note that this method **does not** establish any ordering between address space modification
282282+ /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the
283283+ /// updated mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`].
284284+ ///
285285+ /// After the modifications have been synchronized with current execution, all accesses to the virtual
286286+ /// address range will translate to accesses of the new physical address range.
287287+ ///
288288+ /// # Safety
289289+ ///
290290+ /// 1. The entire range `virt` must be mapped.
291291+ /// 2. `virt` must be aligned to at least the smallest architecture block size.
292292+ /// 3. `phys` chunks must be aligned to `at least the smallest architecture block size.
293293+ /// 4. `phys` chunks must in-total be at least as large as `virt`.
294294+ ///
295295+ /// # Errors
296296+ ///
297297+ /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain
298298+ /// partially altered. The caller should call *unmap* on the virtual address range upon failure.
299299+ pub unsafe fn remap(
300300+ &mut self,
301301+ mut virt: Range<VirtualAddress>,
302302+ mut phys: impl FallibleIterator<Item = Range<PhysicalAddress>, Error = AllocError>,
303303+ flush: &mut Flush,
304304+ ) -> Result<(), AllocError> {
305305+ while let Some(chunk_phys) = phys.next()? {
306306+ debug_assert!(!virt.is_empty());
307307+308308+ // Safety: ensured by caller
309309+ unsafe {
310310+ self.remap_contiguous(
311311+ Range::from_start_len(virt.start, chunk_phys.len()),
312312+ chunk_phys.start,
313313+ flush,
314314+ );
315315+ }
316316+317317+ virt.start = virt.start.add(chunk_phys.len());
318318+ }
319319+320320+ Ok(())
321321+ }
322322+323323+ /// Remaps the virtual address range `virt` to a new continuous region of physical memory starting
211324 /// at `phys`. The old physical memory region is not freed.
212325 ///
213326 /// Note that this method **does not** establish any ordering between address space modification
···222335 /// 1. The entire range `virt` must be mapped.
223336 /// 2. `virt` must be aligned to at least the smallest architecture block size.
    /// 3. `phys` must be aligned to at least the smallest architecture block size.
338338+ /// 4. The region pointed to by `phys` must be at least as large as `virt`.
225339 pub unsafe fn remap_contiguous(
226340 &mut self,
227341 virt: Range<VirtualAddress>,
···438552 use crate::arch::Arch;
439553 use crate::flush::Flush;
440554 use crate::frame_allocator::FrameAllocator;
441441- use crate::test_utils::{BootstrapResult, MachineBuilder};
555555+ use crate::test_utils::MachineBuilder;
442556 use crate::{MemoryAttributes, VirtualAddress, WriteOrExecute, archtest};
443557444558 archtest! {
445559 #[test]
446560 fn map<A: Arch>() {
447447- let BootstrapResult {
448448- mut address_space,
449449- frame_allocator,
450450- ..
451451- } = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
561561+ let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
452562 .with_memory_regions([0xA000])
453563 .finish_and_bootstrap()
454564 .unwrap();
565565+ let (_, mut address_space, frame_allocator) = res;
455566456567 let frame = frame_allocator
457568 .allocate_contiguous(A::GRANULE_LAYOUT)
···484595485596 #[test]
486597 fn remap<A: Arch>() {
487487- let BootstrapResult {
488488- mut address_space,
489489- frame_allocator,
490490- ..
491491- } = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
598598+ let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
492599 .with_memory_regions([0xB000])
493600 .finish_and_bootstrap()
494601 .unwrap();
602602+ let (_, mut address_space, frame_allocator) = res;
495603496604 let frame = frame_allocator
497605 .allocate_contiguous(A::GRANULE_LAYOUT)
498606 .unwrap();
499607500500- let page = Range::from_start_len(
501501- VirtualAddress::new(0x7000),
502502- A::GRANULE_SIZE,
503503- );
608608+ let page = Range::from_start_len(VirtualAddress::new(0x7000), A::GRANULE_SIZE);
504609505610 let mut flush = Flush::new();
506611 unsafe {
···547652548653 #[test]
549654 fn set_attributes<A: Arch>() {
550550- let BootstrapResult {
551551- mut address_space,
552552- frame_allocator,
553553- ..
554554- } = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
655655+ let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new()
555656 .with_memory_regions([0xB000])
556657 .finish_and_bootstrap()
557658 .unwrap();
659659+ let (_, mut address_space, frame_allocator) = res;
558660559661 let frame = frame_allocator
560662 .allocate_contiguous(A::GRANULE_LAYOUT)
561663 .unwrap();
562664563563- let page = Range::from_start_len(
564564- VirtualAddress::new(0x7000),
565565- A::GRANULE_SIZE
566566- );
665665+ let page = Range::from_start_len(VirtualAddress::new(0x7000), A::GRANULE_SIZE);
567666568667 let mut flush = Flush::new();
569668 unsafe {
···4242/// A memory block which is currently allocated may be passed to any method of the allocator that
4343/// accepts such an argument.
4444pub unsafe trait FrameAllocator {
4545- fn allocate(&self, layout: Layout) -> FrameIter<'_, Self, false>
4545+ fn allocate(&self, layout: Layout) -> FrameIter<'_, Self>
4646 where
4747 Self: Sized,
4848 {
···5353 }
5454 }
55555656- // fn allocate_zeroed(&self, layout: Layout) -> FrameIter<'_, Self, true>
5757- // where
5858- // Self: Sized,
5959- // {
6060- // FrameIter {
6161- // alloc: self,
6262- // remaining: layout.size(),
6363- // alignment: layout.align(),
6464- // }
6565- // }
    /// Like [`FrameAllocator::allocate`], but each yielded frame range is zeroed
    /// (via `arch.write_bytes` through `physmap`) before it is handed to the caller.
    ///
    /// `physmap` is used to translate each allocated physical range to a virtual
    /// address that can be written; `arch` performs the actual zeroing.
    fn allocate_zeroed<'a, A: Arch>(
        &self,
        layout: Layout,
        physmap: &'a PhysMap,
        arch: &'a A,
    ) -> FrameIterZeroed<'_, 'a, Self, A>
    where
        Self: Sized,
    {
        FrameIterZeroed {
            inner: self.allocate(layout),
            physmap,
            arch,
        }
    }
66716772 /// Attempts to allocate a contiguous block of physical memory.
6873 ///
···98103 physmap: &PhysMap,
99104 arch: &impl Arch,
100105 ) -> Result<PhysicalAddress, AllocError> {
101101- let frame = self.allocate_contiguous(layout)?;
106106+ let phys = self.allocate_contiguous(layout)?;
102107103103- let page = physmap.phys_to_virt(frame);
108108+ let virt = physmap.phys_to_virt(phys);
104109105110 // Safety: the address is properly aligned (at least page aligned) and is either valid to
106111 // access through the physical memory map or because we're in bootstrapping still and phys==virt
107112 unsafe {
108108- arch.write_bytes(page, 0, layout.size());
113113+ arch.write_bytes(virt, 0, layout.size());
109114 }
110115111111- Ok(frame)
116116+ Ok(phys)
112117 }
113118114119 /// Deallocates the block of memory referenced by `block`.
···142147 (**self).allocate_contiguous(layout)
143148 }
144149145145- // fn allocate_contiguous_zeroed(&self, layout: Layout, arch: &impl Arch) -> Result<PhysicalAddress, AllocError> {
146146- // (**self).allocate_contiguous_zeroed(layout, arch)
147147- // }
    // Forward to the referenced allocator so `&F` is usable wherever
    // `F: FrameAllocator` is.
    fn allocate_contiguous_zeroed(
        &self,
        layout: Layout,
        physmap: &PhysMap,
        arch: &impl Arch,
    ) -> Result<PhysicalAddress, AllocError> {
        (**self).allocate_contiguous_zeroed(layout, physmap, arch)
    }
148158149159 unsafe fn deallocate(&self, block: PhysicalAddress, layout: Layout) {
150160 // Safety: ensured by caller
···156166 }
157167}
/// Fallible iterator that lazily allocates (possibly discontiguous) physical
/// frame ranges from `alloc` until the originally requested size is satisfied.
pub struct FrameIter<'alloc, F: ?Sized> {
    // Allocator each chunk is drawn from.
    alloc: &'alloc F,
    // Bytes still left to allocate.
    remaining: usize,
    // Alignment of the original request, applied to every chunk.
    alignment: usize,
}
164174165165-impl<F: FrameAllocator, const ZEROED: bool> FallibleIterator for FrameIter<'_, F, ZEROED> {
175175+impl<F: FrameAllocator> FallibleIterator for FrameIter<'_, F> {
166176 type Item = Range<PhysicalAddress>;
167177 type Error = AllocError;
168178···182192 );
183193 let layout = Layout::from_size_align(alloc_size.get(), self.alignment).unwrap();
184194185185- let addr = if ZEROED {
186186- todo!()
187187- // self.alloc.allocate_contiguous_zeroed(layout)?
188188- } else {
189189- self.alloc.allocate_contiguous(layout)?
190190- };
195195+ let addr = self.alloc.allocate_contiguous(layout)?;
191196192197 self.remaining -= requested_size.get();
193198194199 Ok(Some(Range::from_start_len(addr, requested_size.get())))
195200 }
196201}
/// Adapter over [`FrameIter`] that zeroes every yielded frame range
/// (through the physical memory map) before passing it on.
pub struct FrameIterZeroed<'alloc, 'a, F: ?Sized, A: Arch> {
    inner: FrameIter<'alloc, F>,
    // Translates each allocated physical range to a writable virtual range.
    physmap: &'a PhysMap,
    // Performs the actual zeroing writes.
    arch: &'a A,
}
208208+209209+impl<F: FrameAllocator, A: Arch> FallibleIterator for FrameIterZeroed<'_, '_, F, A> {
210210+ type Item = Range<PhysicalAddress>;
211211+ type Error = AllocError;
212212+213213+ fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
214214+ let Some(range) = self.inner.next()? else {
215215+ return Ok(None);
216216+ };
217217+218218+ let virt = self.physmap.phys_to_virt_range(range.clone());
219219+220220+ // Safety: we just allocated the frame
221221+ unsafe {
222222+ self.arch.write_bytes(virt.start, 0, virt.len());
223223+ }
224224+225225+ Ok(Some(range))
226226+ }
227227+}
+1
libs/kmem/src/lib.rs
···2121pub use address::{PhysicalAddress, VirtualAddress};
2222pub use address_range::AddressRangeExt;
2323pub use address_space::HardwareAddressSpace;
2424+pub use arch::Arch;
2425pub use flush::Flush;
2526pub use frame_allocator::{AllocError, FrameAllocator, FrameIter};
2627pub use memory_attributes::{MemoryAttributes, WriteOrExecute};
+1-1
libs/kmem/src/memory_attributes.rs
···2020 /// In order to prevent malicious code execution as proactively as possible,
2121 /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced
2222 /// through the [`WriteOrExecute`] enum field.
2323- #[derive(PartialEq, Eq)]
2323+ #[derive(Default, PartialEq, Eq)]
2424 pub struct MemoryAttributes<u8> {
2525 /// If set, reading from the memory region is allowed.
2626 pub const READ: bool;
+1-1
libs/kmem/src/test_utils.rs
···44pub mod proptest;
5566pub use arch::EmulateArch;
77-pub use machine::{BootstrapResult, Cpu, HasMemory, Machine, MachineBuilder, MissingMemory};
77+pub use machine::{Cpu, HasMemory, Machine, MachineBuilder, MissingMemory};
88pub use memory::Memory;
991010#[macro_export]