Next Generation WASM Microkernel Operating System

refactor: clean up `AddressRangeExt` trait

The `AddressRangeExt` trait needed some improvement and cleanup, in preparation for making the address types safer, saner, and compatible with strict provenance.
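For orientation, here is a minimal usage sketch of the reworked trait as it is defined in the new `libs/kmem/src/address_range.rs` below. The concrete addresses and lengths are made up purely for illustration:

```rust
use core::ops::Range;

use kmem::{AddressRangeExt, VirtualAddress};

fn example() {
    // Two overlapping virtual ranges (hypothetical values).
    let a: Range<VirtualAddress> = Range::from_start_len(VirtualAddress::new(0x1000), 0x2000);
    let b: Range<VirtualAddress> = Range::from_start_len(VirtualAddress::new(0x2000), 0x2000);

    assert_eq!(a.len(), 0x2000); // byte length, replacing the old `size()`
    assert!(a.overlaps(&b));     // replacing the old `is_overlapping()`

    // `intersect` replaces the old `clamp()` and takes both ranges by value.
    let overlap = a.intersect(b);
    assert_eq!(overlap, Range::from_start_len(VirtualAddress::new(0x2000), 0x1000));
}
```

Call sites throughout the kernel and loader switch from `size()`, `is_overlapping()`, and `clamp()` to `len()`, `overlaps()`, and `intersect()`, and construct ranges with `from_start_len` instead of hand-rolled `start..start.checked_add(len).unwrap()` expressions.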

+290 -332
+5 -4
kernel/src/allocator.rs
···
  // copied, modified, or distributed except according to those terms.

  use core::alloc::Layout;
+ use core::ops::Range;

- use kmem::AddressRangeExt;
+ use kmem::{AddressRangeExt, VirtualAddress};
  use loader_api::BootInfo;
  use talc::{ErrOnOom, Span, Talc, Talck};

···

      let phys = boot_alloc.allocate_contiguous(layout).unwrap();

-     let virt = {
+     let virt: Range<VirtualAddress> = {
          let start = boot_info
              .physical_address_offset
              .checked_add(phys.get())
              .unwrap();

-         start..start.checked_add(layout.size()).unwrap()
+         Range::from_start_len(start, layout.size())
      };
      tracing::debug!("Kernel Heap: {virt:#x?}");

      let mut alloc = KERNEL_ALLOCATOR.lock();
-     let span = Span::from_base_size(virt.start.as_mut_ptr(), virt.size());
+     let span = Span::from_base_size(virt.start.as_mut_ptr(), virt.len());

      // Safety: just allocated the memory region
      unsafe {
+7 -4
kernel/src/arch/riscv64/device/plic.rs
···
  use core::alloc::Layout;
  use core::mem::{MaybeUninit, offset_of};
  use core::num::NonZero;
- use core::ops::{BitAnd, BitOr, Not};
+ use core::ops::{BitAnd, BitOr, Not, Range};
  use core::ptr;

  use fallible_iterator::FallibleIterator;

···
      let mmio_range = {
          let reg = dev.regs().unwrap().next()?.unwrap();

-         let start = PhysicalAddress::new(reg.starting_address);
-         start..start.checked_add(reg.size.unwrap()).unwrap()
+         Range::from_start_len(
+             PhysicalAddress::new(reg.starting_address),
+             reg.size.unwrap(),
+         )
      };

      let mmio_region = with_kernel_aspace(|aspace| {
-         let layout = Layout::from_size_align(mmio_range.size(), PAGE_SIZE).unwrap();
+         let layout =
+             Layout::from_size_align(AddressRangeExt::len(&mmio_range), PAGE_SIZE).unwrap();
          aspace
              .lock()
              .map(
+1 -1
kernel/src/backtrace/mod.rs
···
              .unwrap()
              .as_ptr();

-         slice::from_raw_parts(base, boot_info.kernel_phys.size())
+         slice::from_raw_parts(base, boot_info.kernel_phys.len())
      },
      symbolize_context: OnceLock::new(),
      backtrace_style,
+3 -7
kernel/src/main.rs
···
      let temp: ArrayVec<Range<PhysicalAddress>, 16> = boot_info
          .memory_regions
          .iter()
-         .filter_map(|region| {
-             let range = region.range.start..region.range.end;
-
-             region.kind.is_usable().then_some(range)
-         })
+         .filter_map(|region| region.kind.is_usable().then_some(region.range.clone()))
          .collect();

      // merge adjacent regions

···
          .as_mut_ptr();

      // Safety: we need to trust the bootinfo data is correct
-     let slice = unsafe { slice::from_raw_parts(base, fdt.range.size()) };
-     (slice, fdt.range.start..fdt.range.end)
+     let slice = unsafe { slice::from_raw_parts(base, fdt.range.len()) };
+     (slice, fdt.range.clone())
  }
+24 -26
kernel/src/mem/address_space.rs
···
  ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
      let layout = layout.pad_to_align();
      let base = self.find_spot(layout, VIRT_ALLOC_ENTROPY)?;
-     let range = base..base
-         .checked_add(layout.size())
-         .expect("chosen memory range end overflows");
+     let range = Range::from_start_len(base, layout.size());

      self.map_internal(range, permissions, map)
  }

···
      ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(
-         range.size()
+         range.clone().len()
              <= self
                  .max_range
                  .end()

···
      ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(
-         range.size()
+         range.len()
              <= self
                  .max_range
                  .end()

···
      // as the requested range.
      let mut bytes_seen = 0;
      self.for_each_region_in_range(range.clone(), |region| {
-         bytes_seen += region.range.size();
+         bytes_seen += region.range.len();
          Ok(())
      })?;
-     ensure!(bytes_seen == range.size());
+     ensure!(bytes_seen == range.len());

      // Actually do the unmapping now
      // Safety: we checked all invariant above

···
  }

  pub unsafe fn unmap_unchecked(&mut self, range: Range<VirtualAddress>) -> crate::Result<()> {
-     let mut bytes_remaining = range.size();
+     let mut bytes_remaining = range.len();
      let mut c = self.regions.find_mut(&range.start);
      while bytes_remaining > 0 {
          let mut region = c.remove().unwrap();
          let range = region.range.clone();
          Pin::as_mut(&mut region).unmap(range.clone())?;
-         bytes_remaining -= range.size();
+         bytes_remaining -= range.len();
      }

      let mut flush = self.arch.new_flush();

···
      unsafe {
          self.arch.unmap(
              range.start,
-             NonZeroUsize::new(range.size()).unwrap(),
+             NonZeroUsize::new(range.len()).unwrap(),
              &mut flush,
          )?;
      }

···
      ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(
-         range.size()
+         range.len()
              <= self
                  .max_range
                  .end()

···
      // current ones.
      let mut bytes_seen = 0;
      self.for_each_region_in_range(range.clone(), |region| {
-         bytes_seen += region.range.size();
+         bytes_seen += region.range.len();

          ensure!(region.permissions.contains(new_permissions),);

          Ok(())
      })?;
-     ensure!(bytes_seen == range.size());
+     ensure!(bytes_seen == range.len());

      // Actually do the permission changes now
      // Safety: we checked all invariant above

···
      range: Range<VirtualAddress>,
      new_permissions: Permissions,
  ) -> crate::Result<()> {
-     let mut bytes_remaining = range.size();
+     let mut bytes_remaining = range.len();
      let mut c = self.regions.find_mut(&range.start);
      while bytes_remaining > 0 {
          let mut region = c.get_mut().unwrap();
          region.permissions = new_permissions;
-         bytes_remaining -= range.size();
+         bytes_remaining -= range.len();
      }

      let mut flush = self.arch.new_flush();

···
      unsafe {
          self.arch.update_flags(
              range.start,
-             NonZeroUsize::new(range.size()).unwrap(),
+             NonZeroUsize::new(range.len()).unwrap(),
              new_permissions.into(),
              &mut flush,
          )?;

···
      ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(
-         range.size()
+         range.len()
              <= self
                  .max_range
                  .end()

···
      // Safety: we checked all invariants above
      unsafe {
          self.arch
-             .unmap(range.start, NonZeroUsize::new(range.size()).unwrap(), flush)?;
+             .unmap(range.start, NonZeroUsize::new(range.len()).unwrap(), flush)?;
      }
  } else {
      // Safety: we checked all invariants above
      unsafe {
          self.arch.update_flags(
              range.start,
-             NonZeroUsize::new(range.size()).unwrap(),
+             NonZeroUsize::new(range.len()).unwrap(),
              permissions.into(),
              flush,
          )?;

···
      ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
      ensure!(
-         range.size()
+         range.len()
              <= self
                  .max_range
                  .end()

···
      );

      let mut batch = Batch::new(&mut self.arch, self.frame_alloc);
-     let mut bytes_remaining = range.size();
+     let mut bytes_remaining = range.len();
      let mut c = self.regions.find_mut(&range.start);
      while bytes_remaining > 0 {
          let region = c.get_mut().unwrap();
-         let clamped = range.clamp(region.range.clone());
+         let clamped = range.clone().intersect(region.range.clone());
          region.commit(&mut batch, clamped, will_write)?;

-         bytes_remaining -= range.size();
+         bytes_remaining -= range.len();
      }
      batch.flush()?;

···
          return 0;
      }

-     let range_size = aligned.size();
+     let range_size = aligned.len();
      if range_size >= layout.size() {
          ((range_size - layout.size()) >> layout.align().ilog2()) + 1
      } else {

···
      if self.range.end != virt || self.flags != flags {
          self.flush()?;
          self.flags = flags;
-         self.range = virt..virt.checked_add(len.get()).unwrap();
+         self.range = Range::from_start_len(virt, len.get());
      } else {
          self.range.end = self.range.end.checked_add(len.get()).unwrap();
      }

···
      }
      flush.flush()?;

-     self.range = self.range.end..self.range.end;
+     self.range = Range::from_start_len(self.range.end, 0);
      Ok(())
  }
+3 -6
kernel/src/mem/address_space_region.rs
···
              batch.queue_map(
                  range.start,
                  range_phys.start,
-                 NonZeroUsize::new(range_phys.size()).unwrap(),
+                 NonZeroUsize::new(range_phys.len()).unwrap(),
                  self.permissions.into(),
              )?;
          }

···
          Vmo::Wired => unreachable!("Wired VMO can never page fault"),
          Vmo::Phys(vmo) => {
              let range_phys = vmo
-                 .lookup_contiguous(
-                     vmo_relative_offset
-                         ..vmo_relative_offset.checked_add(arch::PAGE_SIZE).unwrap(),
-                 )
+                 .lookup_contiguous(vmo_relative_offset..vmo_relative_offset + arch::PAGE_SIZE)
                  .expect("contiguous lookup for wired VMOs should never fail");

              batch.queue_map(
                  addr,
                  range_phys.start,
-                 NonZeroUsize::new(range_phys.size()).unwrap(),
+                 NonZeroUsize::new(range_phys.len()).unwrap(),
                  self.permissions.into(),
              )?;
          }
+7 -7
kernel/src/mem/bootstrap_alloc.rs
···

      for region in self.regions.iter().rev() {
          // only consider regions that we haven't already exhausted
-         if offset < region.size() {
+         if offset < region.len() {
              // Allocating a contiguous range has different requirements than "regular" allocation
              // contiguous are rare and often happen in very critical paths where e.g. virtual
              // memory is not available yet. So we rather waste some memory than outright crash.
-             if region.size() - offset < requested_size {
+             if region.len() - offset < requested_size {
                  tracing::warn!(
                      "Skipped memory region {region:?} since it was too small to fulfill request for {requested_size} bytes. Wasted {} bytes in the process...",
-                     region.size() - offset
+                     region.len() - offset
                  );

-                 self.offset += region.size() - offset;
+                 self.offset += region.len() - offset;
                  offset = 0;
                  continue;
              }

···
              return Some(frame);
          }

-         offset -= region.size();
+         offset -= region.len();
      }

      None

···
      loop {
          let mut region = self.inner.next()?;
          // keep advancing past already fully used memory regions
-         if self.offset >= region.size() {
-             self.offset -= region.size();
+         if self.offset >= region.len() {
+             self.offset -= region.len();
              continue;
          } else if self.offset > 0 {
              region.end = region.end.checked_sub(self.offset).unwrap();
+6 -6
kernel/src/mem/frame_alloc/arena.rs
···

  impl Arena {
      pub fn from_selection(selection: ArenaSelection) -> Self {
-         debug_assert!(selection.bookkeeping.size() >= bookkeeping_size(selection.arena.size()));
+         debug_assert!(selection.bookkeeping.len() >= bookkeeping_size(selection.arena.len()));

          // Safety: arena selection has ensured the region is valid
          let slots: &mut [MaybeUninit<FrameInfo>] = unsafe {

···

              slice::from_raw_parts_mut(
                  ptr,
-                 selection.bookkeeping.size() / ARENA_PAGE_BOOKKEEPING_SIZE,
+                 selection.bookkeeping.len() / ARENA_PAGE_BOOKKEEPING_SIZE,
              )
          };

-         let mut remaining_bytes = selection.arena.size();
+         let mut remaining_bytes = selection.arena.len();
          let mut addr = selection.arena.start;
          let mut total_frames = 0;
          let mut max_order = 0;

···
          }

          // Make sure we've accounted for all frames
-         debug_assert_eq!(total_frames, selection.arena.size() / arch::PAGE_SIZE);
+         debug_assert_eq!(total_frames, selection.arena.len() / arch::PAGE_SIZE);

          Self {
              range: selection.arena,

···
      while let Some(region) = self.free_regions.pop() {
          tracing::debug!(arena.end=?arena.end,region=?region, "Attempting to add free region");

-         debug_assert!(!arena.is_overlapping(&region));
+         debug_assert!(!arena.overlaps(&region));

          let pages_in_hole = if arena.end <= region.start {
              // the region is higher than the current arena

···
      }

      let mut aligned = arena.checked_align_in(arch::PAGE_SIZE).unwrap();
-     let bookkeeping_size = bookkeeping_size(aligned.size());
+     let bookkeeping_size = bookkeeping_size(aligned.len());

      // We can't use empty arenas anyway
      if aligned.is_empty() {
+5 -5
kernel/src/mem/mmap.rs
···
              Permissions::READ | Permissions::WRITE,
              |range_virt, perms, _batch| {
                  Ok(AddressSpaceRegion::new_phys(
-                     range_virt.clone(),
+                     range_virt,
                      perms,
                      range_phys.clone(),
                      name,

···

          // Safety: checked by caller
          unsafe {
-             let slice = slice::from_raw_parts(self.range.start.as_ptr(), self.range().size());
+             let slice = slice::from_raw_parts(self.range.start.as_ptr(), self.range().len());

              f(&slice[range]);
          }

···
          // Safety: checked by caller
          unsafe {
              let slice =
-                 slice::from_raw_parts_mut(self.range.start.as_mut_ptr(), self.range().size());
+                 slice::from_raw_parts_mut(self.range.start.as_mut_ptr(), self.range().len());
              f(&mut slice[range]);
          }

···
      #[inline]
      pub fn len(&self) -> usize {
          // Safety: the constructor ensures that the NonNull is valid.
-         self.range.size()
+         self.range.len()
      }

      /// Whether this is a mapping of zero bytes

···
          unsafe {
              aspace.arch.update_flags(
                  self.range.start,
-                 NonZeroUsize::new(self.range.size()).unwrap(),
+                 NonZeroUsize::new(self.range.len()).unwrap(),
                  new_permissions.into(),
                  &mut flush,
              )?;
+2 -2
kernel/src/mem/mod.rs
···
      // reserve the physical memory map
      aspace
          .reserve(
-             boot_info.physical_memory_map.start..boot_info.physical_memory_map.end,
+             boot_info.physical_memory_map.clone(),
              Permissions::READ | Permissions::WRITE,
              Some("Physical Memory Map".to_string()),
              flush,

···
              .unwrap()
              .as_ptr();

-         slice::from_raw_parts(base, boot_info.kernel_phys.size())
+         slice::from_raw_parts(base, boot_info.kernel_phys.len())
      };
      let own_elf = xmas_elf::ElfFile::new(own_elf).unwrap();
+1 -1
kernel/src/mem/vmo.rs
···

  impl PhysVmo {
      pub fn is_valid_offset(&self, offset: usize) -> bool {
-         offset <= self.range.size()
+         offset <= self.range.len()
      }

      pub fn lookup_contiguous(&self, range: Range<usize>) -> crate::Result<Range<PhysicalAddress>> {
+3 -6
kernel/src/shell.rs
···
  use alloc::string::{String, ToString};
  use core::fmt;
  use core::fmt::Write;
- use core::ops::DerefMut;
+ use core::ops::{DerefMut, Range};
  use core::str::FromStr;

  use fallible_iterator::FallibleIterator;
  use kasync::executor::Executor;
- use kmem::PhysicalAddress;
+ use kmem::{AddressRangeExt, PhysicalAddress};
  use spin::{Barrier, OnceLock};

  use crate::device_tree::DeviceTree;

···
          .unwrap()
          .get();

-     let range_phys = {
-         let start = PhysicalAddress::new(reg.starting_address);
-         start..start.checked_add(size).unwrap()
-     };
+     let range_phys = Range::from_start_len(PhysicalAddress::new(reg.starting_address), size);

      let mmap = Mmap::new_phys(
          aspace.clone(),
+91
libs/kmem/src/address_range.rs
···
+ // Copyright 2025. Jonas Kruckenberg
+ //
+ // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+ // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+ // http://opensource.org/licenses/MIT>, at your option. This file may not be
+ // copied, modified, or distributed except according to those terms.
+
+ use crate::{PhysicalAddress, VirtualAddress};
+
+ pub trait AddressRangeExt {
+     type Address;
+     fn from_start_len(start: Self::Address, len: usize) -> Self;
+
+     /// Returns `true` if the range contains no addresses.
+     fn is_empty(&self) -> bool;
+
+     /// Returns the length of the address range, in bytes.
+     fn len(&self) -> usize;
+
+     /// Returns `true` if `address` is contained in the range.
+     fn contains(&self, address: &Self::Address) -> bool;
+
+     /// Returns `true` if there exists an address present in both ranges.
+     fn overlaps(&self, other: &Self) -> bool;
+
+     /// Returns the intersection of `self` and `other`.
+     fn intersect(self, other: Self) -> Self;
+
+     fn checked_align_in(self, align: usize) -> Option<Self>
+     where
+         Self: Sized;
+     fn checked_align_out(self, align: usize) -> Option<Self>
+     where
+         Self: Sized;
+ }
+
+ macro_rules! impl_address_range {
+     ($address_ty:ident) => {
+         impl AddressRangeExt for ::core::ops::Range<$address_ty> {
+             type Address = $address_ty;
+
+             fn from_start_len(start: Self::Address, len: usize) -> Self {
+                 let end = start.checked_add(len).unwrap();
+
+                 Self { start, end }
+             }
+
+             fn is_empty(&self) -> bool {
+                 self.start >= self.end
+             }
+
+             fn len(&self) -> usize {
+                 self.end.checked_sub_addr(self.start).unwrap()
+             }
+
+             fn contains(&self, address: &Self::Address) -> bool {
+                 <Self as ::core::ops::RangeBounds<$address_ty>>::contains(self, address)
+             }
+
+             fn overlaps(&self, other: &Self) -> bool {
+                 self.start < other.end && other.start < self.end
+             }
+
+             fn intersect(self, other: Self) -> Self {
+                 Self {
+                     start: core::cmp::max(self.start, other.start),
+                     end: core::cmp::min(self.end, other.end),
+                 }
+             }
+
+             fn checked_align_in(self, align: usize) -> Option<Self>
+             where
+                 Self: Sized,
+             {
+                 let res = self.start.checked_align_up(align)?..self.end.align_down(align);
+                 Some(res)
+             }
+
+             fn checked_align_out(self, align: usize) -> Option<Self>
+             where
+                 Self: Sized,
+             {
+                 let res = self.start.align_down(align)..self.end.checked_align_up(align)?;
+                 Some(res)
+             }
+         }
+     };
+ }
+
+ impl_address_range!(VirtualAddress);
+ impl_address_range!(PhysicalAddress);
+38 -155
libs/kmem/src/addresses.rs → libs/kmem/src/address.rs
···
  // http://opensource.org/licenses/MIT>, at your option. This file may not be
  // copied, modified, or distributed except according to those terms.

- use core::alloc::{Layout, LayoutError};
- use core::ops::Range;
+ macro_rules! impl_address_from {
+     ($address_ty:ident, $int_ty:ident) => {
+         impl From<$int_ty> for $address_ty {
+             fn from(value: $int_ty) -> Self {
+                 $address_ty(usize::from(value))
+             }
+         }
+     };
+ }
+
+ macro_rules! impl_address_try_from {
+     ($address_ty:ident, $int_ty:ident) => {
+         impl TryFrom<$int_ty> for $address_ty {
+             type Error = <usize as TryFrom<$int_ty>>::Error;
+
+             fn try_from(value: $int_ty) -> Result<Self, Self::Error> {
+                 usize::try_from(value).map($address_ty)
+             }
+         }
+     };
+ }

  macro_rules! impl_address {
      ($address_ty:ident) => {

···
          pub const MIN: Self = Self(0);
          pub const ZERO: Self = Self(0);
          pub const BITS: u32 = usize::BITS;
+
+         #[must_use]
+         pub const fn new(n: usize) -> Self {
+             Self(n)
+         }

          #[inline]
          pub const fn get(&self) -> usize {

···
          }
      }

+     impl_address_from!($address_ty, usize);
+     impl_address_from!($address_ty, u8);
+     impl_address_from!($address_ty, u16);
+     impl_address_try_from!($address_ty, i8);
+     impl_address_try_from!($address_ty, i16);
+     impl_address_try_from!($address_ty, i32);
+     impl_address_try_from!($address_ty, i64);
+     impl_address_try_from!($address_ty, i128);
+     impl_address_try_from!($address_ty, u32);
+     impl_address_try_from!($address_ty, u64);
+     impl_address_try_from!($address_ty, u128);
+
      impl ::core::fmt::Display for $address_ty {
          fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
              f.write_fmt(format_args!("{:#018x}", self.0)) // 18 digits to account for the leading 0x

···
  pub struct VirtualAddress(usize);
  impl_address!(VirtualAddress);

- impl VirtualAddress {
-     #[must_use]
-     pub const fn new(n: usize) -> Self {
-         Self(n)
-     }
-
-     // pub const fn is_canonical<A: RawAddressSpace>(self) -> bool {
-     //     (self.0 & A::CANONICAL_ADDRESS_MASK).wrapping_sub(1) >= A::CANONICAL_ADDRESS_MASK - 1
-     // }
-     //
-     // #[inline]
-     // pub const fn is_user_accessible<A: RawAddressSpace>(self) -> bool {
-     //     // This address refers to userspace if it is in the lower half of the
-     //     // canonical addresses. IOW - if all of the bits in the canonical address
-     //     // mask are zero.
-     //     (self.0 & A::CANONICAL_ADDRESS_MASK) == 0
-     // }
- }
-
  #[repr(transparent)]
  #[derive(Default, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
  pub struct PhysicalAddress(usize);
  impl_address!(PhysicalAddress);
-
- impl PhysicalAddress {
-     pub const fn new(n: usize) -> Self {
-         Self(n)
-     }
- }
-
- macro_rules! address_range_impl {
-     () => {
-         fn size(&self) -> usize {
-             debug_assert!(self.start <= self.end);
-             let is = self.end.checked_sub_addr(self.start).unwrap_or_default();
-             let should = if self.is_empty() {
-                 0
-             } else {
-                 self.end.get() - self.start.get()
-             };
-             debug_assert_eq!(is, should);
-             is
-         }
-         fn checked_add(self, offset: usize) -> Option<Self> {
-             Some(Range::from(
-                 self.start.checked_add(offset)?..self.end.checked_add(offset)?,
-             ))
-         }
-         fn as_ptr_range(&self) -> Range<*const u8> {
-             Range::from(self.start.as_ptr()..self.end.as_ptr())
-         }
-         fn as_mut_ptr_range(&self) -> Range<*mut u8> {
-             Range::from(self.start.as_mut_ptr()..self.end.as_mut_ptr())
-         }
-         fn checked_align_in(self, align: usize) -> Option<Self>
-         where
-             Self: Sized,
-         {
-             let res = Range::from(self.start.checked_align_up(align)?..self.end.align_down(align));
-             Some(res)
-         }
-         fn checked_align_out(self, align: usize) -> Option<Self>
-         where
-             Self: Sized,
-         {
-             let res = Range::from(self.start.align_down(align)..self.end.checked_align_up(align)?);
-             // aligning outwards can only increase the size
-             debug_assert!(res.start.0 <= res.end.0);
-             Some(res)
-         }
-         // fn saturating_align_in(self, align: usize) -> Self {
-         //     self.start.saturating_align_up(align)..self.end.saturating_align_down(align)
-         // }
-         // fn saturating_align_out(self, align: usize) -> Self {
-         //     self.start.saturating_align_down(align)..self.end.saturating_align_up(align)
-         // }
-
-         // TODO test
-         fn alignment(&self) -> usize {
-             self.start.alignment()
-         }
-         fn into_layout(self) -> core::result::Result<Layout, core::alloc::LayoutError> {
-             Layout::from_size_align(self.size(), self.alignment())
-         }
-         fn is_overlapping(&self, other: &Self) -> bool {
-             (self.start < other.end) & (other.start < self.end)
-         }
-         fn difference(&self, other: Self) -> (Option<Self>, Option<Self>) {
-             debug_assert!(self.is_overlapping(&other));
-             let a = Range::from(self.start..other.start);
-             let b = Range::from(other.end..self.end);
-             ((!a.is_empty()).then_some(a), (!b.is_empty()).then_some(b))
-         }
-         fn clamp(&self, range: Self) -> Self {
-             Range::from(self.start.max(range.start)..self.end.min(range.end))
-         }
-     };
- }
-
- pub trait AddressRangeExt {
-     fn size(&self) -> usize;
-     #[must_use]
-     fn checked_add(self, offset: usize) -> Option<Self>
-     where
-         Self: Sized;
-     #[must_use]
-     fn as_ptr_range(&self) -> Range<*const u8>;
-     #[must_use]
-     fn as_mut_ptr_range(&self) -> Range<*mut u8>;
-     #[must_use]
-     fn checked_align_in(self, align: usize) -> Option<Self>
-     where
-         Self: Sized;
-     #[must_use]
-     fn checked_align_out(self, align: usize) -> Option<Self>
-     where
-         Self: Sized;
-     // #[must_use]
-     // fn saturating_align_in(self, align: usize) -> Self;
-     // #[must_use]
-     // fn saturating_align_out(self, align: usize) -> Self;
-     fn alignment(&self) -> usize;
-     /// Return the largest [`Layout`] fitting this range.
-     ///
-     /// # Errors
-     ///
-     /// Return [`LayoutError`] if this range does not represent a valid layout.
-     fn into_layout(self) -> Result<Layout, LayoutError>;
-     fn is_overlapping(&self, other: &Self) -> bool;
-     fn difference(&self, other: Self) -> (Option<Self>, Option<Self>)
-     where
-         Self: Sized;
-     fn clamp(&self, range: Self) -> Self;
-     // fn is_user_accessible<A: RawAddressSpace>(&self) -> bool;
- }
-
- impl AddressRangeExt for Range<PhysicalAddress> {
-     address_range_impl!();
-     // fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
-     //     unimplemented!("PhysicalAddress is never user accessible")
-     // }
- }
-
- impl AddressRangeExt for Range<VirtualAddress> {
-     address_range_impl!();
-
-     // fn is_user_accessible<A: RawAddressSpace>(&self) -> bool {
-     //     if self.is_empty() {
-     //         return false;
-     //     }
-     //     let Some(end_minus_one) = self.end.checked_sub(1) else {
-     //         return false;
-     //     };
-     //
-     //     self.start.is_user_accessible::<A>() && end_minus_one.is_user_accessible::<A>()
-     // }
- }
+4 -2
libs/kmem/src/lib.rs
···
  #![no_std]
  #![feature(step_trait)]

- mod addresses;
+ mod address;
+ mod address_range;

- pub use addresses::{AddressRangeExt, PhysicalAddress, VirtualAddress};
+ pub use address::{PhysicalAddress, VirtualAddress};
+ pub use address_range::AddressRangeExt;
+4 -5
loader/src/arch/riscv64.rs
···

  use core::arch::{asm, naked_asm};
  use core::fmt;
- use core::num::NonZero;
  use core::ptr::NonNull;

  use bitflags::bitflags;

···
      frame_alloc: &mut FrameAllocator,
      mut virt: VirtualAddress,
      mut phys: PhysicalAddress,
-     len: NonZero<usize>,
+     len: usize,
      flags: Flags,
      phys_off: VirtualAddress,
  ) -> crate::Result<()> {
-     let mut remaining_bytes = len.get();
+     let mut remaining_bytes = len;
      debug_assert!(
          remaining_bytes >= PAGE_SIZE,
          "address range span be at least one page"

···
      root_pgtable: PhysicalAddress,
      mut virt: VirtualAddress,
      mut phys: PhysicalAddress,
-     len: NonZero<usize>,
+     len: usize,
      phys_off: VirtualAddress,
  ) {
-     let mut remaining_bytes = len.get();
+     let mut remaining_bytes = len;
      debug_assert!(
          remaining_bytes >= PAGE_SIZE,
          "virtual address range must span be at least one page"
+12 -13
loader/src/frame_alloc.rs
···
  // copied, modified, or distributed except according to those terms.

  use core::alloc::Layout;
- use core::num::NonZeroUsize;
  use core::ops::Range;
  use core::{cmp, iter, ptr, slice};

···
      let mut offset = self.offset;

      for region in self.regions.iter().rev() {
-         let region_size = region.size();
+         let region_size = region.len();

          // only consider regions that we haven't already exhausted
          if offset < region_size {

···
  }

  impl FallibleIterator for FrameIter<'_, '_> {
-     type Item = (PhysicalAddress, NonZeroUsize);
+     type Item = Range<PhysicalAddress>;
      type Error = Error;

      fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {

···

      for region in self.alloc.regions.iter().rev() {
          // only consider regions that we haven't already exhausted
-         if let Some(allocatable_size) = region.size().checked_sub(offset)
+         if let Some(allocatable_size) = region.len().checked_sub(offset)
              && allocatable_size >= arch::PAGE_SIZE
          {
              let allocation_size = cmp::min(self.remaining, allocatable_size)

···
              self.alloc.offset += allocation_size;
              self.remaining -= allocation_size;

-             return Ok(Some((frame, NonZeroUsize::new(allocation_size).unwrap())));
+             return Ok(Some(Range::from_start_len(frame, allocation_size)));
          }

-         offset -= region.size();
+         offset -= region.len();
      }

      Err(Error::NoMemory)

···
  }

  impl FallibleIterator for FrameIterZeroed<'_, '_> {
-     type Item = (PhysicalAddress, NonZeroUsize);
+     type Item = Range<PhysicalAddress>;
      type Error = Error;

      fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
-         let Some((base, len)) = self.inner.next()? else {
+         let Some(range) = self.inner.next()? else {
              return Ok(None);
          };

···
          unsafe {
              ptr::write_bytes::<u8>(
                  self.phys_offset
-                     .checked_add(base.get())
+                     .checked_add(range.start.get())
                      .unwrap()
                      .as_mut_ptr(),
                  0,
-                 len.get(),
+                 range.len(),
              );
          }

-         Ok(Some((base, len)))
+         Ok(Some(range))
      }
  }

···
      loop {
          let mut region = self.inner.next()?;
          // keep advancing past already fully used memory regions
-         let region_size = region.size();
+         let region_size = region.len();

          if self.offset >= region_size {
              self.offset -= region_size;

···
  fn next(&mut self) -> Option<Self::Item> {
      let mut region = self.inner.next()?;

-     if self.offset >= region.size() {
+     if self.offset >= region.len() {
          Some(region)
      } else if self.offset > 0 {
          region.start = region.end.checked_sub(self.offset).unwrap();
+5 -3
loader/src/kernel.rs
···
  use core::ops::Range;
  use core::{fmt, slice};

- use kmem::{PhysicalAddress, VirtualAddress};
+ use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
  use loader_api::LoaderConfig;
  use xmas_elf::program::{ProgramHeader, Type};

···

  impl Kernel<'_> {
      pub fn phys_range(&self) -> Range<PhysicalAddress> {
-         let fdt = INLINED_KERNEL_BYTES.0.as_ptr_range();
-         PhysicalAddress::from_ptr(fdt.start)..PhysicalAddress::from_ptr(fdt.end)
+         Range::from_start_len(
+             PhysicalAddress::from_ptr(INLINED_KERNEL_BYTES.0.as_ptr()),
+             INLINED_KERNEL_BYTES.0.len(),
+         )
      }

      /// Returns the size of the kernel in memory.
+7 -8
loader/src/machine_info.rs
···
          .and_then(|addr| usize::from_str(addr).ok())
      {
          // if the node is a CPU check its availability and populate the hart_mask
-
          let available = find_cstr_property(node.properties(), "status")? == Some(c"okay");

          if available {

···
          memories.push({
              let start = PhysicalAddress::new(reg.starting_address);

-             start..start.checked_add(reg.size.unwrap_or(0)).unwrap()
+             Range::from_start_len(start, reg.size.unwrap_or(0))
          });
      }
  } else if stack[depth - 1].is_some_and(|(s, _)| s == "reserved-memory") {

···
          reserved_memory.push({
              let start = PhysicalAddress::new(reg.starting_address);

-             start..start.checked_add(reg.size.unwrap_or(0)).unwrap()
+             Range::from_start_len(start, reg.size.unwrap_or(0))
          });
      }
  } else if name.name == "chosen" {

···

  // Apply reserved_entries
  while let Some(entry) = reservations.next()? {
-     let region = {
-         let start = PhysicalAddress::new(usize::try_from(entry.address)?);
+     let region = Range::from_start_len(
+         PhysicalAddress::try_from(entry.address)?,
+         usize::try_from(entry.size)?,
+     );

-         start..start.checked_add(usize::try_from(entry.size)?).unwrap()
-     };
      log::trace!("applying reservation {region:#x?}");

      exclude_region(region);

···
  }

  // remove memory regions that are left as zero-sized from the previous step
- memories.retain(|region| region.size() > 0);
+ memories.retain(|region| !region.is_empty());

  // page-align all memory regions, this will waste some physical memory in the process,
  // but we can't make use of it either way
+6 -7
loader/src/main.rs
···
  use core::ops::Range;

  use arrayvec::ArrayVec;
- use kmem::{PhysicalAddress, VirtualAddress};
+ use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
  use rand::SeedableRng;
  use rand_chacha::ChaCha20Rng;
  use spin::{Barrier, OnceLock};

···
      let self_regions = SelfRegions::collect(&minfo);
      log::debug!("{self_regions:#x?}");

-     let fdt_phys = {
-         let fdt = minfo.fdt.as_ptr_range();
-         PhysicalAddress::from_ptr(fdt.start)..PhysicalAddress::from_ptr(fdt.end)
-     };
+     let fdt_phys = Range::from_start_len(
+         PhysicalAddress::from_ptr(minfo.fdt.as_ptr()),
+         minfo.fdt.len(),
+     );

      // Initialize the frame allocator
      let allocatable_memories = allocatable_memory_regions(&minfo, &self_regions, fdt_phys.clone());

···
      }

      assert!(
-         !other.contains(&region.start)
-             && !other.contains(&(region.end.checked_sub(1).unwrap())),
+         !region.overlaps(other),
          "regions {region:#x?} and {other:#x?} overlap"
      );
  }
+53 -62
loader/src/mapping.rs
···
  // copied, modified, or distributed except according to those terms.

  use core::alloc::Layout;
- use core::num::NonZeroUsize;
  use core::ops::Range;
  use core::{cmp, ptr, slice};

···
      phys: Range<PhysicalAddress>,
      flags: Flags,
  ) -> crate::Result<()> {
-     let len = NonZeroUsize::new(phys.size()).unwrap();
      let virt_start = VirtualAddress::new(phys.start.get());

      // Safety: Leaving the address space in an invalid state here is fine since on panic we'll

···
          frame_alloc,
          virt_start,
          phys.start,
-         len,
+         phys.len(),
          flags,
          VirtualAddress::ZERO, // called before translation into higher half
      )

···
      let alignment = arch::page_size_for_level(2);

      let phys = minfo.memory_hull().checked_align_out(alignment).unwrap();
-     let virt = arch::KERNEL_ASPACE_BASE
-         .checked_add(phys.start.get())
-         .unwrap()
-         ..arch::KERNEL_ASPACE_BASE
+     let virt = Range {
+         start: arch::KERNEL_ASPACE_BASE
+             .checked_add(phys.start.get())
+             .unwrap(),
+         end: arch::KERNEL_ASPACE_BASE
              .checked_add(phys.end.get())
-             .unwrap();
-     let size = NonZeroUsize::new(phys.size()).unwrap();
+             .unwrap(),
+     };

      debug_assert!(phys.start.is_aligned_to(alignment) && phys.end.is_aligned_to(alignment));
      debug_assert!(virt.start.is_aligned_to(alignment) && virt.end.is_aligned_to(alignment));

···
          frame_alloc,
          virt.start,
          phys.start,
-         size,
+         phys.len(),
          Flags::READ | Flags::WRITE,
          VirtualAddress::ZERO, // called before translation into higher half
      )?;
  }

  // exclude the physical memory map region from page allocation
- page_alloc.reserve(virt.start, size.get());
+ page_alloc.reserve(virt.start, phys.len());

  Ok((arch::KERNEL_ASPACE_BASE, virt))
  }

···
          memsz = ph.mem_size
      );

-     let phys = {
-         let start = phys_base.checked_add(ph.offset).unwrap();
-         let end = start.checked_add(ph.file_size).unwrap();
-
-         (start..end).checked_align_out(ph.align).unwrap()
-     };
-
-     let virt = {
-         let start = virt_base.checked_add(ph.virtual_address).unwrap();
-         let end = start.checked_add(ph.file_size).unwrap();
+     let phys = Range::from_start_len(phys_base.checked_add(ph.offset).unwrap(), ph.file_size)
+         .checked_align_out(ph.align)
+         .unwrap();

-         (start..end).checked_align_out(ph.align).unwrap()
-     };
+     let virt = Range::from_start_len(
+         virt_base.checked_add(ph.virtual_address).unwrap(),
+         ph.file_size,
+     )
+     .checked_align_out(ph.align)
+     .unwrap();

      log::trace!("mapping {virt:#x?} => {phys:#x?}");
      // Safety: Leaving the address space in an invalid state here is fine since on panic we'll

···
          frame_alloc,
          virt.start,
          phys.start,
-         NonZeroUsize::new(phys.size()).unwrap(),
+         phys.len(),
          flags,
          arch::KERNEL_ASPACE_BASE,
      )?;

···
          root_pgtable,
          last_page,
          new_frame,
-         NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
+         arch::PAGE_SIZE,
          phys_off,
      );
  }
  }

  log::trace!("zero_start {zero_start:?} zero_end {zero_end:?}");
- let (mut virt, len) = {
-     // zero_start either lies at a page boundary OR somewhere within the first page
-     // by aligning up, we move it to the beginning of the *next* page.
-     let start = zero_start.checked_align_up(ph.align).unwrap();
-     let end = zero_end.checked_align_up(ph.align).unwrap();
-     (start, end.checked_sub_addr(start).unwrap())
+ // zero_start either lies at a page boundary OR somewhere within the first page
+ // by aligning up, we move it to the beginning of the *next* page.
+ let mut virt = Range {
+     start: zero_start.checked_align_up(ph.align).unwrap(),
+     end: zero_end.checked_align_up(ph.align).unwrap(),
  };

- if len > 0 {
-     let mut phys_iter = frame_alloc.allocate_zeroed(
-         Layout::from_size_align(len, arch::PAGE_SIZE).unwrap(),
+ if !virt.is_empty() {
+     let mut frame_iter = frame_alloc.allocate_zeroed(
+         Layout::from_size_align(virt.len(), arch::PAGE_SIZE).unwrap(),
          arch::KERNEL_ASPACE_BASE,
      );

-     while let Some((phys, len)) = phys_iter.next()? {
-         log::trace!(
-             "mapping additional zeros {virt:?}..{:?}",
-             virt.checked_add(len.get()).unwrap()
-         );
+     while let Some(chunk) = frame_iter.next()? {
+         log::trace!("mapping additional zeros {virt:?}",);

          // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
          // abort startup anyway
          unsafe {
              arch::map_contiguous(
                  root_pgtable,
-                 phys_iter.alloc(),
-                 virt,
-                 phys,
-                 len,
+                 frame_iter.alloc(),
+                 virt.start,
+                 chunk.start,
+                 chunk.len(),
                  flags,
                  arch::KERNEL_ASPACE_BASE,
              )?;
          }

-         virt = virt.checked_add(len.get()).unwrap();
+         virt.start = virt.start.checked_add(chunk.len()).unwrap();
      }
  }

···
      let virt = page_alloc.allocate(layout);
      let mut virt_start = virt.start;

-     let mut phys_iter = frame_alloc.allocate_zeroed(layout, phys_off);
-     while let Some((phys, len)) = phys_iter.next()? {
+     let mut frame_iter = frame_alloc.allocate_zeroed(layout, phys_off);
+     while let Some(chunk) = frame_iter.next()? {
          log::trace!(
-             "Mapping TLS region {virt_start:?}..{:?} {len} ...",
-             virt_start.checked_add(len.get()).unwrap()
+             "Mapping TLS region {virt_start:?}..{:?} => {chunk:?} ...",
+             virt_start.checked_add(chunk.len()).unwrap()
          );

          // Safety: Leaving the address space in an invalid state here is fine since on panic we'll

···
          unsafe {
              arch::map_contiguous(
                  root_pgtable,
-                 phys_iter.alloc(),
+                 frame_iter.alloc(),
                  virt_start,
-                 phys,
-                 len,
+                 chunk.start,
+                 chunk.len(),
                  Flags::READ | Flags::WRITE,
                  phys_off,
              )?;
          }

-         virt_start = virt_start.checked_add(len.get()).unwrap();
+         virt_start = virt_start.checked_add(chunk.len()).unwrap();
      }

      Ok(TlsAllocation {

···
          .unwrap();
      let start = self.virt.start.checked_add(aligned_size * hartid).unwrap();

-     start..start.checked_add(self.template.mem_size).unwrap()
+     Range::from_start_len(start, self.template.mem_size)
  }

  pub fn initialize_for_hart(&self, hartid: usize) {

···
      log::trace!("Allocating stack {layout:?}...");
      // The stacks region doesn't need to be zeroed, since we will be filling it with
      // the canary pattern anyway
-     let mut phys_iter = frame_alloc.allocate(layout);
+     let mut frame_iter = frame_alloc.allocate(layout);

-     while let Some((phys, len)) = phys_iter.next()? {
+     while let Some(chunk) = frame_iter.next()? {
          log::trace!(
-             "mapping stack for hart {hart} {virt:?}..{:?} => {phys:?}..{:?}",
-             virt.checked_add(len.get()).unwrap(),
-             phys.checked_add(len.get()).unwrap()
+             "mapping stack for hart {hart} {virt:?}..{:?} => {chunk:?}",
+             virt.checked_add(chunk.len()).unwrap()
          );

          // Safety: Leaving the address space in an invalid state here is fine since on panic we'll

···
          unsafe {
              arch::map_contiguous(
                  root_pgtable,
-                 phys_iter.alloc(),
+                 frame_iter.alloc(),
                  virt,
-                 phys,
-                 len,
+                 chunk.start,
+                 chunk.len(),
                  Flags::READ | Flags::WRITE,
                  phys_off,
              )?;
          }

-         virt = virt.checked_add(len.get()).unwrap();
+         virt = virt.checked_add(chunk.len()).unwrap();
      }
  }
+3 -2
loader/src/page_alloc.rs
···
  use core::alloc::Layout;
  use core::ops::Range;

- use kmem::VirtualAddress;
+ use kmem::{AddressRangeExt, VirtualAddress};
  use rand::distr::{Distribution, Uniform};
  use rand::prelude::IteratorRandom;
  use rand_chacha::ChaCha20Rng;

···
          0
      };

-     base.checked_add(offset).unwrap()..base.checked_add(offset + layout.size()).unwrap()
+     let start = base.checked_add(offset).unwrap();
+     Range::from_start_len(start, layout.size())
  }
  }