Next Generation WASM Microkernel Operating System

refactor: replace `checked_` methods

This change replaces the `checked_` methods previously exposed by the address types with plain `add`/`sub` methods that panic on overflow in debug mode and would, by default, wrap in release mode. In practice they do not wrap in release mode either, because this change also enables overflow checks across the board (via `overflow-checks = true` in the release profile). The intention is to bring the behaviour of the address types in line with that of the other integer types used in the kernel: any knob that controls overflow checking should apply to both equally.

+208 -288
+1
Cargo.toml
··· 169 169 170 170 [profile.release] 171 171 debug = "limited" # The kernel should be able to print stack traces of itself even in release mode 172 + overflow-checks = true 172 173 173 174 # Custom profile for Loom tests: enable release optimizations so that the loom 174 175 # tests are less slow, but don't disable debug assertions.
+1 -4
kernel/src/allocator.rs
··· 26 26 let phys = boot_alloc.allocate_contiguous(layout).unwrap(); 27 27 28 28 let virt: Range<VirtualAddress> = { 29 - let start = boot_info 30 - .physical_address_offset 31 - .checked_add(phys.get()) 32 - .unwrap(); 29 + let start = boot_info.physical_address_offset.add(phys.get()); 33 30 34 31 Range::from_start_len(start, layout.size()) 35 32 };
+21 -30
kernel/src/arch/riscv64/mem.rs
··· 13 13 use core::{fmt, slice}; 14 14 15 15 use bitflags::bitflags; 16 - use kmem::{PhysicalAddress, VirtualAddress}; 16 + use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress}; 17 17 use riscv::satp; 18 18 use riscv::sbi::rfence::sfence_vma_asid; 19 19 use static_assertions::const_assert_eq; ··· 31 31 const_assert_eq!( 32 32 KERNEL_ASPACE_RANGE 33 33 .end() 34 - .checked_sub_addr(*KERNEL_ASPACE_RANGE.start()) 35 - .unwrap(), 34 + .offset_from_unsigned(*KERNEL_ASPACE_RANGE.start()), 36 35 !CANONICAL_ADDRESS_MASK 37 36 ); 38 37 ··· 68 67 // Safety: `get_active_pgtable` & `VirtualAddress::from_phys` do minimal checking that the address is valid 69 68 // but otherwise we have to trust the address is valid for the entire page. 70 69 unsafe { 71 - slice::from_raw_parts_mut( 72 - phys_to_virt(root_pgtable).unwrap().as_mut_ptr(), 73 - PAGE_SIZE / 2, 74 - ) 75 - .fill(0); 70 + slice::from_raw_parts_mut(phys_to_virt(root_pgtable).as_mut_ptr(), PAGE_SIZE / 2).fill(0); 76 71 } 77 72 78 73 wmb(); 79 74 } 80 75 81 76 #[must_use] 82 - pub fn phys_to_virt(phys: PhysicalAddress) -> Option<VirtualAddress> { 83 - KERNEL_ASPACE_RANGE.start().checked_add(phys.get()) 77 + pub fn phys_to_virt(phys: PhysicalAddress) -> VirtualAddress { 78 + KERNEL_ASPACE_RANGE.start().add(phys.get()) 84 79 } 85 80 86 81 pub const fn is_canonical(virt: VirtualAddress) -> bool { ··· 110 105 pub fn invalidate_range(asid: u16, address_range: Range<VirtualAddress>) -> crate::Result<()> { 111 106 mb(); 112 107 113 - let base_addr = address_range.start.get(); 114 - let size = address_range 115 - .end 116 - .checked_sub_addr(address_range.start) 117 - .unwrap(); 118 - sfence_vma_asid(0, usize::MAX, base_addr, size, asid)?; 108 + sfence_vma_asid( 109 + 0, 110 + usize::MAX, 111 + address_range.start.get(), 112 + address_range.len(), 113 + asid, 114 + )?; 119 115 120 116 mb(); 121 117 ··· 188 184 let root_pgtable = PhysicalAddress::new(satp.ppn() << 12); 189 185 debug_assert!(root_pgtable.get() != 0); 190 
186 191 - let base = phys_to_virt(root_pgtable) 192 - .unwrap() 193 - .checked_add(PAGE_SIZE / 2) 194 - .unwrap() 195 - .as_ptr(); 187 + let base = phys_to_virt(root_pgtable).add(PAGE_SIZE / 2).as_ptr(); 196 188 197 189 slice::from_raw_parts(base, PAGE_SIZE / 2) 198 190 }; ··· 292 284 // mark this PTE as a valid leaf node pointing to the physical frame 293 285 pte.replace_address_and_flags(phys, PTEFlags::VALID | flags); 294 286 295 - flush.extend_range(self.asid, virt..virt.checked_add(page_size).unwrap())?; 296 - virt = virt.checked_add(page_size).unwrap(); 297 - phys = phys.checked_add(page_size).unwrap(); 287 + flush.extend_range(self.asid, virt..virt.add(page_size))?; 288 + virt = virt.add(page_size); 289 + phys = phys.add(page_size); 298 290 remaining_bytes -= page_size; 299 291 continue 'outer; 300 292 } else if pte.is_valid() && !pte.is_leaf() { ··· 375 367 old_flags.difference(rwx_mask).union(new_flags), 376 368 ); 377 369 378 - flush.extend_range(self.asid, virt..virt.checked_add(page_size).unwrap())?; 379 - virt = virt.checked_add(page_size).unwrap(); 370 + flush.extend_range(self.asid, Range::from_start_len(virt, page_size))?; 371 + virt = virt.add(page_size); 380 372 remaining_bytes -= page_size; 381 373 continue 'outer; 382 374 } else if pte.is_valid() { ··· 490 482 // The PTE is mapped, so go ahead and clear it unmapping the frame 491 483 pte.clear(); 492 484 493 - flush.extend_range(self.asid, *virt..virt.checked_add(page_size).unwrap())?; 494 - *virt = virt.checked_add(page_size).unwrap(); 485 + flush.extend_range(self.asid, Range::from_start_len(*virt, page_size))?; 486 + *virt = virt.add(page_size); 495 487 *remaining_bytes -= page_size; 496 488 } else if pte.is_valid() { 497 489 // This PTE is an internal node pointing to another page table ··· 522 514 NonNull::new( 523 515 KERNEL_ASPACE_RANGE 524 516 .start() 525 - .checked_add(phys.get()) 526 - .unwrap() 517 + .add(phys.get()) 527 518 .as_mut_ptr() 528 519 .cast(), 529 520 )
+4 -6
kernel/src/arch/riscv64/trap_handler.rs
··· 361 361 }; 362 362 regs.gp[2] = sscratch::read(); 363 363 364 - let backtrace = 365 - Backtrace::<32>::from_registers(regs.clone(), epc.checked_add(1).unwrap()).unwrap(); 364 + let backtrace = Backtrace::<32>::from_registers(regs.clone(), epc.add(1)).unwrap(); 366 365 tracing::error!("{backtrace}"); 367 366 368 367 // FIXME it would be great to get rid of the allocation here :/ ··· 372 371 373 372 // begin a panic on the original stack 374 373 // Safety: we saved the register state at the beginning of the trap handler 375 - unsafe { panic_unwind2::begin_unwind(payload, regs, epc.checked_add(1).unwrap().get()) }; 374 + unsafe { panic_unwind2::begin_unwind(payload, regs, epc.add(1).get()) }; 376 375 } 377 376 378 377 fn handle_recursive_fault(frame: &TrapFrame, epc: VirtualAddress) -> ! { ··· 382 381 }; 383 382 regs.gp[2] = sscratch::read(); 384 383 385 - let backtrace = 386 - Backtrace::<32>::from_registers(regs.clone(), epc.checked_add(1).unwrap()).unwrap(); 384 + let backtrace = Backtrace::<32>::from_registers(regs.clone(), epc.add(1)).unwrap(); 387 385 tracing::error!("{backtrace}"); 388 386 389 387 // FIXME it would be great to get rid of the allocation here :/ ··· 392 390 // begin a panic on the original stack 393 391 // Safety: we saved the register state at the beginning of the trap handler 394 392 unsafe { 395 - panic_unwind2::begin_unwind(payload, regs, epc.checked_add(1).unwrap().get()); 393 + panic_unwind2::begin_unwind(payload, regs, epc.add(1).get()); 396 394 } 397 395 }
+1 -2
kernel/src/backtrace/mod.rs
··· 70 70 elf: unsafe { 71 71 let base = boot_info 72 72 .physical_address_offset 73 - .checked_add(boot_info.kernel_phys.start.get()) 74 - .unwrap() 73 + .add(boot_info.kernel_phys.start.get()) 75 74 .as_ptr(); 76 75 77 76 slice::from_raw_parts(base, boot_info.kernel_phys.len())
+1 -2
kernel/src/main.rs
··· 261 261 262 262 let base = boot_info 263 263 .physical_address_offset 264 - .checked_add(fdt.range.start.get()) 265 - .unwrap() 264 + .add(fdt.range.start.get()) 266 265 .as_mut_ptr(); 267 266 268 267 // Safety: we need to trust the bootinfo data is correct
+23 -48
kernel/src/mem/address_space.rs
··· 140 140 <= self 141 141 .max_range 142 142 .end() 143 - .checked_sub_addr(*self.max_range.start()) 144 - .unwrap_or_default(), 143 + .offset_from_unsigned(*self.max_range.start()) 145 144 ); 146 145 ensure!(layout.align() <= self.frame_alloc.max_alignment(),); 147 146 ensure!(permissions.is_valid()); ··· 185 184 <= self 186 185 .max_range 187 186 .end() 188 - .checked_sub_addr(*self.max_range.start()) 189 - .unwrap_or_default(), 187 + .offset_from_unsigned(*self.max_range.start()) 190 188 ); 191 189 ensure!(permissions.is_valid()); 192 190 // ensure the entire address space range is free ··· 220 218 <= self 221 219 .max_range 222 220 .end() 223 - .checked_sub_addr(*self.max_range.start()) 224 - .unwrap_or_default(), 221 + .offset_from_unsigned(*self.max_range.start()) 225 222 ); 226 223 227 224 // ensure the entire range is mapped and doesn't cover any holes ··· 277 274 <= self 278 275 .max_range 279 276 .end() 280 - .checked_sub_addr(*self.max_range.start()) 281 - .unwrap_or_default(), 277 + .offset_from_unsigned(*self.max_range.start()) 282 278 ); 283 279 ensure!(new_permissions.is_valid()); 284 280 ··· 403 399 <= self 404 400 .max_range 405 401 .end() 406 - .checked_sub_addr(*self.max_range.start()) 407 - .unwrap_or_default(), 402 + .offset_from_unsigned(*self.max_range.start()) 408 403 ); 409 404 ensure!(permissions.is_valid()); 410 405 ··· 448 443 <= self 449 444 .max_range 450 445 .end() 451 - .checked_sub_addr(*self.max_range.start()) 452 - .unwrap_or_default(), 446 + .offset_from_unsigned(*self.max_range.start()), 453 447 ); 454 448 455 449 let mut batch = Batch::new(&mut self.arch, self.frame_alloc); ··· 559 553 self.find_spot_at_index(selected_index, layout).unwrap() 560 554 } 561 555 }; 562 - tracing::trace!("picked spot {spot}..{:?}", spot.checked_add(layout.size())); 556 + tracing::trace!("picked spot {spot}..{:?}", spot.add(layout.size())); 563 557 564 558 debug_assert!(arch::is_canonical(spot)); 565 559 Ok(spot) ··· 599 593 // if the tree is 
empty, treat max_range as the gap 600 594 if self.regions.is_empty() { 601 595 let aligned_gap = Range { 602 - start: self 603 - .max_range 604 - .start() 605 - .checked_align_up(layout.align()) 606 - .unwrap(), 607 - end: self 608 - .max_range 609 - .end() 610 - .checked_sub(1) 611 - .unwrap() 612 - .align_down(layout.align()), 596 + start: self.max_range.start().align_up(layout.align()), 597 + end: self.max_range.end().sub(1).align_down(layout.align()), 613 598 }; 614 599 615 600 let spot_count = spots_in_range(layout, aligned_gap.clone()); ··· 618 603 tracing::trace!("tree is empty, chose gap {aligned_gap:?}"); 619 604 return Ok(aligned_gap 620 605 .start 621 - .checked_add(target_index << layout.align().ilog2()) 622 - .unwrap()); 606 + .add(target_index << layout.align().ilog2())); 623 607 } 624 608 target_index -= spot_count; 625 609 } 626 610 627 611 // see if there is a suitable gap between the start of the address space and the first mapping 628 612 if let Some(root) = self.regions.root().get() { 629 - let aligned_gap = (*self.max_range.start()..root.max_range.start) 630 - .checked_align_in(layout.align()) 631 - .unwrap(); 613 + let aligned_gap = 614 + (*self.max_range.start()..root.max_range.start).align_in(layout.align()); 632 615 let spot_count = spots_in_range(layout, aligned_gap.clone()); 633 616 candidate_spot_count += spot_count; 634 617 if target_index < spot_count { 635 618 tracing::trace!("found gap left of tree in {aligned_gap:?}"); 636 619 return Ok(aligned_gap 637 620 .start 638 - .checked_add(target_index << layout.align().ilog2()) 639 - .unwrap()); 621 + .add(target_index << layout.align().ilog2())); 640 622 } 641 623 target_index -= spot_count; 642 624 } ··· 654 636 continue; 655 637 } 656 638 657 - let aligned_gap = (left.max_range.end..node.range.start) 658 - .checked_align_in(layout.align()) 659 - .unwrap(); 639 + let aligned_gap = 640 + (left.max_range.end..node.range.start).align_in(layout.align()); 660 641 661 642 let spot_count = 
spots_in_range(layout, aligned_gap.clone()); 662 643 ··· 665 646 tracing::trace!("found gap in left subtree in {aligned_gap:?}"); 666 647 return Ok(aligned_gap 667 648 .start 668 - .checked_add(target_index << layout.align().ilog2()) 669 - .unwrap()); 649 + .add(target_index << layout.align().ilog2())); 670 650 } 671 651 target_index -= spot_count; 672 652 } ··· 674 654 if let Some(right) = node.links.right() { 675 655 let right = unsafe { right.as_ref() }; 676 656 677 - let aligned_gap = (node.range.end..right.max_range.start) 678 - .checked_align_in(layout.align()) 679 - .unwrap(); 657 + let aligned_gap = 658 + (node.range.end..right.max_range.start).align_in(layout.align()); 680 659 681 660 let spot_count = spots_in_range(layout, aligned_gap.clone()); 682 661 ··· 685 664 tracing::trace!("found gap in right subtree in {aligned_gap:?}"); 686 665 return Ok(aligned_gap 687 666 .start 688 - .checked_add(target_index << layout.align().ilog2()) 689 - .unwrap()); 667 + .add(target_index << layout.align().ilog2())); 690 668 } 691 669 target_index -= spot_count; 692 670 ··· 702 680 703 681 // see if there is a suitable gap between the end of the last mapping and the end of the address space 704 682 if let Some(root) = self.regions.root().get() { 705 - let aligned_gap = (root.max_range.end..*self.max_range.end()) 706 - .checked_align_in(layout.align()) 707 - .unwrap(); 683 + let aligned_gap = (root.max_range.end..*self.max_range.end()).align_in(layout.align()); 708 684 let spot_count = spots_in_range(layout, aligned_gap.clone()); 709 685 candidate_spot_count += spot_count; 710 686 if target_index < spot_count { 711 687 tracing::trace!("found gap right of tree in {aligned_gap:?}"); 712 688 return Ok(aligned_gap 713 689 .start 714 - .checked_add(target_index << layout.align().ilog2()) 715 - .unwrap()); 690 + .add(target_index << layout.align().ilog2())); 716 691 } 717 692 } 718 693 ··· 773 748 self.flags = flags; 774 749 self.range = Range::from_start_len(virt, len.get()); 
775 750 } else { 776 - self.range.end = self.range.end.checked_add(len.get()).unwrap(); 751 + self.range.end = self.range.end.add(len.get()); 777 752 } 778 753 779 754 self.actions.push(BBatchAction::Map(phys, len.get())); ··· 801 776 self.flags, 802 777 &mut flush, 803 778 )?; 804 - virt = virt.checked_add(len).unwrap(); 779 + virt = virt.add(len); 805 780 }, 806 781 } 807 782 }
+8 -18
kernel/src/mem/address_space_region.rs
··· 128 128 will_write: bool, 129 129 ) -> crate::Result<()> { 130 130 let vmo_relative_range = Range { 131 - start: range.start.checked_sub_addr(self.range.start).unwrap(), 132 - end: range.end.checked_sub_addr(self.range.start).unwrap(), 131 + start: range.start.offset_from_unsigned(self.range.start), 132 + end: range.end.offset_from_unsigned(self.range.start), 133 133 }; 134 134 135 135 match self.vmo.as_ref() { ··· 152 152 153 153 for addr in range.step_by(arch::PAGE_SIZE) { 154 154 debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE)); 155 - let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap(); 155 + let vmo_relative_offset = addr.offset_from_unsigned(self.range.start); 156 156 let frame = vmo.require_owned_frame(vmo_relative_offset)?; 157 157 batch.queue_map( 158 158 addr, ··· 166 166 167 167 for addr in range.into_iter().step_by(arch::PAGE_SIZE) { 168 168 debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE)); 169 - let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap(); 169 + let vmo_relative_offset = addr.offset_from_unsigned(self.range.start); 170 170 let frame = vmo.require_read_frame(vmo_relative_offset)?; 171 171 batch.queue_map( 172 172 addr, ··· 194 194 } 195 195 Vmo::Paged(vmo) => { 196 196 let vmo_relative_range = Range { 197 - start: range 198 - .start 199 - .checked_sub_addr(self.range.start) 200 - .and_then(|start| start.checked_add(self.vmo_offset)) 201 - .unwrap(), 202 - end: range 203 - .end 204 - .checked_sub_addr(self.range.start) 205 - .and_then(|end| end.checked_add(self.vmo_offset)) 206 - .unwrap(), 197 + start: range.start.offset_from_unsigned(self.range.start) + self.vmo_offset, 198 + end: range.end.offset_from_unsigned(self.range.start) + self.vmo_offset, 207 199 }; 208 200 209 201 let mut vmo = vmo.write(); ··· 260 252 // it is always mapped, cannot be paged-out, and also doesn't support COW. 
This is used to 261 253 // simplify handling of regions like kernel memory which must always be present anyway. 262 254 263 - let vmo_relative_offset = addr.checked_sub_addr(self.range.start).unwrap(); 255 + let vmo_relative_offset = addr.offset_from_unsigned(self.range.start); 264 256 265 257 match self.vmo.as_ref() { 266 258 Vmo::Wired => unreachable!("Wired VMO can never page fault"), ··· 334 326 node.max_gap = cmp::max(left_max_gap, right_max_gap); 335 327 336 328 fn gap(left_last_byte: VirtualAddress, right_first_byte: VirtualAddress) -> usize { 337 - right_first_byte 338 - .checked_sub_addr(left_last_byte) 339 - .unwrap_or_default() // TODO use saturating_sub_addr 329 + right_first_byte.get().saturating_sub(left_last_byte.get()) 340 330 } 341 331 } 342 332
+3 -4
kernel/src/mem/bootstrap_alloc.rs
··· 74 74 continue; 75 75 } 76 76 77 - let frame = region.end.checked_sub(offset + requested_size).unwrap(); 77 + let frame = region.end.sub(offset + requested_size); 78 78 self.offset += requested_size; 79 79 80 80 return Some(frame); ··· 99 99 ptr::write_bytes::<u8>( 100 100 arch::KERNEL_ASPACE_RANGE 101 101 .start() 102 - .checked_add(addr.get()) 103 - .unwrap() 102 + .add(addr.get()) 104 103 .as_mut_ptr(), 105 104 0, 106 105 requested_size, ··· 126 125 self.offset -= region.len(); 127 126 continue; 128 127 } else if self.offset > 0 { 129 - region.end = region.end.checked_sub(self.offset).unwrap(); 128 + region.end = region.end.sub(self.offset); 130 129 self.offset = 0; 131 130 } 132 131
+10 -20
kernel/src/mem/frame_alloc/arena.rs
··· 62 62 // Safety: arena selection has ensured the region is valid 63 63 let slots: &mut [MaybeUninit<FrameInfo>] = unsafe { 64 64 let ptr = arch::phys_to_virt(selection.bookkeeping.start) 65 - .unwrap() 66 65 .as_mut_ptr() 67 66 .cast(); 68 67 ··· 94 93 95 94 { 96 95 debug_assert!(addr.is_aligned_to(arch::PAGE_SIZE)); 97 - let offset = addr.checked_sub_addr(selection.arena.start).unwrap(); 96 + let offset = addr.offset_from_unsigned(selection.arena.start); 98 97 let idx = offset / arch::PAGE_SIZE; 99 98 100 99 let frame = slots[idx].write(FrameInfo::new(addr)).into(); 101 100 free_lists[order].push_back(frame); 102 101 } 103 102 104 - addr = addr.checked_add(size).unwrap(); 103 + addr = addr.add(size); 105 104 remaining_bytes -= size; 106 105 } 107 106 ··· 132 131 // Safety: we just allocated the frame 133 132 let frame = unsafe { frame.as_mut() }; 134 133 135 - let buddy_addr = frame 136 - .addr() 137 - .checked_add(arch::PAGE_SIZE << (order - 1)) 138 - .unwrap(); 134 + let buddy_addr = frame.addr().add(arch::PAGE_SIZE << (order - 1)); 139 135 140 136 let buddy = self 141 137 .find_specific(buddy_addr) ··· 170 166 // Safety: we just allocated the frame 171 167 let frame = unsafe { frame.as_mut() }; 172 168 173 - let buddy_addr = frame 174 - .addr() 175 - .checked_add(arch::PAGE_SIZE << (order - 1)) 176 - .unwrap(); 169 + let buddy_addr = frame.addr().add(arch::PAGE_SIZE << (order - 1)); 177 170 178 171 let buddy = self 179 172 .find_specific(buddy_addr) ··· 196 189 let base = unsafe { frame.as_ref().addr() }; 197 190 198 191 uninit.iter_mut().enumerate().map(move |(idx, slot)| { 199 - NonNull::from(slot.write(FrameInfo::new( 200 - base.checked_add(idx * arch::PAGE_SIZE).unwrap(), 201 - ))) 192 + NonNull::from(slot.write(FrameInfo::new(base.add(idx * arch::PAGE_SIZE)))) 202 193 }) 203 194 }; 204 195 ··· 208 199 209 200 #[inline] 210 201 fn find_specific(&mut self, addr: PhysicalAddress) -> Option<&mut MaybeUninit<FrameInfo>> { 211 - let index = 
addr.checked_sub_addr(self.range.start).unwrap() / arch::PAGE_SIZE; 202 + let index = addr.offset_from_unsigned(self.range.start) / arch::PAGE_SIZE; 212 203 self.slots.get_mut(index) 213 204 } 214 205 } ··· 255 246 256 247 let pages_in_hole = if arena.end <= region.start { 257 248 // the region is higher than the current arena 258 - region.start.checked_sub_addr(arena.end).unwrap() / arch::PAGE_SIZE 249 + region.start.offset_from_unsigned(arena.end) / arch::PAGE_SIZE 259 250 } else { 260 251 debug_assert!(region.end <= arena.start); 261 252 // the region is lower than the current arena 262 - arena.start.checked_sub_addr(region.end).unwrap() / arch::PAGE_SIZE 253 + arena.start.offset_from_unsigned(region.end) / arch::PAGE_SIZE 263 254 }; 264 255 265 256 let waste_from_hole = ARENA_PAGE_BOOKKEEPING_SIZE * pages_in_hole; ··· 279 270 } 280 271 } 281 272 282 - let mut aligned = arena.checked_align_in(arch::PAGE_SIZE).unwrap(); 273 + let mut aligned = arena.align_in(arch::PAGE_SIZE); 283 274 let bookkeeping_size = bookkeeping_size(aligned.len()); 284 275 285 276 // We can't use empty arenas anyway ··· 290 281 291 282 let bookkeeping_start = aligned 292 283 .end 293 - .checked_sub(bookkeeping_size) 294 - .unwrap() 284 + .sub(bookkeeping_size) 295 285 .align_down(arch::PAGE_SIZE); 296 286 297 287 // The arena has no space to hold its own bookkeeping
+2 -2
kernel/src/mem/frame_alloc/frame.rs
··· 255 255 /// Returns a slice of the corresponding physical memory 256 256 #[inline] 257 257 pub fn as_slice(&self) -> &[u8] { 258 - let base = arch::phys_to_virt(self.addr).unwrap().as_ptr(); 258 + let base = arch::phys_to_virt(self.addr).as_ptr(); 259 259 // Safety: construction ensures the base ptr is valid 260 260 unsafe { slice::from_raw_parts(base, arch::PAGE_SIZE) } 261 261 } ··· 263 263 /// Returns a mutable slice of the corresponding physical memory 264 264 #[inline] 265 265 pub fn as_mut_slice(&mut self) -> &mut [u8] { 266 - let base = arch::phys_to_virt(self.addr).unwrap().as_mut_ptr(); 266 + let base = arch::phys_to_virt(self.addr).as_mut_ptr(); 267 267 // Safety: construction ensures the base ptr is valid 268 268 unsafe { slice::from_raw_parts_mut(base, arch::PAGE_SIZE) } 269 269 }
+3 -3
kernel/src/mem/frame_alloc/mod.rs
··· 127 127 let frame = self.alloc_one()?; 128 128 129 129 // Translate the physical address into a virtual one through the physmap 130 - let virt = arch::phys_to_virt(frame.addr()).unwrap(); 130 + let virt = arch::phys_to_virt(frame.addr()); 131 131 132 132 // memset'ing the slice to zero 133 133 // Safety: the slice has just been allocated ··· 178 178 let frames = self.alloc_contiguous(layout)?; 179 179 180 180 // Translate the physical address into a virtual one through the physmap 181 - let virt = arch::phys_to_virt(frames.first().unwrap().addr()).unwrap(); 181 + let virt = arch::phys_to_virt(frames.first().unwrap().addr()); 182 182 183 183 // memset'ing the slice to zero 184 184 // Safety: the slice has just been allocated ··· 248 248 break 'outer; 249 249 } 250 250 251 - if frame.addr().checked_sub_addr(prev_addr).unwrap() > arch::PAGE_SIZE { 251 + if frame.addr().offset_from_unsigned(prev_addr) > arch::PAGE_SIZE { 252 252 // frames aren't contiguous, so let's try the next one 253 253 tracing::trace!("frames not contiguous, trying next"); 254 254 continue 'outer;
+2 -2
kernel/src/mem/mmap.rs
··· 298 298 let mut cursor = aspace.regions.find_mut(&self.range.start); 299 299 300 300 let src_range = Range { 301 - start: self.range.start.checked_add(range.start).unwrap(), 302 - end: self.range.end.checked_add(range.start).unwrap(), 301 + start: self.range.start.add(range.start), 302 + end: self.range.end.add(range.start), 303 303 }; 304 304 305 305 let mut batch = Batch::new(&mut aspace.arch, aspace.frame_alloc);
+4 -8
kernel/src/mem/mod.rs
··· 97 97 let own_elf = unsafe { 98 98 let base = boot_info 99 99 .physical_address_offset 100 - .checked_add(boot_info.kernel_phys.start.get()) 101 - .unwrap() 100 + .add(boot_info.kernel_phys.start.get()) 102 101 .as_ptr(); 103 102 104 103 slice::from_raw_parts(base, boot_info.kernel_phys.len()) ··· 113 112 let virt = boot_info 114 113 .kernel_virt 115 114 .start 116 - .checked_add(usize::try_from(ph.virtual_addr()).unwrap()) 117 - .unwrap(); 115 + .add(usize::try_from(ph.virtual_addr()).unwrap()); 118 116 119 117 let mut permissions = Permissions::empty(); 120 118 if ph.flags().is_read() { ··· 139 137 Range { 140 138 start: virt.align_down(arch::PAGE_SIZE), 141 139 end: virt 142 - .checked_add(usize::try_from(ph.mem_size()).unwrap()) 143 - .unwrap() 144 - .checked_align_up(arch::PAGE_SIZE) 145 - .unwrap(), 140 + .add(usize::try_from(ph.mem_size()).unwrap()) 141 + .align_up(arch::PAGE_SIZE), 146 142 }, 147 143 permissions, 148 144 Some(format!("Kernel {permissions} Segment")),
+2 -2
kernel/src/mem/vmo.rs
··· 65 65 range.start.is_multiple_of(arch::PAGE_SIZE), 66 66 "range is not arch::PAGE_SIZE aligned" 67 67 ); 68 - let start = self.range.start.checked_add(range.start).unwrap(); 69 - let end = self.range.start.checked_add(range.end).unwrap(); 68 + let start = self.range.start.add(range.start); 69 + let end = self.range.start.add(range.end); 70 70 71 71 ensure!( 72 72 self.range.start <= start && self.range.end >= end,
+1 -2
kernel/src/shell.rs
··· 90 90 let mmap = with_kernel_aspace(|aspace| { 91 91 // FIXME: this is gross, we're using the PhysicalAddress as an alignment utility :/ 92 92 let size = PhysicalAddress::new(reg.size.unwrap()) 93 - .checked_align_up(arch::PAGE_SIZE) 94 - .unwrap() 93 + .align_up(arch::PAGE_SIZE) 95 94 .get(); 96 95 97 96 let range_phys = Range::from_start_len(PhysicalAddress::new(reg.starting_address), size);
+1 -1
kernel/src/wasm/vm/code_object.rs
··· 96 96 pub fn text_range(&self) -> Range<VirtualAddress> { 97 97 let start = self.mmap.range().start; 98 98 99 - start..start.checked_add(self.len).unwrap() 99 + start..start.add(self.len) 100 100 } 101 101 102 102 pub fn resolve_function_loc(&self, func_loc: FunctionLoc) -> usize {
+1 -1
kernel/src/wasm/vm/instance.rs
··· 444 444 assert!(fault.is_none()); 445 445 fault = Some(WasmFault { 446 446 memory_size: memory.byte_size(), 447 - wasm_address: u64::try_from(addr.checked_sub_addr(accessible.start).unwrap()) 447 + wasm_address: u64::try_from(addr.offset_from_unsigned(accessible.start)) 448 448 .unwrap(), 449 449 }); 450 450 }
+60 -28
libs/kmem/src/address.rs
··· 78 78 .map(::core::ptr::NonNull::with_exposed_provenance) 79 79 } 80 80 81 + /// Adds an unsigned offset to this address, panicking if overflow occurred. 81 82 #[must_use] 82 83 #[inline] 83 - pub const fn checked_add(self, rhs: usize) -> Option<Self> { 84 - if let Some(out) = self.0.checked_add(rhs) { 85 - Some(Self(out)) 86 - } else { 87 - None 88 - } 84 + pub const fn add(self, offset: usize) -> Self { 85 + Self(self.0 + offset) 89 86 } 90 87 88 + /// Subtracts an unsigned offset from this address, panicking if overflow occurred. 91 89 #[must_use] 92 90 #[inline] 93 - pub const fn checked_add_signed(self, rhs: isize) -> Option<Self> { 94 - if let Some(out) = self.0.checked_add_signed(rhs) { 95 - Some(Self(out)) 96 - } else { 97 - None 98 - } 91 + pub const fn sub(self, offset: usize) -> Self { 92 + Self(self.0 - offset) 99 93 } 100 94 95 + /// Adds a signed offset in bytes to this address, panicking if overflow occurred. 101 96 #[must_use] 102 97 #[inline] 103 - pub const fn checked_sub(self, rhs: usize) -> Option<Self> { 104 - if let Some(out) = self.0.checked_sub(rhs) { 105 - Some(Self(out)) 98 + pub const fn offset(self, offset: isize) -> Self { 99 + let (a, b) = self.0.overflowing_add_signed(offset); 100 + if b { 101 + panic!("attempt to offset with overflow") 106 102 } else { 107 - None 103 + Self(a) 108 104 } 109 105 } 110 106 107 + /// Adds an unsigned offset to this address, wrapping around at the boundary of the type. 111 108 #[must_use] 112 109 #[inline] 113 - pub const fn checked_sub_addr(self, rhs: Self) -> Option<usize> { 114 - self.0.checked_sub(rhs.0) 110 + pub const fn wrapping_add(self, offset: usize) -> Self { 111 + Self(self.0.wrapping_add(offset)) 112 + } 113 + 114 + /// Subtracts an unsigned offset from this address, wrapping around at the boundary of the type. 
115 + #[must_use] 116 + #[inline] 117 + pub const fn wrapping_sub(self, offset: usize) -> Self { 118 + Self(self.0.wrapping_sub(offset)) 119 + } 120 + 121 + /// Adds a signed offset in bytes to this address, wrapping around at the boundary of the type. 122 + #[must_use] 123 + #[inline] 124 + pub const fn wrapping_offset(self, offset: isize) -> Self { 125 + Self(self.0.wrapping_add_signed(offset)) 126 + } 127 + 128 + /// Calculates the distance between two addresses in bytes. 129 + #[must_use] 130 + #[inline] 131 + #[expect(clippy::cast_possible_wrap, reason = "intentional wrapping here")] 132 + pub const fn offset_from(self, origin: Self) -> isize { 133 + self.0.wrapping_sub(origin.0) as isize 134 + } 135 + 136 + /// Calculates the distance between two addresses in bytes, _where it’s known that `self` 137 + /// is equal to or greater than `origin`_. 138 + /// 139 + /// # Panics 140 + /// 141 + /// Panics if `self` is less than `origin`. 142 + #[must_use] 143 + #[inline] 144 + pub const fn offset_from_unsigned(self, origin: Self) -> usize { 145 + let (a, b) = self.0.overflowing_sub(origin.0); 146 + if b { 147 + panic!("attempt to subtract with overflow") 148 + } else { 149 + a 150 + } 115 151 } 116 152 117 153 #[must_use] ··· 127 163 128 164 #[must_use] 129 165 #[inline] 130 - pub const fn checked_align_up(self, align: usize) -> Option<Self> { 166 + pub const fn align_up(self, align: usize) -> Self { 131 167 if !align.is_power_of_two() { 132 168 panic!("checked_align_up: align is not a power-of-two"); 133 169 } ··· 135 171 // SAFETY: `align` has been checked to be a power of 2 above 136 172 let align_minus_one = unsafe { align.unchecked_sub(1) }; 137 173 138 - // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align) 139 - if let Some(addr_plus_align) = self.0.checked_add(align_minus_one) { 140 - let aligned = Self(addr_plus_align & 0usize.wrapping_sub(align)); 141 - debug_assert!(aligned.is_aligned_to(align)); 142 - debug_assert!(aligned.0 >= self.0); 143 
- Some(aligned) 144 - } else { 145 - None 146 - } 174 + let aligned = 175 + Self(self.0.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)); 176 + debug_assert!(aligned.is_aligned_to(align)); 177 + debug_assert!(aligned.0 >= self.0); 178 + aligned 147 179 } 148 180 149 181 #[must_use]
+8 -10
libs/kmem/src/address_range.rs
··· 26 26 /// Returns the intersection of `self` and `other`. 27 27 fn intersect(self, other: Self) -> Self; 28 28 29 - fn checked_align_in(self, align: usize) -> Option<Self> 29 + fn align_in(self, align: usize) -> Self 30 30 where 31 31 Self: Sized; 32 - fn checked_align_out(self, align: usize) -> Option<Self> 32 + fn align_out(self, align: usize) -> Self 33 33 where 34 34 Self: Sized; 35 35 } ··· 40 40 type Address = $address_ty; 41 41 42 42 fn from_start_len(start: Self::Address, len: usize) -> Self { 43 - let end = start.checked_add(len).unwrap(); 43 + let end = start.add(len); 44 44 45 45 Self { start, end } 46 46 } ··· 50 50 } 51 51 52 52 fn len(&self) -> usize { 53 - self.end.checked_sub_addr(self.start).unwrap() 53 + self.end.offset_from_unsigned(self.start) 54 54 } 55 55 56 56 fn contains(&self, address: &Self::Address) -> bool { ··· 68 68 } 69 69 } 70 70 71 - fn checked_align_in(self, align: usize) -> Option<Self> 71 + fn align_in(self, align: usize) -> Self 72 72 where 73 73 Self: Sized, 74 74 { 75 - let res = self.start.checked_align_up(align)?..self.end.align_down(align); 76 - Some(res) 75 + self.start.align_up(align)..self.end.align_down(align) 77 76 } 78 77 79 - fn checked_align_out(self, align: usize) -> Option<Self> 78 + fn align_out(self, align: usize) -> Self 80 79 where 81 80 Self: Sized, 82 81 { 83 - let res = self.start.align_down(align)..self.end.checked_align_up(align)?; 84 - Some(res) 82 + self.start.align_down(align)..self.end.align_up(align) 85 83 } 86 84 } 87 85 };
+3 -5
loader/api/src/info.rs
··· 166 166 "{:<23} : .tdata: {:?}..{:?}, .tbss: {:?}..{:?}", 167 167 "TLS TEMPLATE", 168 168 tls.start_addr, 169 - tls.start_addr.checked_add(tls.file_size).unwrap(), 170 - tls.start_addr.checked_add(tls.file_size).unwrap(), 171 - tls.start_addr 172 - .checked_add(tls.file_size + tls.mem_size) 173 - .unwrap() 169 + tls.start_addr.add(tls.file_size), 170 + tls.start_addr.add(tls.file_size), 171 + tls.start_addr.add(tls.file_size + tls.mem_size) 174 172 )?; 175 173 } else { 176 174 writeln!(f, "{:<23} : None", "TLS TEMPLATE")?;
+5 -9
loader/src/arch/riscv64.rs
··· 320 320 // mark this PTE as a valid leaf node pointing to the physical frame 321 321 pte.replace_address_and_flags(phys, PTEFlags::VALID | PTEFlags::from(flags)); 322 322 323 - virt = virt.checked_add(page_size).unwrap(); 324 - phys = phys.checked_add(page_size).unwrap(); 323 + virt = virt.add(page_size); 324 + phys = phys.add(page_size); 325 325 remaining_bytes -= page_size; 326 326 continue 'outer; 327 327 } else if !pte.is_valid() { ··· 404 404 let (_old_phys, flags) = pte.get_address_and_flags(); 405 405 pte.replace_address_and_flags(phys, flags); 406 406 407 - virt = virt.checked_add(page_size).unwrap(); 408 - phys = phys.checked_add(page_size).unwrap(); 407 + virt = virt.add(page_size); 408 + phys = phys.add(page_size); 409 409 remaining_bytes -= page_size; 410 410 continue 'outer; 411 411 } else if pte.is_valid() { ··· 469 469 phys: PhysicalAddress, 470 470 phys_off: VirtualAddress, 471 471 ) -> NonNull<PageTableEntry> { 472 - phys.checked_add(phys_off.get()) 473 - .unwrap() 474 - .as_non_null() 475 - .unwrap() 476 - .cast() 472 + phys.add(phys_off.get()).as_non_null().unwrap().cast() 477 473 } 478 474 479 475 #[repr(transparent)]
+2 -2
loader/src/boot_info.rs
··· 33 33 Layout::from_size_align(arch::PAGE_SIZE, arch::PAGE_SIZE).unwrap(), 34 34 arch::KERNEL_ASPACE_BASE, 35 35 )?; 36 - let page = physical_address_offset.checked_add(frame.get()).unwrap(); 36 + let page = physical_address_offset.add(frame.get()); 37 37 38 38 let memory_regions = init_boot_info_memory_regions( 39 39 page, ··· 72 72 ) -> MemoryRegions { 73 73 // Safety: we just allocated a whole frame for the boot info 74 74 let regions: &mut [MaybeUninit<MemoryRegion>] = unsafe { 75 - let base = page.checked_add(size_of::<BootInfo>()).unwrap(); 75 + let base = page.add(size_of::<BootInfo>()); 76 76 let len = (arch::PAGE_SIZE - size_of::<BootInfo>()) / size_of::<MemoryRegion>(); 77 77 78 78 #[expect(
+6 -13
loader/src/frame_alloc.rs
··· 114 114 continue; 115 115 } 116 116 117 - let frame = region.end.checked_sub(offset + requested_size).unwrap(); 117 + let frame = region.end.sub(offset + requested_size); 118 118 self.offset += requested_size; 119 119 return Ok(frame); 120 120 } ··· 134 134 let addr = self.allocate_contiguous(layout)?; 135 135 // Safety: we just allocated the frame 136 136 unsafe { 137 - ptr::write_bytes::<u8>( 138 - addr.checked_add(phys_offset.get()).unwrap().as_mut_ptr(), 139 - 0, 140 - requested_size, 141 - ); 137 + ptr::write_bytes::<u8>(addr.add(phys_offset.get()).as_mut_ptr(), 0, requested_size); 142 138 } 143 139 Ok(addr) 144 140 } ··· 172 168 & 0usize.wrapping_sub(arch::PAGE_SIZE); 173 169 debug_assert!(allocation_size.is_multiple_of(arch::PAGE_SIZE)); 174 170 175 - let frame = region.end.checked_sub(offset + allocation_size).unwrap(); 171 + let frame = region.end.sub(offset + allocation_size); 176 172 self.alloc.offset += allocation_size; 177 173 self.remaining -= allocation_size; 178 174 ··· 212 208 // Safety: we just allocated the frame 213 209 unsafe { 214 210 ptr::write_bytes::<u8>( 215 - self.phys_offset 216 - .checked_add(range.start.get()) 217 - .unwrap() 218 - .as_mut_ptr(), 211 + self.phys_offset.add(range.start.get()).as_mut_ptr(), 219 212 0, 220 213 range.len(), 221 214 ); ··· 243 236 self.offset -= region_size; 244 237 continue; 245 238 } else if self.offset > 0 { 246 - region.end = region.end.checked_sub(self.offset).unwrap(); 239 + region.end = region.end.sub(self.offset); 247 240 self.offset = 0; 248 241 } 249 242 ··· 266 259 if self.offset >= region.len() { 267 260 Some(region) 268 261 } else if self.offset > 0 { 269 - region.start = region.end.checked_sub(self.offset).unwrap(); 262 + region.start = region.end.sub(self.offset); 270 263 self.offset = 0; 271 264 272 265 Some(region)
+1 -3
loader/src/kernel.rs
··· 32 32 // Safety: The kernel elf file is inlined into the loader executable as part of the build setup 33 33 // which means we just need to parse it here. 34 34 let elf_file = xmas_elf::ElfFile::new(unsafe { 35 - let base = phys_off 36 - .checked_add(INLINED_KERNEL_BYTES.0.as_ptr().addr()) 37 - .unwrap(); 35 + let base = phys_off.add(INLINED_KERNEL_BYTES.0.as_ptr().addr()); 38 36 39 37 slice::from_raw_parts(base.as_mut_ptr(), INLINED_KERNEL_BYTES.0.len()) 40 38 })
+1 -1
loader/src/machine_info.rs
··· 161 161 // page-align all memory regions, this will waste some physical memory in the process, 162 162 // but we can't make use of it either way 163 163 memories.iter_mut().for_each(|region| { 164 - *region = region.clone().checked_align_in(PAGE_SIZE).unwrap(); 164 + *region = region.clone().align_in(PAGE_SIZE); 165 165 }); 166 166 167 167 // ensure the memory regions are sorted.
+2 -4
loader/src/main.rs
··· 193 193 194 194 let kernel_entry = kernel_virt 195 195 .start 196 - .checked_add(usize::try_from(kernel.elf_file.header.pt2.entry_point()).unwrap()) 197 - .unwrap(); 196 + .add(usize::try_from(kernel.elf_file.header.pt2.entry_point()).unwrap()); 198 197 199 198 GlobalInitResult { 200 199 boot_info, ··· 236 235 read_write: Range { 237 236 start: PhysicalAddress::from_ptr(&raw const __bss_start), 238 237 end: PhysicalAddress::from_ptr(&raw const __stack_start) 239 - .checked_add(minfo.hart_mask.count_ones() as usize * STACK_SIZE) 240 - .unwrap(), 238 + .add(minfo.hart_mask.count_ones() as usize * STACK_SIZE), 241 239 }, 242 240 } 243 241 }
+28 -55
loader/src/mapping.rs
··· 106 106 ) -> crate::Result<(VirtualAddress, Range<VirtualAddress>)> { 107 107 let alignment = arch::page_size_for_level(2); 108 108 109 - let phys = minfo.memory_hull().checked_align_out(alignment).unwrap(); 109 + let phys = minfo.memory_hull().align_out(alignment); 110 110 let virt = Range { 111 - start: arch::KERNEL_ASPACE_BASE 112 - .checked_add(phys.start.get()) 113 - .unwrap(), 114 - end: arch::KERNEL_ASPACE_BASE 115 - .checked_add(phys.end.get()) 116 - .unwrap(), 111 + start: arch::KERNEL_ASPACE_BASE.add(phys.start.get()), 112 + end: arch::KERNEL_ASPACE_BASE.add(phys.end.get()), 117 113 }; 118 114 119 115 debug_assert!(phys.start.is_aligned_to(alignment) && phys.end.is_aligned_to(alignment)); ··· 242 238 memsz = ph.mem_size 243 239 ); 244 240 245 - let phys = Range::from_start_len(phys_base.checked_add(ph.offset).unwrap(), ph.file_size) 246 - .checked_align_out(ph.align) 247 - .unwrap(); 241 + let phys = Range::from_start_len(phys_base.add(ph.offset), ph.file_size).align_out(ph.align); 248 242 249 - let virt = Range::from_start_len( 250 - virt_base.checked_add(ph.virtual_address).unwrap(), 251 - ph.file_size, 252 - ) 253 - .checked_align_out(ph.align) 254 - .unwrap(); 243 + let virt = 244 + Range::from_start_len(virt_base.add(ph.virtual_address), ph.file_size).align_out(ph.align); 255 245 256 246 log::trace!("mapping {virt:#x?} => {phys:#x?}"); 257 247 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll ··· 306 296 virt_base: VirtualAddress, 307 297 phys_off: VirtualAddress, 308 298 ) -> crate::Result<()> { 309 - let virt_start = virt_base.checked_add(ph.virtual_address).unwrap(); 310 - let zero_start = virt_start.checked_add(ph.file_size).unwrap(); 311 - let zero_end = virt_start.checked_add(ph.mem_size).unwrap(); 299 + let virt_start = virt_base.add(ph.virtual_address); 300 + let zero_start = virt_start.add(ph.file_size); 301 + let zero_end = virt_start.add(ph.mem_size); 312 302 313 303 let data_bytes_before_zero = 
zero_start.get() & 0xfff; 314 304 ··· 319 309 320 310 if data_bytes_before_zero != 0 { 321 311 let last_page = virt_start 322 - .checked_add(ph.file_size.saturating_sub(1)) 323 - .unwrap() 312 + .add(ph.file_size.saturating_sub(1)) 324 313 .align_down(ph.align); 325 314 let last_frame = phys_base 326 - .checked_add(ph.offset + ph.file_size - 1) 327 - .unwrap() 315 + .add(ph.offset + ph.file_size - 1) 328 316 .align_down(ph.align); 329 317 330 318 let new_frame = frame_alloc.allocate_one_zeroed(arch::KERNEL_ASPACE_BASE)?; ··· 332 320 // Safety: we just allocated the frame 333 321 unsafe { 334 322 let src = slice::from_raw_parts( 335 - arch::KERNEL_ASPACE_BASE 336 - .checked_add(last_frame.get()) 337 - .unwrap() 338 - .as_mut_ptr(), 323 + arch::KERNEL_ASPACE_BASE.add(last_frame.get()).as_mut_ptr(), 339 324 data_bytes_before_zero, 340 325 ); 341 326 342 327 let dst = slice::from_raw_parts_mut( 343 - arch::KERNEL_ASPACE_BASE 344 - .checked_add(new_frame.get()) 345 - .unwrap() 346 - .as_mut_ptr(), 328 + arch::KERNEL_ASPACE_BASE.add(new_frame.get()).as_mut_ptr(), 347 329 data_bytes_before_zero, 348 330 ); 349 331 ··· 368 350 // zero_start either lies at a page boundary OR somewhere within the first page 369 351 // by aligning up, we move it to the beginning of the *next* page. 370 352 let mut virt = Range { 371 - start: zero_start.checked_align_up(ph.align).unwrap(), 372 - end: zero_end.checked_align_up(ph.align).unwrap(), 353 + start: zero_start.align_up(ph.align), 354 + end: zero_end.align_up(ph.align), 373 355 }; 374 356 375 357 if !virt.is_empty() { ··· 395 377 )?; 396 378 } 397 379 398 - virt.start = virt.start.checked_add(chunk.len()).unwrap(); 380 + virt.start = virt.start.add(chunk.len()); 399 381 } 400 382 } 401 383 ··· 447 429 // Calculate address at which to apply the relocation. 
448 430 // dynamic relocations offsets are relative to the virtual layout of the elf, 449 431 // not the physical file 450 - let target = virt_base 451 - .checked_add(usize::try_from(rela.get_offset()).unwrap()) 452 - .unwrap(); 432 + let target = virt_base.add(usize::try_from(rela.get_offset()).unwrap()); 453 433 454 434 // Calculate the value to store at the relocation target. 455 - let value = virt_base 456 - .checked_add_signed(isize::try_from(rela.get_addend()).unwrap()) 457 - .unwrap(); 435 + let value = virt_base.offset(isize::try_from(rela.get_addend()).unwrap()); 458 436 459 437 // log::trace!("reloc R_RISCV_RELATIVE offset: {:#x}; addend: {:#x} => target {target:?} value {value:?}", rela.get_offset(), rela.get_addend()); 460 438 // Safety: we have to trust the ELF data here ··· 494 472 while let Some(chunk) = frame_iter.next()? { 495 473 log::trace!( 496 474 "Mapping TLS region {virt_start:?}..{:?} => {chunk:?} ...", 497 - virt_start.checked_add(chunk.len()).unwrap() 475 + virt_start.add(chunk.len()) 498 476 ); 499 477 500 478 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll ··· 511 489 )?; 512 490 } 513 491 514 - virt_start = virt_start.checked_add(chunk.len()).unwrap(); 492 + virt_start = virt_start.add(chunk.len()); 515 493 } 516 494 517 495 Ok(TlsAllocation { 518 496 virt, 519 497 template: TlsTemplate { 520 - start_addr: virt_base.checked_add(ph.virtual_address).unwrap(), 498 + start_addr: virt_base.add(ph.virtual_address), 521 499 mem_size: ph.mem_size, 522 500 file_size: ph.file_size, 523 501 align: ph.align, ··· 540 518 cmp::max(self.template.align, arch::PAGE_SIZE), 541 519 ) 542 520 .unwrap(); 543 - let start = self.virt.start.checked_add(aligned_size * hartid).unwrap(); 521 + let start = self.virt.start.add(aligned_size * hartid); 544 522 545 523 Range::from_start_len(start, self.template.mem_size) 546 524 } ··· 593 571 594 572 let mut virt = virt 595 573 .end 596 - .checked_sub(per_cpu_size_with_guard 
* hart as usize) 597 - .and_then(|a| a.checked_sub(per_cpu_size)) 598 - .unwrap(); 574 + .sub(per_cpu_size_with_guard * hart as usize) 575 + .sub(per_cpu_size); 599 576 600 577 log::trace!("Allocating stack {layout:?}..."); 601 578 // The stacks region doesn't need to be zeroed, since we will be filling it with ··· 605 582 while let Some(chunk) = frame_iter.next()? { 606 583 log::trace!( 607 584 "mapping stack for hart {hart} {virt:?}..{:?} => {chunk:?}", 608 - virt.checked_add(chunk.len()).unwrap() 609 585 + virt.add(chunk.len()) 610 586 ); 611 587 612 588 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll ··· 622 599 )?; 623 600 } 624 601 625 - virt = virt.checked_add(chunk.len()).unwrap(); 626 602 + virt = virt.add(chunk.len()); 627 603 } 628 604 } ··· 642 619 643 620 impl StacksAllocation { 644 621 pub fn region_for_cpu(&self, cpuid: usize) -> Range<VirtualAddress> { 645 - let end = self 646 - .virt 647 - .end 648 - .checked_sub(self.per_cpu_size_with_guard * cpuid) 649 - .unwrap(); 622 + let end = self.virt.end.sub(self.per_cpu_size_with_guard * cpuid); 650 623 651 - end.checked_sub(self.per_cpu_size).unwrap()..end 624 + end.sub(self.per_cpu_size)..end 652 625 } 653 626 } 654 627
+3 -3
loader/src/page_alloc.rs
··· 76 76 pub fn reserve(&mut self, mut virt_base: VirtualAddress, mut remaining_bytes: usize) { 77 77 log::trace!( 78 78 "marking {virt_base}..{} as used", 79 - virt_base.checked_add(remaining_bytes).unwrap() 79 + virt_base.add(remaining_bytes) 80 80 ); 81 81 82 82 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1); ··· 92 92 93 93 self.page_state[page_idx] = true; 94 94 95 - virt_base = virt_base.checked_add(top_level_page_size).unwrap(); 95 + virt_base = virt_base.add(top_level_page_size); 96 96 remaining_bytes -= top_level_page_size; 97 97 } 98 98 } ··· 129 129 0 130 130 }; 131 131 132 - let start = base.checked_add(offset).unwrap(); 132 + let start = base.add(offset); 133 133 Range::from_start_len(start, layout.size()) 134 134 } 135 135 }