Next Generation WASM Microkernel Operating System

refactor: clean up `VirtualAddress` and `PhysicalAddress` methods.

+21 -118
+3 -1
kernel/src/mem/frame_alloc/mod.rs
··· 236 236 let mut index = 0; 237 237 let mut base = self.free_list.iter(); 238 238 'outer: while let Some(base_frame) = base.next() { 239 - if base_frame.addr().alignment() >= layout.align() { 239 + let address_alignment = base_frame.addr().get() & (!base_frame.addr().get() + 1); 240 + 241 + if address_alignment >= layout.align() { 240 242 let mut prev_addr = base_frame.addr(); 241 243 242 244 let mut c = 0;
+2 -2
kernel/src/mem/mmap.rs
··· 42 42 Self { 43 43 aspace: None, 44 44 range: Range { 45 - start: VirtualAddress::ZERO, 46 - end: VirtualAddress::ZERO, 45 + start: VirtualAddress::MIN, 46 + end: VirtualAddress::MIN, 47 47 }, 48 48 } 49 49 }
+3 -3
kernel/src/wasm/vm/vmcontext.rs
··· 937 937 stack_limit: UnsafeCell::new(VirtualAddress::MAX), 938 938 fuel_consumed: UnsafeCell::new(0), 939 939 epoch_deadline: UnsafeCell::new(0), 940 - last_wasm_exit_fp: UnsafeCell::new(VirtualAddress::ZERO), 941 - last_wasm_exit_pc: UnsafeCell::new(VirtualAddress::ZERO), 942 - last_wasm_entry_fp: UnsafeCell::new(VirtualAddress::ZERO), 940 + last_wasm_exit_fp: UnsafeCell::new(VirtualAddress::MIN), 941 + last_wasm_exit_pc: UnsafeCell::new(VirtualAddress::MIN), 942 + last_wasm_entry_fp: UnsafeCell::new(VirtualAddress::MIN), 943 943 } 944 944 } 945 945 }
+1 -102
libs/kmem/src/address.rs
··· 31 31 ($address_ty:ident) => { 32 32 impl $address_ty { 33 33 pub const MAX: Self = Self(usize::MAX); 34 - pub const MIN: Self = Self(0); 35 - pub const ZERO: Self = Self(0); 34 + pub const MIN: Self = Self(usize::MIN); 36 35 pub const BITS: u32 = usize::BITS; 37 36 38 37 #[must_use] ··· 108 107 None 109 108 } 110 109 } 111 - #[must_use] 112 - #[inline] 113 - pub const fn checked_div(self, rhs: usize) -> Option<Self> { 114 - if let Some(out) = self.0.checked_div(rhs) { 115 - Some(Self(out)) 116 - } else { 117 - None 118 - } 119 - } 120 - #[must_use] 121 - #[inline] 122 - pub const fn checked_mul(self, rhs: usize) -> Option<Self> { 123 - if let Some(out) = self.0.checked_mul(rhs) { 124 - Some(Self(out)) 125 - } else { 126 - None 127 - } 128 - } 129 - #[must_use] 130 - #[inline] 131 - pub const fn checked_shl(self, rhs: u32) -> Option<Self> { 132 - if let Some(out) = self.0.checked_shl(rhs) { 133 - Some(Self(out)) 134 - } else { 135 - None 136 - } 137 - } 138 - #[must_use] 139 - #[inline] 140 - pub const fn checked_shr(self, rhs: u32) -> Option<Self> { 141 - if let Some(out) = self.0.checked_shr(rhs) { 142 - Some(Self(out)) 143 - } else { 144 - None 145 - } 146 - } 147 - // #[must_use] 148 - // #[inline] 149 - // pub const fn saturating_add(self, rhs: usize) -> Self { 150 - // Self(self.0.saturating_add(rhs)) 151 - // } 152 - // #[must_use] 153 - // #[inline] 154 - // pub const fn saturating_add_signed(self, rhs: isize) -> Self { 155 - // Self(self.0.saturating_add_signed(rhs)) 156 - // } 157 - // #[must_use] 158 - // #[inline] 159 - // pub const fn saturating_div(self, rhs: usize) -> Self { 160 - // Self(self.0.saturating_div(rhs)) 161 - // } 162 - // #[must_use] 163 - // #[inline] 164 - // pub const fn saturating_sub(self, rhs: usize) -> Self { 165 - // Self(self.0.saturating_sub(rhs)) 166 - // } 167 - // #[must_use] 168 - // #[inline] 169 - // pub const fn saturating_mul(self, rhs: usize) -> Self { 170 - // Self(self.0.saturating_mul(rhs)) 171 - // } 172 - #[must_use]
173 - #[inline] 174 - pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { 175 - let (a, b) = self.0.overflowing_shl(rhs); 176 - (Self(a), b) 177 - } 178 - #[must_use] 179 - #[inline] 180 - pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { 181 - let (a, b) = self.0.overflowing_shr(rhs); 182 - (Self(a), b) 183 - } 184 110 185 111 #[must_use] 186 112 #[inline] ··· 188 114 self.0.checked_sub(rhs.0) 189 115 } 190 116 191 - // #[must_use] 192 - // #[inline] 193 - // pub const fn saturating_sub_addr(self, rhs: Self) -> usize { 194 - // self.0.saturating_sub(rhs.0) 195 - // } 196 - 197 117 #[must_use] 198 118 #[inline] 199 119 pub const fn is_aligned_to(&self, align: usize) -> bool { ··· 224 144 } else { 225 145 None 226 146 } 227 - } 228 - 229 - // #[must_use] 230 - // #[inline] 231 - // pub const fn wrapping_align_up(self, align: usize) -> Self { 232 - // if !align.is_power_of_two() { 233 - // panic!("checked_align_up: align is not a power-of-two"); 234 - // } 235 - // 236 - // // SAFETY: `align` has been checked to be a power of 2 above 237 - // let align_minus_one = unsafe { align.unchecked_sub(1) }; 238 - // 239 - // // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align) 240 - // let out = addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align); 241 - // debug_assert!(out.is_aligned_to(align)); 242 - // out 243 - // } 244 - 245 - #[inline] 246 - pub const fn alignment(&self) -> usize { 247 - self.0 & (!self.0 + 1) 248 147 } 249 148 250 149 #[must_use]
+1 -1
loader/src/main.rs
··· 116 116 117 117 let root_pgtable = frame_alloc 118 118 .allocate_one_zeroed( 119 - VirtualAddress::ZERO, // called before translation into higher half 119 + VirtualAddress::MIN, // called before translation into higher half 120 120 ) 121 121 .unwrap(); 122 122
+2 -2
loader/src/mapping.rs
··· 93 93 phys.start, 94 94 phys.len(), 95 95 flags, 96 - VirtualAddress::ZERO, // called before translation into higher half 96 + VirtualAddress::MIN, // called before translation into higher half 97 97 ) 98 98 } 99 99 } ··· 130 130 phys.start, 131 131 phys.len(), 132 132 Flags::READ | Flags::WRITE, 133 - VirtualAddress::ZERO, // called before translation into higher half 133 + VirtualAddress::MIN, // called before translation into higher half 134 134 )?; 135 135 } 136 136
+9 -7
loader/src/page_alloc.rs
··· 60 60 self.page_state[idx + i] = true; 61 61 } 62 62 63 - VirtualAddress::new(idx) 63 + let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1); 64 + 65 + let page = idx 66 + .checked_mul(top_level_page_size) // convert the index into an actual address 67 + .and_then(|idx| idx.checked_add(usize::MAX << arch::VIRT_ADDR_BITS)) // and shift it into the kernel half 68 + .unwrap(); 69 + 70 + VirtualAddress::new(page) 64 71 } else { 65 72 panic!("no usable top-level pages found ({num_pages} pages requested)"); 66 73 } ··· 97 104 98 105 // how many top-level pages are needed to map `size` bytes 99 106 // and attempt to allocate them 100 - let page_idx = self.allocate_pages(layout.size().div_ceil(top_level_page_size)); 107 + let base = self.allocate_pages(layout.size().div_ceil(top_level_page_size)); 101 108 102 109 // calculate the base address of the page 103 110 // ··· 106 113 // 107 114 // we can then take the lowest possible address of the higher half (`usize::MAX << VA_BITS`) 108 115 // and add the `idx` multiple of the size of a top-level entry to it 109 - let base = page_idx 110 - .checked_mul(top_level_page_size) 111 - .unwrap() 112 - .checked_add(usize::MAX << arch::VIRT_ADDR_BITS) 113 - .unwrap(); 114 116 115 117 let offset = if let Some(rng) = self.prng.as_mut() { 116 118 // Choose a random offset.