Next Generation WASM Microkernel Operating System

chore(kmem): clean up (#625)

* clean up kmem

* chore(kmem): clean up test utils

Authored by Jonas Kruckenberg and committed by GitHub.
3bb74c3b d8ba683a

+259 -100
-1
Cargo.lock
··· 1097 1097 version = "0.1.0" 1098 1098 dependencies = [ 1099 1099 "arrayvec", 1100 - "bitflags", 1101 1100 "cpu-local", 1102 1101 "fallible-iterator", 1103 1102 "kmem",
+1 -2
libs/kmem/Cargo.toml
··· 20 20 lock_api.workspace = true 21 21 22 22 [dev-dependencies] 23 - parking_lot = "0.12.5" 24 23 kmem = { workspace = true, features = ["test_utils"] } 25 - bitflags.workspace = true 24 + parking_lot = "0.12.5" 26 25 27 26 [features] 28 27 test_utils = ["cpu-local", "spin", "proptest", "proptest-derive"]
+126 -27
libs/kmem/src/address_space.rs
··· 1 + use core::alloc::Layout; 1 2 use core::convert::Infallible; 2 3 use core::ops::Range; 4 + 5 + use fallible_iterator::FallibleIterator; 3 6 4 7 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; 5 8 use crate::bootstrap::{Bootstrap, BootstrapAllocator}; ··· 79 82 &self.arch 80 83 } 81 84 85 + pub fn physmap(&self) -> &PhysMap { 86 + &self.physmap 87 + } 88 + 89 + pub const fn granule_size(&self) -> usize { 90 + A::GRANULE_SIZE 91 + } 92 + 93 + pub const fn granule_layout(&self) -> Layout { 94 + A::GRANULE_LAYOUT 95 + } 96 + 82 97 /// Activate the address space on this CPU (set this CPUs page table). 83 98 /// 84 99 /// # Safety ··· 119 134 unreachable!() 120 135 } 121 136 137 + /// Maps the virtual address range `virt` to *possibly discontiguous* chunk(s) of physical memory 138 + /// `phys` with the specified memory attributes. 139 + /// 140 + /// If this returns `Ok`, the mapping is added to the address space. 141 + /// 142 + /// Note that this method **does not** establish any ordering between address space modification 143 + /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the 144 + /// new mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 145 + /// 146 + /// After the modifications have been synchronized with current execution, all accesses to the virtual 147 + /// address range will translate to accesses of the physical address range and adhere to the 148 + /// access rules established by the `MemoryAttributes`. 149 + /// 150 + /// # Safety 151 + /// 152 + /// 1. The entire range `virt` must be unmapped. 153 + /// 2. `virt` must be aligned to at least the smallest architecture block size. 154 + /// 3. `phys` chunks must be aligned to at least the smallest architecture block size. 155 + /// 4. `phys` chunks must in-total be at least as large as `virt`. 
156 + /// 157 + /// # Errors 158 + /// 159 + /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain 160 + /// partially altered. The caller should call *unmap* on the virtual address range upon failure. 161 + pub unsafe fn map( 162 + &mut self, 163 + mut virt: Range<VirtualAddress>, 164 + mut phys: impl FallibleIterator<Item = Range<PhysicalAddress>, Error = AllocError>, 165 + attributes: MemoryAttributes, 166 + frame_allocator: impl FrameAllocator, 167 + flush: &mut Flush, 168 + ) -> Result<(), AllocError> { 169 + while let Some(chunk_phys) = phys.next()? { 170 + debug_assert!(!virt.is_empty()); 171 + 172 + // Safety: ensured by caller 173 + unsafe { 174 + self.map_contiguous( 175 + Range::from_start_len(virt.start, chunk_phys.len()), 176 + chunk_phys.start, 177 + attributes, 178 + frame_allocator.by_ref(), 179 + flush, 180 + )?; 181 + } 182 + 183 + virt.start = virt.start.add(chunk_phys.len()); 184 + } 185 + 186 + Ok(()) 187 + } 188 + 122 189 /// Maps the virtual address range `virt` to a continuous region of physical memory starting at `phys` 123 190 /// with the specified memory attributes. 124 191 /// ··· 137 204 /// 1. The entire range `virt` must be unmapped. 138 205 /// 2. `virt` must be aligned to at least the smallest architecture block size. 139 206 /// 3. `phys` must be aligned to at least the smallest architecture block size. 207 + /// 4. The region pointed to by `phys` must be at least as large as `virt`. 140 208 /// 141 209 /// # Errors 142 210 /// 143 - /// Returning `Err` indicates the mapping cannot be established and the address space remains 144 - /// unaltered. 211 + /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain 212 + /// partially altered. The caller should call *unmap* on the virtual address range upon failure. 
145 213 pub unsafe fn map_contiguous( 146 214 &mut self, 147 215 virt: Range<VirtualAddress>, ··· 207 275 Ok(()) 208 276 } 209 277 210 - /// Remaps the virtual address range `virt` to a new continuous region of physical memory start 278 + /// Remaps the virtual address range `virt` to new *possibly discontiguous* chunk(s) of physical 279 + /// memory `phys`. The old physical memory region is not freed. 280 + /// 281 + /// Note that this method **does not** establish any ordering between address space modification 282 + /// and accesses through the mapping, nor does it imply a page table cache flush. To ensure the 283 + /// updated mapping is visible to the calling CPU you must call [`flush`][Flush::flush] on the returned `[Flush`]. 284 + /// 285 + /// After the modifications have been synchronized with current execution, all accesses to the virtual 286 + /// address range will translate to accesses of the new physical address range. 287 + /// 288 + /// # Safety 289 + /// 290 + /// 1. The entire range `virt` must be mapped. 291 + /// 2. `virt` must be aligned to at least the smallest architecture block size. 292 + /// 3. `phys` chunks must be aligned to `at least the smallest architecture block size. 293 + /// 4. `phys` chunks must in-total be at least as large as `virt`. 294 + /// 295 + /// # Errors 296 + /// 297 + /// Returning `Err` indicates the mapping cannot be established. NOTE: The address space may remain 298 + /// partially altered. The caller should call *unmap* on the virtual address range upon failure. 299 + pub unsafe fn remap( 300 + &mut self, 301 + mut virt: Range<VirtualAddress>, 302 + mut phys: impl FallibleIterator<Item = Range<PhysicalAddress>, Error = AllocError>, 303 + flush: &mut Flush, 304 + ) -> Result<(), AllocError> { 305 + while let Some(chunk_phys) = phys.next()? 
{ 306 + debug_assert!(!virt.is_empty()); 307 + 308 + // Safety: ensured by caller 309 + unsafe { 310 + self.remap_contiguous( 311 + Range::from_start_len(virt.start, chunk_phys.len()), 312 + chunk_phys.start, 313 + flush, 314 + ); 315 + } 316 + 317 + virt.start = virt.start.add(chunk_phys.len()); 318 + } 319 + 320 + Ok(()) 321 + } 322 + 323 + /// Remaps the virtual address range `virt` to a new continuous region of physical memory starting 211 324 /// at `phys`. The old physical memory region is not freed. 212 325 /// 213 326 /// Note that this method **does not** establish any ordering between address space modification ··· 222 335 /// 1. The entire range `virt` must be mapped. 223 336 /// 2. `virt` must be aligned to at least the smallest architecture block size. 224 337 /// 3. `phys` must be aligned to `at least the smallest architecture block size. 338 + /// 4. The region pointed to by `phys` must be at least as large as `virt`. 225 339 pub unsafe fn remap_contiguous( 226 340 &mut self, 227 341 virt: Range<VirtualAddress>, ··· 438 552 use crate::arch::Arch; 439 553 use crate::flush::Flush; 440 554 use crate::frame_allocator::FrameAllocator; 441 - use crate::test_utils::{BootstrapResult, MachineBuilder}; 555 + use crate::test_utils::MachineBuilder; 442 556 use crate::{MemoryAttributes, VirtualAddress, WriteOrExecute, archtest}; 443 557 444 558 archtest! { 445 559 #[test] 446 560 fn map<A: Arch>() { 447 - let BootstrapResult { 448 - mut address_space, 449 - frame_allocator, 450 - .. 
451 - } = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 561 + let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 452 562 .with_memory_regions([0xA000]) 453 563 .finish_and_bootstrap() 454 564 .unwrap(); 565 + let (_, mut address_space, frame_allocator) = res; 455 566 456 567 let frame = frame_allocator 457 568 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 484 595 485 596 #[test] 486 597 fn remap<A: Arch>() { 487 - let BootstrapResult { 488 - mut address_space, 489 - frame_allocator, 490 - .. 491 - } = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 598 + let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 492 599 .with_memory_regions([0xB000]) 493 600 .finish_and_bootstrap() 494 601 .unwrap(); 602 + let (_, mut address_space, frame_allocator) = res; 495 603 496 604 let frame = frame_allocator 497 605 .allocate_contiguous(A::GRANULE_LAYOUT) 498 606 .unwrap(); 499 607 500 - let page = Range::from_start_len( 501 - VirtualAddress::new(0x7000), 502 - A::GRANULE_SIZE, 503 - ); 608 + let page = Range::from_start_len(VirtualAddress::new(0x7000), A::GRANULE_SIZE); 504 609 505 610 let mut flush = Flush::new(); 506 611 unsafe { ··· 547 652 548 653 #[test] 549 654 fn set_attributes<A: Arch>() { 550 - let BootstrapResult { 551 - mut address_space, 552 - frame_allocator, 553 - .. 554 - } = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 655 + let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 555 656 .with_memory_regions([0xB000]) 556 657 .finish_and_bootstrap() 557 658 .unwrap(); 659 + let (_, mut address_space, frame_allocator) = res; 558 660 559 661 let frame = frame_allocator 560 662 .allocate_contiguous(A::GRANULE_LAYOUT) 561 663 .unwrap(); 562 664 563 - let page = Range::from_start_len( 564 - VirtualAddress::new(0x7000), 565 - A::GRANULE_SIZE 566 - ); 665 + let page = Range::from_start_len(VirtualAddress::new(0x7000), A::GRANULE_SIZE); 567 666 568 667 let mut flush = Flush::new(); 569 668 unsafe {
+6
libs/kmem/src/arch/riscv64.rs
··· 19 19 asid: u16, 20 20 } 21 21 22 + impl Riscv64Sv39 { 23 + pub const fn new(asid: u16) -> Riscv64Sv39 { 24 + Riscv64Sv39 { asid } 25 + } 26 + } 27 + 22 28 impl super::Arch for Riscv64Sv39 { 23 29 type PageTableEntry = PageTableEntry; 24 30
+57 -26
libs/kmem/src/frame_allocator.rs
··· 42 42 /// A memory block which is currently allocated may be passed to any method of the allocator that 43 43 /// accepts such an argument. 44 44 pub unsafe trait FrameAllocator { 45 - fn allocate(&self, layout: Layout) -> FrameIter<'_, Self, false> 45 + fn allocate(&self, layout: Layout) -> FrameIter<'_, Self> 46 46 where 47 47 Self: Sized, 48 48 { ··· 53 53 } 54 54 } 55 55 56 - // fn allocate_zeroed(&self, layout: Layout) -> FrameIter<'_, Self, true> 57 - // where 58 - // Self: Sized, 59 - // { 60 - // FrameIter { 61 - // alloc: self, 62 - // remaining: layout.size(), 63 - // alignment: layout.align(), 64 - // } 65 - // } 56 + fn allocate_zeroed<'a, A: Arch>( 57 + &self, 58 + layout: Layout, 59 + physmap: &'a PhysMap, 60 + arch: &'a A, 61 + ) -> FrameIterZeroed<'_, 'a, Self, A> 62 + where 63 + Self: Sized, 64 + { 65 + FrameIterZeroed { 66 + inner: self.allocate(layout), 67 + physmap, 68 + arch, 69 + } 70 + } 66 71 67 72 /// Attempts to allocate a contiguous block of physical memory. 68 73 /// ··· 98 103 physmap: &PhysMap, 99 104 arch: &impl Arch, 100 105 ) -> Result<PhysicalAddress, AllocError> { 101 - let frame = self.allocate_contiguous(layout)?; 106 + let phys = self.allocate_contiguous(layout)?; 102 107 103 - let page = physmap.phys_to_virt(frame); 108 + let virt = physmap.phys_to_virt(phys); 104 109 105 110 // Safety: the address is properly aligned (at least page aligned) and is either valid to 106 111 // access through the physical memory map or because we're in bootstrapping still and phys==virt 107 112 unsafe { 108 - arch.write_bytes(page, 0, layout.size()); 113 + arch.write_bytes(virt, 0, layout.size()); 109 114 } 110 115 111 - Ok(frame) 116 + Ok(phys) 112 117 } 113 118 114 119 /// Deallocates the block of memory referenced by `block`. 
··· 142 147 (**self).allocate_contiguous(layout) 143 148 } 144 149 145 - // fn allocate_contiguous_zeroed(&self, layout: Layout, arch: &impl Arch) -> Result<PhysicalAddress, AllocError> { 146 - // (**self).allocate_contiguous_zeroed(layout, arch) 147 - // } 150 + fn allocate_contiguous_zeroed( 151 + &self, 152 + layout: Layout, 153 + physmap: &PhysMap, 154 + arch: &impl Arch, 155 + ) -> Result<PhysicalAddress, AllocError> { 156 + (**self).allocate_contiguous_zeroed(layout, physmap, arch) 157 + } 148 158 149 159 unsafe fn deallocate(&self, block: PhysicalAddress, layout: Layout) { 150 160 // Safety: ensured by caller ··· 156 166 } 157 167 } 158 168 159 - pub struct FrameIter<'alloc, F: ?Sized, const ZEROED: bool> { 169 + pub struct FrameIter<'alloc, F: ?Sized> { 160 170 alloc: &'alloc F, 161 171 remaining: usize, 162 172 alignment: usize, 163 173 } 164 174 165 - impl<F: FrameAllocator, const ZEROED: bool> FallibleIterator for FrameIter<'_, F, ZEROED> { 175 + impl<F: FrameAllocator> FallibleIterator for FrameIter<'_, F> { 166 176 type Item = Range<PhysicalAddress>; 167 177 type Error = AllocError; 168 178 ··· 182 192 ); 183 193 let layout = Layout::from_size_align(alloc_size.get(), self.alignment).unwrap(); 184 194 185 - let addr = if ZEROED { 186 - todo!() 187 - // self.alloc.allocate_contiguous_zeroed(layout)? 188 - } else { 189 - self.alloc.allocate_contiguous(layout)? 
190 - }; 195 + let addr = self.alloc.allocate_contiguous(layout)?; 191 196 192 197 self.remaining -= requested_size.get(); 193 198 194 199 Ok(Some(Range::from_start_len(addr, requested_size.get()))) 195 200 } 196 201 } 202 + 203 + pub struct FrameIterZeroed<'alloc, 'a, F: ?Sized, A: Arch> { 204 + inner: FrameIter<'alloc, F>, 205 + physmap: &'a PhysMap, 206 + arch: &'a A, 207 + } 208 + 209 + impl<F: FrameAllocator, A: Arch> FallibleIterator for FrameIterZeroed<'_, '_, F, A> { 210 + type Item = Range<PhysicalAddress>; 211 + type Error = AllocError; 212 + 213 + fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> { 214 + let Some(range) = self.inner.next()? else { 215 + return Ok(None); 216 + }; 217 + 218 + let virt = self.physmap.phys_to_virt_range(range.clone()); 219 + 220 + // Safety: we just allocated the frame 221 + unsafe { 222 + self.arch.write_bytes(virt.start, 0, virt.len()); 223 + } 224 + 225 + Ok(Some(range)) 226 + } 227 + }
+1
libs/kmem/src/lib.rs
··· 21 21 pub use address::{PhysicalAddress, VirtualAddress}; 22 22 pub use address_range::AddressRangeExt; 23 23 pub use address_space::HardwareAddressSpace; 24 + pub use arch::Arch; 24 25 pub use flush::Flush; 25 26 pub use frame_allocator::{AllocError, FrameAllocator, FrameIter}; 26 27 pub use memory_attributes::{MemoryAttributes, WriteOrExecute};
+1 -1
libs/kmem/src/memory_attributes.rs
··· 20 20 /// In order to prevent malicious code execution as proactively as possible, 21 21 /// [`AccessRules`] can either allow *writes* OR *execution* but never both. This is enforced 22 22 /// through the [`WriteOrExecute`] enum field. 23 - #[derive(PartialEq, Eq)] 23 + #[derive(Default, PartialEq, Eq)] 24 24 pub struct MemoryAttributes<u8> { 25 25 /// If set, reading from the memory region is allowed. 26 26 pub const READ: bool;
+1 -1
libs/kmem/src/test_utils.rs
··· 4 4 pub mod proptest; 5 5 6 6 pub use arch::EmulateArch; 7 - pub use machine::{BootstrapResult, Cpu, HasMemory, Machine, MachineBuilder, MissingMemory}; 7 + pub use machine::{Cpu, HasMemory, Machine, MachineBuilder, MissingMemory}; 8 8 pub use memory::Memory; 9 9 10 10 #[macro_export]
+51 -20
libs/kmem/src/test_utils/machine.rs
··· 54 54 self.0.memory.regions().collect() 55 55 } 56 56 57 - pub unsafe fn read<T>(&self, asid: u16, addr: VirtualAddress) -> T { 58 - assert!(addr.is_aligned_to(size_of::<T>())); 57 + pub unsafe fn read<T>(&self, asid: u16, address: VirtualAddress) -> T { 58 + assert!(address.is_aligned_to(size_of::<T>())); 59 59 60 - if let Some((phys, attrs, _level)) = self.cpu().translate(asid, addr) { 60 + if let Some((phys, attrs, level)) = self.cpu().translate(asid, address) { 61 61 assert!(attrs.allows_read()); 62 + assert_eq!( 63 + address.align_down(level.page_size()), 64 + address.add(size_of::<T>()).align_down(level.page_size()), 65 + "reads crossing page boundaries are not supported. {address} + {}", 66 + size_of::<T>() 67 + ); 62 68 63 69 unsafe { self.read_phys(phys) } 64 70 } else { 65 - core::panic!("read: {addr} size {:#x} not present", size_of::<T>()); 71 + core::panic!("read: {address} size {:#x} not present", size_of::<T>()); 66 72 } 67 73 } 68 74 69 - pub unsafe fn write<T>(&self, asid: u16, addr: VirtualAddress, value: T) { 70 - assert!(addr.is_aligned_to(size_of::<T>())); 75 + pub unsafe fn write<T>(&self, asid: u16, address: VirtualAddress, value: T) { 76 + assert!(address.is_aligned_to(size_of::<T>())); 71 77 72 - if let Some((phys, attrs, _level)) = self.cpu().translate(asid, addr) { 78 + if let Some((phys, attrs, level)) = self.cpu().translate(asid, address) { 73 79 assert!(attrs.allows_read()); 80 + assert_eq!( 81 + address.align_down(level.page_size()), 82 + address.add(size_of::<T>()).align_down(level.page_size()), 83 + "typed writes crossing page boundaries are not supported. 
{address} + {}", 84 + size_of::<T>() 85 + ); 74 86 75 87 unsafe { self.write_phys(phys, value) } 76 88 } else { 77 - core::panic!("write: {addr} size {:#x} not present", size_of::<T>()); 89 + core::panic!("write: {address} size {:#x} not present", size_of::<T>()); 90 + } 91 + } 92 + 93 + pub fn read_bytes(&self, asid: u16, address: VirtualAddress, count: usize) -> &[u8] { 94 + if let Some((phys, attrs, level)) = self.cpu().translate(asid, address) { 95 + assert!(attrs.allows_read()); 96 + assert_eq!( 97 + address.align_down(level.page_size()), 98 + address.add(count).align_down(level.page_size()), 99 + "reads crossing page boundaries are not supported. {address} + {}", 100 + count 101 + ); 102 + 103 + self.read_bytes_phys(phys, count) 104 + } else { 105 + panic!("write: {address} size {count:#x} not present"); 78 106 } 79 107 } 80 108 ··· 104 132 105 133 pub unsafe fn write_phys<T>(&self, address: PhysicalAddress, value: T) { 106 134 unsafe { self.0.memory.write(address, value) } 135 + } 136 + 137 + pub fn read_bytes_phys(&self, address: PhysicalAddress, count: usize) -> &[u8] { 138 + self.0.memory.read_bytes(address, count) 107 139 } 108 140 109 141 pub fn write_bytes_phys(&self, address: PhysicalAddress, value: u8, count: usize) { ··· 247 279 _m: PhantomData<(A, R)>, 248 280 } 249 281 250 - pub struct BootstrapResult<A: Arch, R: lock_api::RawMutex> { 251 - pub machine: Machine<A>, 252 - pub address_space: HardwareAddressSpace<EmulateArch<A>>, 253 - pub frame_allocator: BootstrapAllocator<R>, 254 - } 255 - 256 282 impl<A: Arch, R: lock_api::RawMutex> MachineBuilder<A, R, MissingMemory> { 257 283 pub fn new() -> Self { 258 284 Self { ··· 299 325 (Machine(Arc::new(inner)), physmap) 300 326 } 301 327 302 - pub fn finish_and_bootstrap(self) -> Result<BootstrapResult<A, R>, AllocError> { 328 + pub fn finish_and_bootstrap( 329 + self, 330 + ) -> Result< 331 + ( 332 + Machine<A>, 333 + HardwareAddressSpace<EmulateArch<A>>, 334 + BootstrapAllocator<R>, 335 + ), 336 + 
AllocError, 337 + > { 303 338 let (machine, physmap) = self.finish(); 304 339 305 340 let arch = EmulateArch::new(machine.clone()); ··· 318 353 319 354 flush.flush(address_space.arch()); 320 355 321 - Ok(BootstrapResult { 322 - machine, 323 - address_space, 324 - frame_allocator, 325 - }) 356 + Ok((machine, address_space, frame_allocator)) 326 357 } 327 358 }
+15 -22
libs/kmem/src/test_utils/memory.rs
··· 55 55 Some((*region, offset)) 56 56 } 57 57 58 - pub fn with_region<Ret>( 59 - &self, 60 - range: Range<PhysicalAddress>, 61 - will_write: bool, 62 - cb: impl FnOnce(&mut [u8]) -> Ret, 63 - ) -> Ret { 58 + pub fn region(&self, range: Range<PhysicalAddress>, will_write: bool) -> &mut [u8] { 64 59 let Some((mut region, offset)) = self.get_region_containing(range.start) else { 65 60 let access_ty = if will_write { "write" } else { "read" }; 66 61 ··· 71 66 }; 72 67 73 68 let region = unsafe { region.as_mut() }; 74 - let res = cb(&mut region[offset..offset + range.len()]); 75 - 76 - res 69 + &mut region[offset..offset + range.len()] 77 70 } 78 71 79 72 pub unsafe fn read<T>(&self, address: PhysicalAddress) -> T { 80 73 let size = size_of::<T>(); 81 - self.with_region( 82 - Range::from_start_len(address, size), 83 - false, 84 - |region| unsafe { region.as_ptr().cast::<T>().read() }, 85 - ) 74 + let region = self.region(Range::from_start_len(address, size), false); 75 + 76 + unsafe { region.as_ptr().cast::<T>().read() } 86 77 } 87 78 88 79 pub unsafe fn write<T>(&self, address: PhysicalAddress, value: T) { 89 80 let size = size_of::<T>(); 90 - self.with_region( 91 - Range::from_start_len(address, size), 92 - true, 93 - |region| unsafe { region.as_mut_ptr().cast::<T>().write(value) }, 94 - ) 81 + let region = self.region(Range::from_start_len(address, size), true); 82 + 83 + unsafe { region.as_mut_ptr().cast::<T>().write(value) } 84 + } 85 + 86 + pub fn read_bytes(&self, address: PhysicalAddress, count: usize) -> &[u8] { 87 + self.region(Range::from_start_len(address, count), false) 95 88 } 96 89 97 90 pub fn write_bytes(&self, address: PhysicalAddress, value: u8, count: usize) { 98 - self.with_region(Range::from_start_len(address, count), true, |region| { 99 - region.fill(value); 100 - }) 91 + let region = self.region(Range::from_start_len(address, count), true); 92 + 93 + region.fill(value); 101 94 } 102 95 } 103 96