Next Generation WASM Microkernel Operating System

refactor(kmem): streamline test utils (#632)

* refactor(kmem): streamline test utils

* refactor(kmem): test utils use layout instead of sizes

* docs(kmem): improve test utils docs

* fix(kmem): fix permission check in test utils `Machine::write`

authored by

Jonas Kruckenberg and committed by
GitHub
e258a2a7 73c72c1c

+413 -158
+2 -2
libs/kmem/Cargo.toml
··· 12 12 k23-spin = { workspace = true, optional = true } 13 13 proptest = { workspace = true, optional = true } 14 14 proptest-derive = { workspace = true, optional = true } 15 + parking_lot = { version = "0.12.5", optional = true } 15 16 16 17 # 3rd-party dependencies 17 18 mycelium-bitfield.workspace = true ··· 20 21 21 22 [dev-dependencies] 22 23 kmem = { workspace = true, features = ["test_utils"] } 23 - parking_lot = "0.12.5" 24 24 test-log = "0.2.19" 25 25 26 26 [features] 27 - test_utils = ["k23-cpu-local", "k23-spin", "proptest", "proptest-derive"] 27 + test_utils = ["k23-cpu-local", "k23-spin", "proptest", "proptest-derive", "parking_lot"] 28 28 29 29 [lints] 30 30 workspace = true
+19 -16
libs/kmem/src/address_space.rs
··· 544 544 545 545 #[cfg(test)] 546 546 mod tests { 547 + use std::alloc::Layout; 547 548 use std::ops::Range; 548 549 549 550 use crate::address_range::AddressRangeExt; 550 551 use crate::arch::Arch; 551 552 use crate::flush::Flush; 552 553 use crate::frame_allocator::FrameAllocator; 553 - use crate::test_utils::MachineBuilder; 554 + use crate::test_utils::{Machine, MachineBuilder}; 554 555 use crate::{MemoryAttributes, VirtualAddress, WriteOrExecute, archtest}; 555 556 556 557 archtest! { 557 558 #[test] 558 559 fn map<A: Arch>() { 559 - let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 560 - .with_memory_regions([0xA000]) 561 - .finish_and_bootstrap() 562 - .unwrap(); 563 - let (_, mut address_space, frame_allocator) = res; 560 + let machine: Machine<A> = MachineBuilder::new() 561 + .with_memory_regions([ 562 + Layout::from_size_align(0xA000, A::GRANULE_SIZE).unwrap() 563 + ]) 564 + .finish(); 565 + 566 + let (mut address_space, frame_allocator) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 564 567 565 568 let frame = frame_allocator 566 569 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 593 596 594 597 #[test] 595 598 fn remap<A: Arch>() { 596 - let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 597 - .with_memory_regions([0xB000]) 598 - .finish_and_bootstrap() 599 - .unwrap(); 600 - let (_, mut address_space, frame_allocator) = res; 599 + let machine: Machine<A> = MachineBuilder::new() 600 + .with_memory_regions([Layout::from_size_align(0xB000, A::GRANULE_SIZE).unwrap()]) 601 + .finish(); 602 + 603 + let (mut address_space, frame_allocator) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 601 604 602 605 let frame = frame_allocator 603 606 .allocate_contiguous(A::GRANULE_LAYOUT) ··· 650 653 651 654 #[test] 652 655 fn set_attributes<A: Arch>() { 653 - let res = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 654 - .with_memory_regions([0xB000]) 655 - .finish_and_bootstrap() 656 - .unwrap(); 657 - let (_, mut 
address_space, frame_allocator) = res; 656 + let machine: Machine<A> = MachineBuilder::new() 657 + .with_memory_regions([Layout::from_size_align(0xB000, A::GRANULE_SIZE).unwrap()]) 658 + .finish(); 659 + 660 + let (mut address_space, frame_allocator) = machine.bootstrap_address_space(A::DEFAULT_PHYSMAP_BASE); 658 661 659 662 let frame = frame_allocator 660 663 .allocate_contiguous(A::GRANULE_LAYOUT)
+111 -64
libs/kmem/src/bootstrap/frame_allocator.rs
··· 510 510 use crate::arch::Arch; 511 511 use crate::bootstrap::BootstrapAllocator; 512 512 use crate::frame_allocator::FrameAllocator; 513 - use crate::test_utils::{EmulateArch, MachineBuilder}; 513 + use crate::test_utils::{EmulateArch, Machine, MachineBuilder}; 514 514 use crate::{GIB, PhysMap, PhysicalAddress, archtest}; 515 515 516 516 fn assert_zeroed(frame: PhysicalAddress, bytes: usize, physmap: &PhysMap, arch: &impl Arch) { ··· 523 523 // Assert that the BootstrapAllocator can allocate frames 524 524 #[test_log::test] 525 525 fn allocate_contiguous_smoke<A: Arch>() { 526 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 527 - .with_memory_regions([0x2000, 0x1000]) 526 + let machine: Machine<A> = MachineBuilder::new() 527 + .with_memory_regions([ 528 + Layout::from_size_align(2 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 529 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 530 + ]) 528 531 .finish(); 529 532 530 533 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 531 - BootstrapAllocator::new::<A>(machine.memory_regions()); 534 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 532 535 533 536 // Based on the memory of the machine we set up above, we expect the allocator to 534 537 // yield 3 pages. ··· 548 551 .unwrap(); 549 552 assert!(frame.is_aligned_to(A::GRANULE_SIZE)); 550 553 551 - // assert that we're out of memory 554 + // after that, assert that we're out of memory 555 + 552 556 frame_allocator 553 557 .allocate_contiguous(A::GRANULE_LAYOUT) 554 558 .unwrap_err(); ··· 558 562 // bootstrap (bare, before paging is enabled) mode. 
559 563 #[test_log::test] 560 564 fn allocate_contiguous_zeroed_smoke<A: Arch>() { 561 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 562 - .with_memory_regions([0x2000, 0x1000]) 565 + let machine: Machine<A> = MachineBuilder::new() 566 + .with_memory_regions([ 567 + Layout::from_size_align(2 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 568 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 569 + ]) 563 570 .finish(); 564 571 565 572 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 566 - BootstrapAllocator::new::<A>(machine.memory_regions()); 573 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 567 574 568 575 let arch = EmulateArch::new(machine); 569 576 ··· 590 597 assert!(frame.is_aligned_to(A::GRANULE_SIZE)); 591 598 assert_zeroed(frame, A::GRANULE_SIZE, &physmap, &arch); 592 599 593 - // assert that we're out of memory 600 + // after that, assert that we're out of memory 601 + 594 602 frame_allocator 595 603 .allocate_contiguous_zeroed(A::GRANULE_LAYOUT, &physmap, &arch) 596 604 .unwrap_err(); ··· 598 606 599 607 #[test_log::test] 600 608 fn allocate_smoke<A: Arch>() { 601 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 602 - .with_memory_regions([0x3000, 0x1000]) 609 + let machine: Machine<A> = MachineBuilder::new() 610 + .with_memory_regions([ 611 + Layout::from_size_align(3 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 612 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 613 + ]) 603 614 .finish(); 604 615 605 616 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 606 - BootstrapAllocator::new::<A>(machine.memory_regions()); 617 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 607 618 608 619 let blocks: Vec<_> = frame_allocator 609 - .allocate(Layout::from_size_align(0x4000, A::GRANULE_SIZE).unwrap()) 620 + .allocate(Layout::from_size_align(4 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap()) 610 621 
.unwrap() 611 622 .collect(); 612 623 613 624 // assert the total size is what we expect 614 625 let allocated_size: usize = blocks.iter().map(|block| block.len()).sum(); 615 - assert!(allocated_size >= 0x4000); 626 + assert!(allocated_size >= 4 * A::GRANULE_SIZE); 616 627 617 628 // assert each block is aligned correctly 618 629 for block in blocks.iter() { ··· 622 633 623 634 #[test_log::test] 624 635 fn allocate_zeroed_smoke<A: Arch>() { 625 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 626 - .with_memory_regions([0x3000, 0x1000]) 636 + let machine: Machine<A> = MachineBuilder::new() 637 + .with_memory_regions([ 638 + Layout::from_size_align(3 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 639 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 640 + ]) 627 641 .finish(); 628 642 629 643 let arch = EmulateArch::new(machine.clone()); ··· 631 645 let physmap = PhysMap::new_bootstrap(); 632 646 633 647 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 634 - BootstrapAllocator::new::<A>(machine.memory_regions()); 648 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 635 649 636 650 let blocks: Vec<_> = frame_allocator 637 - .allocate_zeroed(Layout::from_size_align(0x4000, A::GRANULE_SIZE).unwrap(), &physmap, &arch) 651 + .allocate_zeroed( 652 + Layout::from_size_align(4 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 653 + &physmap, 654 + &arch, 655 + ) 638 656 .unwrap() 639 657 .collect(); 640 658 641 659 // assert the total size is what we expect 642 660 let allocated_size: usize = blocks.iter().map(|block| block.len()).sum(); 643 - assert!(allocated_size >= 0x4000); 661 + assert!(allocated_size >= 4 * A::GRANULE_SIZE); 644 662 645 663 // assert each block is aligned correctly 646 664 for block in blocks.iter() { ··· 652 670 653 671 #[test_log::test] 654 672 fn allocate_contiguous_small_alignment<A: Arch>() { 655 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 656 
- .with_memory_regions([0x4000, 0x1000]) 673 + let machine: Machine<A> = MachineBuilder::new() 674 + .with_memory_regions([ 675 + Layout::from_size_align(4 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 676 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 677 + ]) 657 678 .finish(); 658 679 659 680 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 660 - BootstrapAllocator::new::<A>(machine.memory_regions()); 681 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 661 682 662 - let frame = frame_allocator.allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()).unwrap(); 683 + let frame = frame_allocator 684 + .allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()) 685 + .unwrap(); 663 686 664 687 assert!(frame.is_aligned_to(1)); 665 688 assert!(frame.is_aligned_to(A::GRANULE_SIZE)); ··· 667 690 668 691 #[test_log::test] 669 692 fn allocate_small_alignment<A: Arch>() { 670 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 671 - .with_memory_regions([0x4000, 0x1000]) 693 + let machine: Machine<A> = MachineBuilder::new() 694 + .with_memory_regions([ 695 + Layout::from_size_align(4 * A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 696 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 697 + ]) 672 698 .finish(); 673 699 674 700 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 675 - BootstrapAllocator::new::<A>(machine.memory_regions()); 701 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 676 702 677 - let blocks = frame_allocator.allocate(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()).unwrap(); 703 + let blocks = frame_allocator 704 + .allocate(Layout::from_size_align(A::GRANULE_SIZE, 1).unwrap()) 705 + .unwrap(); 678 706 679 707 for block in blocks { 680 708 assert!(block.start.is_aligned_to(1)); ··· 684 712 685 713 #[test_log::test] 686 714 fn allocate_contiguous_large_alignment<A: Arch>() { 687 - let 
(machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 688 - .with_memory_regions([2*GIB, 0x1000]) 715 + let machine: Machine<A> = MachineBuilder::new() 716 + .with_memory_regions([ 717 + Layout::from_size_align(2 * GIB, 1 * GIB).unwrap(), 718 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 719 + ]) 689 720 .finish(); 690 721 691 722 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 692 - BootstrapAllocator::new::<A>(machine.memory_regions()); 723 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 693 724 694 - let frame = frame_allocator.allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1*GIB).unwrap()).unwrap(); 725 + let frame = frame_allocator 726 + .allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, 1 * GIB).unwrap()) 727 + .unwrap(); 695 728 696 - assert!(frame.is_aligned_to(1*GIB)); 729 + assert!(frame.is_aligned_to(1 * GIB)); 697 730 } 698 731 699 732 #[test_log::test] 700 733 fn allocate_large_alignment<A: Arch>() { 701 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 702 - .with_memory_regions([2*GIB, 0x1000]) 734 + let machine: Machine<A> = MachineBuilder::new() 735 + .with_memory_regions([ 736 + Layout::from_size_align(2 * GIB, 1 * GIB).unwrap(), 737 + Layout::from_size_align(A::GRANULE_SIZE, A::GRANULE_SIZE).unwrap(), 738 + ]) 703 739 .finish(); 704 740 705 741 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 706 - BootstrapAllocator::new::<A>(machine.memory_regions()); 742 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 707 743 708 - let blocks = frame_allocator.allocate(Layout::from_size_align(A::GRANULE_SIZE, 1*GIB).unwrap()).unwrap(); 744 + let blocks = frame_allocator 745 + .allocate(Layout::from_size_align(A::GRANULE_SIZE, 1 * GIB).unwrap()) 746 + .unwrap(); 709 747 710 748 for block in blocks { 711 - assert!(block.start.is_aligned_to(1*GIB)); 749 + assert!(block.start.is_aligned_to(1 * GIB)); 712 
750 } 713 751 } 714 752 } ··· 724 762 use crate::arch::Arch; 725 763 use crate::bootstrap::{BootstrapAllocator, DEFAULT_MAX_REGIONS}; 726 764 use crate::frame_allocator::FrameAllocator; 727 - use crate::test_utils::MachineBuilder; 728 - use crate::test_utils::proptest::region_sizes; 765 + use crate::test_utils::proptest::region_layouts; 766 + use crate::test_utils::{Machine, MachineBuilder}; 729 767 use crate::{GIB, KIB, for_every_arch}; 730 768 731 769 for_every_arch!(A => { 732 770 proptest! { 733 771 #[test_log::test] 734 - fn allocate(region_sizes in region_sizes(1..DEFAULT_MAX_REGIONS, 4*KIB, 16*GIB)) { 735 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 736 - .with_memory_regions(region_sizes.clone()) 772 + fn allocate(region_layouts in region_layouts(1..DEFAULT_MAX_REGIONS, 4*KIB, 16*GIB)) { 773 + let machine: Machine<A> = MachineBuilder::new() 774 + .with_memory_regions(region_layouts.clone()) 737 775 .finish(); 738 776 739 777 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 740 - BootstrapAllocator::new::<A>(machine.memory_regions()); 778 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 741 779 742 - let total_size = region_sizes.iter().sum(); 780 + let total_size = region_layouts.iter().map(|layout| layout.size()).sum(); 743 781 744 - let res = frame_allocator 745 - .allocate(Layout::from_size_align(total_size, A::GRANULE_SIZE).unwrap()); 746 - prop_assert!(res.is_ok(), "failed to allocate {} bytes with alignment {}. capacities left {:?}", total_size, A::GRANULE_SIZE, frame_allocator.capacities()); 782 + let res = 783 + frame_allocator.allocate(Layout::from_size_align(total_size, A::GRANULE_SIZE).unwrap()); 784 + prop_assert!( 785 + res.is_ok(), 786 + "failed to allocate {} bytes with alignment {}. 
capacities left {:?}", 787 + total_size, 788 + A::GRANULE_SIZE, 789 + frame_allocator.capacities() 790 + ); 747 791 let blocks = res.unwrap(); 748 792 749 793 let blocks: Vec<_> = blocks.collect(); ··· 759 803 } 760 804 761 805 #[test_log::test] 762 - fn allocate_contiguous(region_sizes in region_sizes(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB)) { 763 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 764 - .with_memory_regions(region_sizes.clone()) 806 + fn allocate_contiguous(region_layouts in region_layouts(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB)) { 807 + let machine: Machine<A> = MachineBuilder::new() 808 + .with_memory_regions(region_layouts.clone()) 765 809 .finish(); 766 810 767 811 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 768 - BootstrapAllocator::new::<A>(machine.memory_regions()); 812 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 769 813 770 - let total_size = region_sizes.iter().sum(); 814 + let total_size = region_layouts.iter().map(|layout| layout.size()).sum(); 771 815 772 - for _ in (0..total_size).step_by(1*GIB) { 773 - let res = frame_allocator.allocate_contiguous(Layout::from_size_align(1*GIB, A::GRANULE_SIZE).unwrap()); 816 + for _ in (0..total_size).step_by(1 * GIB) { 817 + let res = frame_allocator 818 + .allocate_contiguous(Layout::from_size_align(1 * GIB, A::GRANULE_SIZE).unwrap()); 774 819 prop_assert!(res.is_ok()); 775 820 let base = res.unwrap(); 776 821 prop_assert!(base.is_aligned_to(A::GRANULE_SIZE)); ··· 778 823 } 779 824 780 825 #[test_log::test] 781 - fn allocate_contiguous_alignments(region_sizes in region_sizes(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB), alignment_pot in 1..30) { 782 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 783 - .with_memory_regions(region_sizes.clone()) 826 + fn allocate_contiguous_alignments(region_layouts in region_layouts(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB), alignment_pot in 1..30) { 827 + let machine: Machine<A> = 
MachineBuilder::new() 828 + .with_memory_regions(region_layouts.clone()) 784 829 .finish(); 785 830 786 831 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 787 - BootstrapAllocator::new::<A>(machine.memory_regions()); 832 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 788 833 789 834 let alignment = 1usize << alignment_pot; 790 835 791 - let res = frame_allocator.allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, alignment).unwrap()); 836 + let res = frame_allocator 837 + .allocate_contiguous(Layout::from_size_align(A::GRANULE_SIZE, alignment).unwrap()); 792 838 prop_assert!(res.is_ok()); 793 839 let base = res.unwrap(); 794 840 ··· 796 842 } 797 843 798 844 #[test_log::test] 799 - fn allocate_alignments(region_sizes in region_sizes(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB), alignment_pot in 1..30) { 800 - let (machine, _) = MachineBuilder::<A, parking_lot::RawMutex, _>::new() 801 - .with_memory_regions(region_sizes.clone()) 845 + fn allocate_alignments(region_layouts in region_layouts(1..DEFAULT_MAX_REGIONS, 1*GIB, 16*GIB), alignment_pot in 1..30) { 846 + let machine: Machine<A> = MachineBuilder::new() 847 + .with_memory_regions(region_layouts.clone()) 802 848 .finish(); 803 849 804 850 let frame_allocator: BootstrapAllocator<parking_lot::RawMutex> = 805 - BootstrapAllocator::new::<A>(machine.memory_regions()); 851 + BootstrapAllocator::new::<A>(machine.memory_regions().collect()); 806 852 807 853 let alignment = 1usize << alignment_pot; 808 854 809 - let res = frame_allocator.allocate(Layout::from_size_align(A::GRANULE_SIZE, alignment).unwrap()); 855 + let res = 856 + frame_allocator.allocate(Layout::from_size_align(A::GRANULE_SIZE, alignment).unwrap()); 810 857 prop_assert!(res.is_ok()); 811 858 let blocks = res.unwrap(); 812 859
+3 -3
libs/kmem/src/physmap.rs
··· 98 98 use super::*; 99 99 use crate::address_range::AddressRangeExt; 100 100 use crate::test_utils::proptest::{ 101 - aligned_phys, aligned_virt, pick_address_in_regions, regions, 101 + aligned_phys, aligned_virt, pick_address_in_regions, regions_phys, 102 102 }; 103 103 use crate::{GIB, KIB}; 104 104 ··· 119 119 } 120 120 121 121 #[test] 122 - fn multi_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), regions in regions(1..10, 4*KIB, 256*GIB, 256*GIB)) { 122 + fn multi_region(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), regions in regions_phys(1..10, 4*KIB, 256*GIB, 256*GIB)) { 123 123 let regions_start = regions[0].start; 124 124 125 125 let map = PhysMap::new( ··· 131 131 } 132 132 133 133 #[test] 134 - fn phys_to_virt(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), (regions, phys) in pick_address_in_regions(regions(1..10, 4*KIB, 256*GIB, 256*GIB)), ) { 134 + fn phys_to_virt(base in aligned_virt(any::<VirtualAddress>(), 1*GIB), (regions, phys) in pick_address_in_regions(regions_phys(1..10, 4*KIB, 256*GIB, 256*GIB)), ) { 135 135 let regions_start = regions[0].start; 136 136 137 137 let map = PhysMap::new(
+10 -4
libs/kmem/src/test_utils/arch.rs
··· 69 69 // In which case we need to use `read_phys` instead of `read`, bypassing 70 70 // translation checks. 71 71 if self.active_table().is_some() { 72 + // Safety: ensured by caller. 72 73 unsafe { self.machine.read(self.asid, address) } 73 74 } else { 74 75 // Safety: We checked for the absence of an active translation table, meaning we're in ··· 83 84 // In which case we need to use `write_phys` instead of `write`, bypassing 84 85 // translation checks. 85 86 if self.active_table().is_some() { 87 + // Safety: ensured by caller. 86 88 unsafe { self.machine.write(self.asid, address, value) } 87 89 } else { 88 90 // Safety: We checked for the absence of an active translation table, meaning we're in ··· 97 99 // In which case we need to use `write_bytes_phys` instead of `write_bytes`, bypassing 98 100 // translation checks. 99 101 if self.active_table().is_some() { 100 - self.machine.read_bytes(self.asid, address, count) 102 + // Safety: ensured by caller. 103 + unsafe { self.machine.read_bytes(self.asid, address, count) } 101 104 } else { 102 105 // Safety: We checked for the absence of an active translation table, meaning we're in 103 - // "bare" mode and VirtualAddress==PhysicalAddress. 106 + // "bare" mode and VirtualAddress==PhysicalAddress. All other safety invariants are 107 + // ensured by the caller. 104 108 let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 105 109 self.machine.read_bytes_phys(address, count) 106 110 } ··· 111 115 // In which case we need to use `write_bytes_phys` instead of `write_bytes`, bypassing 112 116 // translation checks. 113 117 if self.active_table().is_some() { 114 - self.machine.write_bytes(self.asid, address, value, count) 118 + // Safety: ensured by caller. 
119 + unsafe { self.machine.write_bytes(self.asid, address, value, count) } 115 120 } else { 116 121 // Safety: We checked for the absence of an active translation table, meaning we're in 117 - // "bare" mode and VirtualAddress==PhysicalAddress. 122 + // "bare" mode and VirtualAddress==PhysicalAddress. All other safety invariants are 123 + // ensured by the caller. 118 124 let address = unsafe { mem::transmute::<VirtualAddress, PhysicalAddress>(address) }; 119 125 self.machine.write_bytes_phys(address, value, count) 120 126 }
+203 -54
libs/kmem/src/test_utils/machine.rs
··· 1 + use std::alloc::Layout; 1 2 use std::cell::{Ref, RefCell, RefMut}; 2 3 use std::collections::BTreeMap; 3 4 use std::marker::PhantomData; ··· 5 6 use std::sync::Arc; 6 7 use std::{cmp, fmt}; 7 8 8 - use k23_arrayvec::ArrayVec; 9 9 use k23_cpu_local::collection::CpuLocal; 10 10 11 11 use crate::arch::{Arch, PageTableEntry, PageTableLevel}; ··· 14 14 use crate::test_utils::arch::EmulateArch; 15 15 use crate::test_utils::memory::Memory; 16 16 use crate::utils::page_table_entries_for; 17 - use crate::{ 18 - AllocError, HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, VirtualAddress, 19 - }; 17 + use crate::{HardwareAddressSpace, MemoryAttributes, PhysMap, PhysicalAddress, VirtualAddress}; 20 18 21 19 /// A "virtual machine" that emulates a given architecture. It is intended to be used in tests 22 20 /// and supports modeling the following properties: ··· 50 48 } 51 49 52 50 impl<A: Arch> Machine<A> { 53 - pub fn memory_regions<const MAX: usize>(&self) -> ArrayVec<Range<PhysicalAddress>, MAX> { 54 - self.0.memory.regions().collect() 51 + /// Bootstrap an address space for this machine. Will set up initial page table and 52 + /// frame allocator. 53 + pub fn bootstrap_address_space( 54 + &self, 55 + physmap_start: VirtualAddress, 56 + ) -> ( 57 + HardwareAddressSpace<EmulateArch<A>>, 58 + BootstrapAllocator<parking_lot::RawMutex>, 59 + ) { 60 + let physmap = PhysMap::new(physmap_start, self.memory_regions()); 61 + 62 + let arch = EmulateArch::new(self.clone()); 63 + 64 + let frame_allocator = 65 + BootstrapAllocator::new::<A>(arch.machine().memory_regions().collect()); 66 + 67 + let mut flush = Flush::new(); 68 + let mut aspace = 69 + HardwareAddressSpace::new_bootstrap(arch, physmap, &frame_allocator, &mut flush) 70 + .expect("Machine does not have enough physical memory for root page table. 
Consider increasing configured physical memory sizes."); 71 + 72 + aspace 73 + .map_physical_memory(&frame_allocator, &mut flush) 74 + .expect("Machine does not have enough physical memory for physmap. Consider increasing configured physical memory sizes."); 75 + 76 + // Safety: we just created the address space, so don't have any pointers into it. In hosted tests 77 + // the programs memory and CPU registers are outside the address space anyway. 78 + let address_space = unsafe { aspace.finish_bootstrap_and_activate() }; 79 + 80 + flush.flush(address_space.arch()); 81 + 82 + (address_space, frame_allocator) 55 83 } 56 84 85 + /// Returns an iterator over the physical memory regions in this machine 86 + pub fn memory_regions(&self) -> impl Iterator<Item = Range<PhysicalAddress>> { 87 + self.0.memory.regions() 88 + } 89 + 90 + /// Reads the value from `address` without moving it. This leaves the memory in `address` unchanged. 91 + /// 92 + /// This method **does not** support reads crossing page boundaries. 93 + /// 94 + /// # Safety 95 + /// 96 + /// This method largely inherits the safety requirements of [`ptr::read`], namely 97 + /// behavior is undefined if any of the following conditions are violated: 98 + /// 99 + /// - `address` must be [valid] for reads. 100 + /// - `address` must be properly aligned. 101 + /// - `address` must point to a properly initialized value of type T. 102 + /// 103 + /// Note that even if T has size 0, the pointer must be properly aligned. 104 + /// 105 + /// [valid]: 106 + /// [`ptr::read`]: core::ptr::read() 57 107 pub unsafe fn read<T>(&self, asid: u16, address: VirtualAddress) -> T { 58 108 assert!(address.is_aligned_to(size_of::<T>())); 59 109 ··· 72 122 } 73 123 } 74 124 125 + /// Overwrites the memory location pointed to by `address` with the given value without reading 126 + /// or dropping the old value. 127 + /// 128 + /// This method **does not** support writes crossing page boundaries. 
129 + /// 130 + /// # Safety 131 + /// 132 + /// This method largely inherits the safety requirements of [`ptr::write`], namely 133 + /// behavior is undefined if any of the following conditions are violated: 134 + /// 135 + /// - `address` must be [valid] for writes. 136 + /// - `address` must be properly aligned. 137 + /// 138 + /// Note that even if T has size 0, the pointer must be properly aligned. 139 + /// 140 + /// [valid]: 141 + /// [`ptr::write`]: core::ptr::write() 75 142 pub unsafe fn write<T>(&self, asid: u16, address: VirtualAddress, value: T) { 76 143 assert!(address.is_aligned_to(size_of::<T>())); 77 144 78 145 if let Some((phys, attrs, level)) = self.cpu().translate(asid, address) { 79 - assert!(attrs.allows_read()); 146 + assert!(attrs.allows_write()); 80 147 assert_eq!( 81 148 address.align_down(level.page_size()), 82 149 address.add(size_of::<T>()).align_down(level.page_size()), ··· 90 157 } 91 158 } 92 159 93 - pub fn read_bytes(&self, asid: u16, address: VirtualAddress, count: usize) -> &[u8] { 160 + /// Reads `count` bytes of memory starting at `address`. This leaves the memory in `address` unchanged. 161 + /// 162 + /// This method **does not** support reads crossing page boundaries. 163 + /// 164 + /// # Safety 165 + /// 166 + /// This method largely inherits the safety requirements of [`slice::from_raw_parts`], namely 167 + /// behavior is undefined if any of the following conditions are violated: 168 + /// 169 + /// - `address` must be non-null and [valid] for reads of `count` bytes. 170 + /// - `address` must be properly aligned. 171 + /// - The memory referenced by the returned slice must not be mutated for the duration of its lifetime. 
172 + pub unsafe fn read_bytes(&self, asid: u16, address: VirtualAddress, count: usize) -> &[u8] { 94 173 if let Some((phys, attrs, level)) = self.cpu().translate(asid, address) { 95 174 assert!(attrs.allows_read()); 96 175 assert_eq!( ··· 106 185 } 107 186 } 108 187 109 - pub fn write_bytes(&self, asid: u16, address: VirtualAddress, value: u8, count: usize) { 188 + /// Sets `count` bytes of memory starting at `address` to `value`. 189 + /// 190 + /// `write_bytes` behaves like C's [`memset`]. 191 + /// 192 + /// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset 193 + /// 194 + /// Contrary to [`Self::read`], [`Self::write`], and [`Self::read_bytes`] this **does** 195 + /// support writes crossing page boundaries. 196 + /// 197 + /// # Safety 198 + /// 199 + /// This method largely inherits the safety requirements of [`ptr::write_bytes`], namely 200 + /// behavior is undefined if any of the following conditions are violated: 201 + /// 202 + /// - `address` must be non-null and [valid] for writes of `count` bytes. 203 + /// - `address` must be properly aligned. 204 + /// 205 + /// Note that even if the effectively copied size is 0, the pointer must be properly aligned. 206 + /// 207 + /// [valid]: 208 + /// [`ptr::write_bytes`]: core::ptr::write_bytes() 209 + /// 210 + /// Additionally, note using this method one can easily introduce undefined behavior (UB) 211 + /// later if the written bytes are not a valid representation of some T. **Use this to write 212 + /// bytes only.** If you need a way to write a type to some address, use [`Self::write`]. 213 + pub unsafe fn write_bytes(&self, asid: u16, address: VirtualAddress, value: u8, count: usize) { 110 214 let mut bytes_remaining = count; 111 215 let mut address = address; 112 216 ··· 126 230 } 127 231 } 128 232 233 + /// Reads the value from physical address `address` bypassing address translation and attribute 234 + /// checks. 
Reads the value without moving it leaving the memory in `address` unchanged. 235 + /// 236 + /// # Safety 237 + /// 238 + /// This method largely inherits the safety requirements of [`ptr::read`], namely 239 + /// behavior is undefined if any of the following conditions are violated: 240 + /// 241 + /// - `address` must be [valid] for reads. 242 + /// - `address` must be properly aligned. 243 + /// - `address` must point to a properly initialized value of type T. 244 + /// 245 + /// Note that even if T has size 0, the pointer must be properly aligned. 246 + /// 247 + /// [valid]: 248 + /// [`ptr::read`]: core::ptr::read() 129 249 pub unsafe fn read_phys<T>(&self, address: PhysicalAddress) -> T { 130 250 unsafe { self.0.memory.read(address) } 131 251 } 132 252 253 + /// Overwrites the memory location pointed to by physical address `address` bypassing address 254 + /// translation and attribute checks. Overwrites the location with the given value without reading 255 + /// or dropping the old value. 256 + /// 257 + /// This method **does not** support writes crossing page boundaries. 258 + /// 259 + /// # Safety 260 + /// 261 + /// This method largely inherits the safety requirements of [`ptr::write`], namely 262 + /// behavior is undefined if any of the following conditions are violated: 263 + /// 264 + /// - `address` must be [valid] for writes. 265 + /// - `address` must be properly aligned. 266 + /// 267 + /// Note that even if T has size 0, the pointer must be properly aligned. 268 + /// 269 + /// [valid]: 270 + /// [`ptr::write`]: core::ptr::write() 133 271 pub unsafe fn write_phys<T>(&self, address: PhysicalAddress, value: T) { 134 272 unsafe { self.0.memory.write(address, value) } 135 273 } 136 274 275 + /// Reads `count` bytes of memory starting at physical address `address` bypassing address 276 + /// translation and attribute checks. This leaves the memory in `address` unchanged. 
277 + /// 278 + /// This method **does not** support reads crossing page boundaries. 279 + /// 280 + /// # Safety 281 + /// 282 + /// This method largely inherits the safety requirements of [`slice::from_raw_parts`], namely 283 + /// behavior is undefined if any of the following conditions are violated: 284 + /// 285 + /// - `address` must be non-null and [valid] for reads of `count` bytes. 286 + /// - `address` must be properly aligned. 287 + /// - The memory referenced by the returned slice must not be mutated for the duration of its lifetime. 137 288 pub fn read_bytes_phys(&self, address: PhysicalAddress, count: usize) -> &[u8] { 138 289 self.0.memory.read_bytes(address, count) 139 290 } 140 291 292 + /// Sets `count` bytes of memory starting at physical address `address` to `value`. This 293 + /// bypasses address translation and attribute checks. 294 + /// 295 + /// `write_bytes` behaves like C's [`memset`]. 296 + /// 297 + /// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset 298 + /// 299 + /// Contrary to [`Self::read`], [`Self::write`], and [`Self::write_bytes`] this **does** 300 + /// support writes crossing page boundaries. 301 + /// 302 + /// # Safety 303 + /// 304 + /// This method largely inherits the safety requirements of [`ptr::write_bytes`], namely 305 + /// behavior is undefined if any of the following conditions are violated: 306 + /// 307 + /// - `address` must be non-null and [valid] for writes of `count` bytes. 308 + /// - `address` must be properly aligned. 309 + /// 310 + /// Note that even if the effectively copied size is 0, the pointer must be properly aligned. 311 + /// 312 + /// [valid]: 313 + /// [`ptr::write_bytes`]: core::ptr::write_bytes() 314 + /// 315 + /// Additionally, note using this method one can easily introduce undefined behavior (UB) 316 + /// later if the written bytes are not a valid representation of some T. 
**Use this to write 317 + /// bytes only.** If you need a way to write a type to some address, use [`Self::write`]. 141 318 pub fn write_bytes_phys(&self, address: PhysicalAddress, value: u8, count: usize) { 142 319 self.0.memory.write_bytes(address, value, count) 143 320 } 144 321 322 + /// Returns the active page table on the calling (emulated) CPU (thread). 145 323 pub fn active_table(&self) -> Option<PhysicalAddress> { 146 324 self.cpu().active_page_table() 147 325 } 148 326 327 + /// Sets the active page table on the calling (emulated) CPU (thread). 149 328 pub unsafe fn set_active_table(&self, address: PhysicalAddress) { 150 329 self.cpu_mut().set_active_page_table(address); 151 330 } 152 331 332 + /// Invalidates existing virtual address translation entries for address space `asid` in the 333 + /// given `address_range`. 153 334 pub fn invalidate(&self, asid: u16, address_range: Range<VirtualAddress>) { 154 335 let mut cpu = self.cpu_mut(); 155 336 156 337 cpu.invalidate(asid, address_range, &self.0.memory); 157 338 } 158 339 340 + /// Invalidates all existing virtual address translation entries for address space `asid`.
159 341 pub fn invalidate_all(&self, asid: u16) { 160 342 let mut cpu = self.cpu_mut(); 161 343 ··· 272 454 273 455 pub struct HasMemory; 274 456 275 - pub struct MachineBuilder<A: Arch, R: lock_api::RawMutex, Mem> { 457 + pub struct MachineBuilder<A: Arch, Mem> { 276 458 memory: Option<Memory>, 277 - physmap_base: VirtualAddress, 278 459 _has: PhantomData<Mem>, 279 - _m: PhantomData<(A, R)>, 460 + _m: PhantomData<A>, 280 461 } 281 462 282 - impl<A: Arch, R: lock_api::RawMutex> MachineBuilder<A, R, MissingMemory> { 463 + impl<A: Arch> MachineBuilder<A, MissingMemory> { 283 464 pub fn new() -> Self { 284 465 Self { 285 466 memory: None, 286 - physmap_base: A::DEFAULT_PHYSMAP_BASE, 287 467 _has: PhantomData, 288 468 _m: PhantomData, 289 469 } 290 470 } 291 471 } 292 472 293 - impl<A: Arch, R: lock_api::RawMutex> MachineBuilder<A, R, MissingMemory> { 473 + impl<A: Arch> MachineBuilder<A, MissingMemory> { 474 + /// Sets the size and alignment(s) of the machine's physical memory regions. The exact 475 + /// addresses will be chosen at random and can be retrieved via [`Machine::memory_regions`]. 294 476 pub fn with_memory_regions( 295 477 self, 296 - region_sizes: impl IntoIterator<Item = usize>, 297 - ) -> MachineBuilder<A, R, HasMemory> { 478 + region_sizes: impl IntoIterator<Item = Layout>, 479 + ) -> MachineBuilder<A, HasMemory> { 298 480 let memory = Memory::new::<A>(region_sizes); 299 481 300 482 assert!( ··· 304 486 305 487 MachineBuilder { 306 488 memory: Some(memory), 307 - physmap_base: self.physmap_base, 308 489 _has: PhantomData, 309 490 _m: PhantomData, 310 491 } 311 492 } 312 493 } 313 494 314 - impl<A: Arch, R: lock_api::RawMutex> MachineBuilder<A, R, HasMemory> { 315 - pub fn finish(self) -> (Machine<A>, PhysMap) { 495 + impl<A: Arch> MachineBuilder<A, HasMemory> { 496 + /// Finishes constructing and returns the machine.
497 + pub fn finish(self) -> Machine<A> { 316 498 let memory = self.memory.unwrap(); 317 499 318 - let physmap = PhysMap::new(self.physmap_base, memory.regions()); 319 - 320 500 let inner = MachineInner { 321 501 memory, 322 502 cpus: CpuLocal::with_capacity(std::thread::available_parallelism().unwrap().get()), 323 503 }; 324 504 325 - (Machine(Arc::new(inner)), physmap) 326 - } 327 - 328 - pub fn finish_and_bootstrap( 329 - self, 330 - ) -> Result< 331 - ( 332 - Machine<A>, 333 - HardwareAddressSpace<EmulateArch<A>>, 334 - BootstrapAllocator<R>, 335 - ), 336 - AllocError, 337 - > { 338 - let (machine, physmap) = self.finish(); 339 - 340 - let arch = EmulateArch::new(machine.clone()); 341 - 342 - let frame_allocator = BootstrapAllocator::new::<A>(arch.machine().memory_regions()); 343 - 344 - let mut flush = Flush::new(); 345 - let mut aspace = 346 - HardwareAddressSpace::new_bootstrap(arch, physmap, &frame_allocator, &mut flush)?; 347 - 348 - aspace.map_physical_memory(&frame_allocator, &mut flush)?; 349 - 350 - // Safety: we just created the address space, so don't have any pointers into it. In hosted tests 351 - // the programs memory and CPU registers are outside the address space anyway. 352 - let address_space = unsafe { aspace.finish_bootstrap_and_activate() }; 353 - 354 - flush.flush(address_space.arch()); 355 - 356 - Ok((machine, address_space, frame_allocator)) 505 + Machine(Arc::new(inner)) 357 506 } 358 507 }
+2 -4
libs/kmem/src/test_utils/memory.rs
··· 22 22 } 23 23 24 24 impl Memory { 25 - pub fn new<A: Arch>(region_sizes: impl IntoIterator<Item = usize>) -> Self { 25 + pub fn new<A: Arch>(region_sizes: impl IntoIterator<Item = Layout>) -> Self { 26 26 let regions = region_sizes 27 27 .into_iter() 28 - .map(|size| { 29 - let layout = Layout::from_size_align(size, A::GRANULE_SIZE).unwrap(); 30 - 28 + .map(|layout| { 31 29 let region = std::alloc::System.allocate(layout).unwrap(); 32 30 33 31 // Safety: we just allocated the ptr, we know it is valid
+63 -11
libs/kmem/src/test_utils/proptest.rs
··· 1 1 //! `proptest` strategies for virtual memory subsystem tests 2 2 3 + use std::alloc::Layout; 3 4 use std::ops::Range; 4 5 5 6 use proptest::prelude::{Just, Strategy}; ··· 32 33 addr.prop_map(move |value| value.align_down(alignment)) 33 34 } 34 35 35 - pub fn region_sizes( 36 + pub fn region_layouts( 36 37 num_regions: Range<usize>, 37 38 alignment: usize, 38 39 max_region_size: usize, 39 - ) -> impl Strategy<Value = Vec<usize>> { 40 + ) -> impl Strategy<Value = Vec<Layout>> { 40 41 proptest::collection::vec( 41 42 // Size of the region (will be aligned) 42 43 alignment..=max_region_size, 43 44 num_regions, 44 45 ) 45 - .prop_map(move |mut regions| { 46 - regions.iter_mut().for_each(|size| { 47 - let align_minus_one = unsafe { alignment.unchecked_sub(1) }; 46 + .prop_map(move |regions| { 47 + regions 48 + .into_iter() 49 + .map(|size| { 50 + let align_minus_one = unsafe { alignment.unchecked_sub(1) }; 51 + 52 + let size = size.wrapping_add(align_minus_one) & 0usize.wrapping_sub(alignment); 48 53 49 - *size = size.wrapping_add(align_minus_one) & 0usize.wrapping_sub(alignment); 54 + debug_assert_ne!(size, 0); 50 55 51 - debug_assert_ne!(*size, 0); 52 - }); 53 - regions 56 + Layout::from_size_align(size, alignment).unwrap() 57 + }) 58 + .collect() 54 59 }) 55 60 } 56 61 57 62 /// Produces a set of *sorted*, *non-overlapping* regions of physical memory aligned to `alignment`. 58 63 /// Most useful for initializing an emulated machine. 59 - pub fn regions( 64 + pub fn regions_phys( 60 65 num_regions: Range<usize>, 61 66 alignment: usize, 62 67 max_region_size: usize, ··· 103 108 } 104 109 105 110 /// Picks an arbitrary `PhysicalAddress` from a strategy that produces physical memory regions such 106 - /// as [`regions`]. 111 + /// as [`regions_phys`]. 
107 112 pub fn pick_address_in_regions( 108 113 regions: impl Strategy<Value = Vec<Range<PhysicalAddress>>>, 109 114 ) -> impl Strategy<Value = (Vec<Range<PhysicalAddress>>, PhysicalAddress)> { ··· 118 123 (Just(regions), address) 119 124 }) 120 125 } 126 + 127 + /// Produces a set of *sorted*, *non-overlapping* regions of virtual memory aligned to `alignment`. 128 + pub fn regions_virt( 129 + num_regions: Range<usize>, 130 + alignment: usize, 131 + max_region_size: usize, 132 + max_gap_size: usize, 133 + ) -> impl Strategy<Value = Vec<Range<VirtualAddress>>> { 134 + proptest::collection::vec( 135 + ( 136 + // Size of the region (will be aligned) 137 + alignment..=max_region_size, 138 + // Gap after this region (will be aligned) 139 + alignment..=max_gap_size, 140 + ), 141 + num_regions, 142 + ) 143 + .prop_flat_map(move |size_gap_pairs| { 144 + // Calculate the maximum starting address that won't cause overflow 145 + let max_start = { 146 + let total_space_needed: usize = 147 + size_gap_pairs.iter().map(|(size, gap)| size + gap).sum(); 148 + 149 + // Ensure we have headroom for alignment adjustments 150 + usize::MAX 151 + .saturating_sub(total_space_needed) 152 + .saturating_sub(alignment) 153 + }; 154 + 155 + (0..=max_start).prop_map(move |start_raw| { 156 + let mut regions = Vec::with_capacity(size_gap_pairs.len()); 157 + let mut current = VirtualAddress::new(start_raw).align_down(alignment); 158 + 159 + for (size, gap) in &size_gap_pairs { 160 + let range: Range<VirtualAddress> = 161 + Range::from_start_len(current, *size).align_in(alignment); 162 + assert!(!range.is_empty()); 163 + 164 + regions.push(range); 165 + 166 + current = current.add(size + gap).align_up(alignment); 167 + } 168 + 169 + regions 170 + }) 171 + }) 172 + }