Next Generation WASM Microkernel Operating System

refactor(kasync): simplify stub (#526)

+99 -114
+2 -4
libs/kasync/src/executor.rs
··· 7 7 8 8 mod steal; 9 9 10 - use alloc::boxed::Box; 11 10 use core::alloc::AllocError; 12 11 use core::num::NonZeroUsize; 13 12 use core::ptr; ··· 25 24 use crate::future::Either; 26 25 use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; 27 26 use crate::sync::wait_queue::WaitQueue; 28 - use crate::task::{Header, JoinHandle, PollResult, Task, TaskBuilder, TaskRef}; 27 + use crate::task::{Header, JoinHandle, PollResult, TaskBuilder, TaskRef}; 29 28 30 29 #[derive(Debug)] 31 30 pub struct Executor { ··· 332 331 333 332 impl Scheduler { 334 333 fn new() -> Result<Self, AllocError> { 335 - let stub_task = Box::try_new(Task::new_stub())?; 336 - let (stub_task, _) = TaskRef::new_allocated(stub_task); 334 + let stub_task = TaskRef::new_stub()?; 337 335 338 336 Ok(Self { 339 337 run_queue: MpscQueue::new_with_stub(stub_task),
+2 -4
libs/kasync/src/executor/steal.rs
··· 5 5 // http://opensource.org/licenses/MIT>, at your option. This file may not be 6 6 // copied, modified, or distributed except according to those terms. 7 7 8 - use alloc::boxed::Box; 9 8 use core::alloc::AllocError; 10 9 use core::fmt::Debug; 11 10 use core::num::NonZeroUsize; ··· 14 13 15 14 use crate::executor::Scheduler; 16 15 use crate::loom::sync::atomic::{AtomicUsize, Ordering}; 17 - use crate::task::{Header, Task, TaskRef}; 16 + use crate::task::{Header, TaskRef}; 18 17 19 18 #[derive(Debug, Clone, Eq, PartialEq)] 20 19 #[non_exhaustive] ··· 34 33 35 34 impl Injector { 36 35 pub fn new() -> Result<Self, AllocError> { 37 - let stub_task = Box::try_new(Task::new_stub())?; 38 - let (stub_task, _) = TaskRef::new_allocated(stub_task); 36 + let stub_task = TaskRef::new_stub()?; 39 37 40 38 Ok(Self { 41 39 run_queue: MpscQueue::new_with_stub(stub_task),
+95 -106
libs/kasync/src/task.rs
··· 12 12 mod yield_now; 13 13 14 14 use alloc::boxed::Box; 15 + use core::alloc::AllocError; 15 16 use core::any::type_name; 16 17 use core::mem::{ManuallyDrop, offset_of}; 17 18 use core::panic::AssertUnwindSafe; ··· 80 81 pub struct TaskRef(NonNull<Header>); 81 82 82 83 #[repr(C)] 83 - pub struct Task<F: Future>(CachePadded<TaskInner<F>>); 84 + struct Task<F: Future>(CachePadded<TaskInner<F>>); 84 85 85 86 #[repr(C)] 86 87 struct TaskInner<F: Future> { ··· 200 201 // === impl TaskRef === 201 202 202 203 impl TaskRef { 203 - #[track_caller] 204 - pub(crate) fn new_allocated<F>(task: Box<Task<F>>) -> (Self, JoinHandle<F::Output>) 205 - where 206 - F: Future, 207 - { 208 - assert_eq!(task.state().refcount(), 1); 209 - let ptr = Box::into_raw(task); 210 - 211 - // Safety: we just allocated the ptr so it is never null 212 - let task = Self(unsafe { NonNull::new_unchecked(ptr).cast() }); 213 - let join = JoinHandle::new(task.clone()); 214 - 215 - (task, join) 216 - } 217 - 218 204 /// Returns the tasks unique[^1] identifier. 219 205 /// 220 206 /// [^1]: Unique to all *currently running* tasks, *not* unique across spacetime. See [`Id`] for details. ··· 244 230 canceled 245 231 } 246 232 247 - pub(crate) fn clone_from_raw(ptr: NonNull<Header>) -> TaskRef { 248 - let this = Self(ptr); 249 - this.state().clone_ref(); 250 - this 251 - } 252 - 253 - pub(crate) fn header_ptr(&self) -> NonNull<Header> { 254 - self.0 255 - } 256 - 257 - pub(crate) fn header(&self) -> &Header { 258 - // Safety: constructor ensures the pointer is always valid 259 - unsafe { self.0.as_ref() } 260 - } 261 - 262 - /// Returns a reference to the task's state. 
263 - pub(crate) fn state(&self) -> &State { 264 - &self.header().state 265 - } 266 - 267 233 pub(crate) fn poll(&self) -> PollResult { 268 234 let poll_fn = self.header().vtable.poll; 269 235 // Safety: Called through our Vtable so this access should be fine ··· 320 286 } 321 287 } 322 288 323 - pub(crate) unsafe fn from_raw(ptr: *const Header) -> Self { 324 - // Safety: ensured by caller 325 - Self(unsafe { NonNull::new_unchecked(ptr.cast_mut()) }) 289 + pub(crate) fn new_stub() -> Result<Self, AllocError> { 290 + const HEAP_STUB_VTABLE: VTable = VTable { 291 + poll: stub_poll, 292 + poll_join: stub_poll_join, 293 + deallocate: stub_deallocate, 294 + }; 295 + 296 + unsafe fn stub_poll(ptr: NonNull<Header>) -> PollResult { 297 + // Safety: this method should never be called 298 + unsafe { 299 + debug_assert!(ptr.as_ref().id.is_stub()); 300 + unreachable!("stub task ({ptr:?}) should never be polled!"); 301 + } 302 + } 303 + 304 + unsafe fn stub_poll_join( 305 + ptr: NonNull<Header>, 306 + _outptr: NonNull<()>, 307 + _cx: &mut Context<'_>, 308 + ) -> Poll<Result<(), JoinError<()>>> { 309 + // Safety: this method should never be called 310 + unsafe { 311 + debug_assert!(ptr.as_ref().id.is_stub()); 312 + unreachable!("stub task ({ptr:?}) should never be polled!"); 313 + } 314 + } 315 + 316 + unsafe fn stub_deallocate(ptr: NonNull<Header>) { 317 + // Safety: heap-allocated stubs *are* deallocated (when the owning queue is dropped); `ptr` was allocated via `Box::try_new` in `new_stub`, so reconstructing the `Box` here is sound. 318 + unsafe { 319 + debug_assert!(ptr.as_ref().id.is_stub()); 320 + drop(Box::from_raw(ptr.as_ptr())); 321 + } 322 + } 323 + 324 + let inner = Box::into_raw(Box::try_new(Header { 325 + state: State::new(), 326 + vtable: &HEAP_STUB_VTABLE, 327 + id: Id::stub(), 328 + run_queue_links: mpsc_queue::Links::new_stub(), 329 + span: tracing::Span::none(), 330 + scheduler: UnsafeCell::new(None), 331 + })?); 332 + 333 + // Safety: we just allocated the ptr so it is never null 334 + Ok(Self(unsafe { NonNull::new_unchecked(inner) })) 326 335 } 327 336 328 - pub(crate) fn into_raw(self) -> 
*const Header { 329 - let this = ManuallyDrop::new(self); 330 - this.0.as_ptr() 337 + pub(crate) fn clone_from_raw(ptr: NonNull<Header>) -> TaskRef { 338 + let this = Self(ptr); 339 + this.state().clone_ref(); 340 + this 341 + } 342 + 343 + pub(crate) fn header_ptr(&self) -> NonNull<Header> { 344 + self.0 345 + } 346 + 347 + // ===== private methods ===== 348 + 349 + #[track_caller] 350 + fn new_allocated<F>(task: Box<Task<F>>) -> (Self, JoinHandle<F::Output>) 351 + where 352 + F: Future, 353 + { 354 + assert_eq!(task.state().refcount(), 1); 355 + let ptr = Box::into_raw(task); 356 + 357 + // Safety: we just allocated the ptr so it is never null 358 + let task = Self(unsafe { NonNull::new_unchecked(ptr).cast() }); 359 + let join = JoinHandle::new(task.clone()); 360 + 361 + (task, join) 331 362 } 332 363 333 364 fn schedule(self) { ··· 340 371 341 372 scheduler.schedule(self); 342 373 } 374 + 375 + fn header(&self) -> &Header { 376 + // Safety: constructor ensures the pointer is always valid 377 + unsafe { self.0.as_ref() } 378 + } 379 + 380 + /// Returns a reference to the task's state. 381 + fn state(&self) -> &State { 382 + &self.header().state 383 + } 384 + 385 + unsafe fn from_raw(ptr: *const Header) -> Self { 386 + // Safety: ensured by caller 387 + Self(unsafe { NonNull::new_unchecked(ptr.cast_mut()) }) 388 + } 389 + 390 + fn into_raw(self) -> *const Header { 391 + let this = ManuallyDrop::new(self); 392 + this.0.as_ptr() 393 + } 394 + 395 + // ===== private waker-related methods ===== 343 396 344 397 fn wake(self) { 345 398 match self.state().wake_by_val() { ··· 760 813 } 761 814 } 762 815 763 - impl Task<Stub> { 764 - const HEAP_STUB_VTABLE: VTable = VTable { 765 - poll: stub_poll, 766 - poll_join: stub_poll_join, 767 - // Heap allocated stub tasks *will* need to be deallocated, since the 768 - // scheduler will deallocate its stub task if it's dropped. 769 - deallocate: Self::deallocate, 770 - }; 771 - 772 - loom_const_fn! 
{ 773 - /// Create a new stub task. 774 - pub(crate) const fn new_stub() -> Self { 775 - let inner = TaskInner { 776 - header: Header { 777 - state: State::new(), 778 - vtable: &Self::HEAP_STUB_VTABLE, 779 - id: Id::stub(), 780 - run_queue_links: mpsc_queue::Links::new_stub(), 781 - span: tracing::Span::none(), 782 - scheduler: UnsafeCell::new(None) 783 - }, 784 - stage: UnsafeCell::new(Stage::Pending(Stub)), 785 - join_waker: UnsafeCell::new(None), 786 - }; 787 - 788 - Self(CachePadded(inner)) 789 - } 790 - } 791 - } 792 - 793 816 // === impl Stage === 794 817 795 818 impl<F> Stage<F> ··· 882 905 .cast() 883 906 } 884 907 } 885 - 886 - /// DO NOT confuse this with [`TaskSTub`]. This type is just a zero-size placeholder so we 887 - /// can plug *something* into the generics when creating the *heap allocated* stub task. 888 - /// This type is *not* publicly exported, contrary to [`TaskSTub`] which users will have to statically 889 - /// allocate themselves. 890 - #[derive(Copy, Clone, Debug)] 891 - pub(crate) struct Stub; 892 - 893 - impl Future for Stub { 894 - type Output = (); 895 - fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> { 896 - unreachable!("the stub task should never be polled!") 897 - } 898 - } 899 - 900 - unsafe fn stub_poll(ptr: NonNull<Header>) -> PollResult { 901 - // Safety: this method should never be called 902 - unsafe { 903 - debug_assert!(ptr.as_ref().id.is_stub()); 904 - unreachable!("stub task ({ptr:?}) should never be polled!"); 905 - } 906 - } 907 - 908 - unsafe fn stub_poll_join( 909 - ptr: NonNull<Header>, 910 - _outptr: NonNull<()>, 911 - _cx: &mut Context<'_>, 912 - ) -> Poll<Result<(), JoinError<()>>> { 913 - // Safety: this method should never be called 914 - unsafe { 915 - debug_assert!(ptr.as_ref().id.is_stub()); 916 - unreachable!("stub task ({ptr:?}) should never be polled!"); 917 - } 918 - }