axtask/
task.rs

use alloc::{boxed::Box, string::String, sync::Arc};
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU8, AtomicU32, AtomicU64, Ordering};
use core::{alloc::Layout, cell::UnsafeCell, fmt, ptr::NonNull};

#[cfg(feature = "preempt")]
use core::sync::atomic::AtomicUsize;

use kspin::SpinNoIrq;
use memory_addr::{VirtAddr, align_up_4k};

use axhal::context::TaskContext;
#[cfg(feature = "tls")]
use axhal::tls::TlsArea;

use crate::task_ext::AxTaskExt;
use crate::{AxCpuMask, AxTask, AxTaskRef, WaitQueue};

/// A unique identifier for a thread.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct TaskId(u64);

/// The possible states of a task.
#[repr(u8)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum TaskState {
    /// Task is running on some CPU.
    Running = 1,
    /// Task is ready to run on some scheduler's ready queue.
    Ready = 2,
    /// Task is blocked (in a wait queue or the timer list) and has finished its
    /// scheduling process; it can safely be woken up by `notify()` on any run queue.
    Blocked = 3,
    /// Task has exited and is waiting to be dropped.
    Exited = 4,
}
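
// Typical lifecycle (sketch): a task starts `Ready`, becomes `Running` when a
// scheduler picks it, may return to `Ready` on yield/preemption, move to
// `Blocked` via a wait queue or timer and back to `Ready` on wake-up, and
// finally becomes `Exited` until the last reference to it is dropped.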

/// The inner task structure.
pub struct TaskInner {
    id: TaskId,
    name: String,
    is_idle: bool,
    is_init: bool,

    entry: Option<*mut dyn FnOnce()>,
    state: AtomicU8,

    /// CPU affinity mask.
    cpumask: SpinNoIrq<AxCpuMask>,

    /// Mark whether the task is in the wait queue.
    in_wait_queue: AtomicBool,

    /// Used to indicate the CPU ID where the task is running or will run.
    cpu_id: AtomicU32,
    /// Used to indicate whether the task is running on a CPU.
    #[cfg(feature = "smp")]
    on_cpu: AtomicBool,

    /// A ticket ID used to identify the timer event.
    /// Set by `set_timer_ticket()` when creating a timer event in `set_alarm_wakeup()`,
    /// expired by setting it to zero in `timer_ticket_expired()`, which is called by `cancel_events()`.
    #[cfg(feature = "irq")]
    timer_ticket_id: AtomicU64,

    #[cfg(feature = "preempt")]
    need_resched: AtomicBool,
    #[cfg(feature = "preempt")]
    preempt_disable_count: AtomicUsize,

    exit_code: AtomicI32,
    wait_for_exit: WaitQueue,

    kstack: Option<TaskStack>,
    ctx: UnsafeCell<TaskContext>,
    task_ext: AxTaskExt,

    #[cfg(feature = "tls")]
    tls: TlsArea,
}

impl TaskId {
    fn new() -> Self {
        static ID_COUNTER: AtomicU64 = AtomicU64::new(1);
        Self(ID_COUNTER.fetch_add(1, Ordering::Relaxed))
    }

    /// Convert the task ID to a `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }
}

impl From<u8> for TaskState {
    #[inline]
    fn from(state: u8) -> Self {
        match state {
            1 => Self::Running,
            2 => Self::Ready,
            3 => Self::Blocked,
            4 => Self::Exited,
            _ => unreachable!(),
        }
    }
}

unsafe impl Send for TaskInner {}
unsafe impl Sync for TaskInner {}

impl TaskInner {
    /// Create a new task with the given entry function and stack size.
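    ///
    /// # Examples
    ///
    /// A minimal construction sketch (illustrative only: the closure body and
    /// stack size are arbitrary, and real callers normally go through the
    /// crate's spawn API rather than using `TaskInner` directly):
    ///
    /// ```ignore
    /// let task = TaskInner::new(
    ///     || { /* task body */ },
    ///     "worker".into(),
    ///     0x4000, // requested stack size; rounded up to a 4 KiB multiple
    /// );
    /// assert_eq!(task.name(), "worker");
    /// ```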
    pub fn new<F>(entry: F, name: String, stack_size: usize) -> Self
    where
        F: FnOnce() + Send + 'static,
    {
        let mut t = Self::new_common(TaskId::new(), name);
        debug!("new task: {}", t.id_name());
        let kstack = TaskStack::alloc(align_up_4k(stack_size));

        #[cfg(feature = "tls")]
        let tls = VirtAddr::from(t.tls.tls_ptr() as usize);
        #[cfg(not(feature = "tls"))]
        let tls = VirtAddr::from(0);

        t.entry = Some(Box::into_raw(Box::new(entry)));
        t.ctx_mut().init(task_entry as usize, kstack.top(), tls);
        t.kstack = Some(kstack);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    /// Gets the ID of the task.
    pub const fn id(&self) -> TaskId {
        self.id
    }

    /// Gets the name of the task.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Get a combined string of the task ID and name.
    pub fn id_name(&self) -> alloc::string::String {
        alloc::format!("Task({}, {:?})", self.id.as_u64(), self.name)
    }

    /// Wait for the task to exit, and return the exit code.
    ///
    /// It will return immediately if the task has already exited (but has not been dropped).
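    ///
    /// # Examples
    ///
    /// A sketch of spawning and joining, assuming a task handle that
    /// dereferences to [`TaskInner`] (such as the [`AxTaskRef`] returned by
    /// the crate's spawn API):
    ///
    /// ```ignore
    /// let child = axtask::spawn_raw(|| { /* do some work */ }, "child".into(), 0x4000);
    /// // Blocks until the child exits; a normal return exits with code 0.
    /// assert_eq!(child.join(), Some(0));
    /// ```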
    pub fn join(&self) -> Option<i32> {
        self.wait_for_exit
            .wait_until(|| self.state() == TaskState::Exited);
        Some(self.exit_code.load(Ordering::Acquire))
    }

    /// Returns the pointer to the user-defined task extended data.
    ///
    /// # Safety
    ///
    /// The caller should not access the pointer directly, use [`TaskExtRef::task_ext`]
    /// or [`TaskExtMut::task_ext_mut`] instead.
    ///
    /// [`TaskExtRef::task_ext`]: crate::task_ext::TaskExtRef::task_ext
    /// [`TaskExtMut::task_ext_mut`]: crate::task_ext::TaskExtMut::task_ext_mut
    pub unsafe fn task_ext_ptr(&self) -> *mut u8 {
        self.task_ext.as_ptr()
    }

    /// Initialize the user-defined task extended data.
    ///
    /// Returns a reference to the task extended data if it has not been
    /// initialized yet (empty), otherwise returns [`None`].
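    ///
    /// # Examples
    ///
    /// A sketch only: the value written here must match the extension type
    /// registered through the crate's task-extension mechanism, and the
    /// `ProcData` type below is purely hypothetical.
    ///
    /// ```ignore
    /// struct ProcData { proc_id: u64 }
    ///
    /// let mut task = TaskInner::new(|| {}, "worker".into(), 0x4000);
    /// if task.init_task_ext(ProcData { proc_id: 1 }).is_some() {
    ///     // The slot can only be initialized once; later calls return `None`.
    ///     assert!(task.init_task_ext(ProcData { proc_id: 2 }).is_none());
    /// }
    /// ```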
    pub fn init_task_ext<T: Sized>(&mut self, data: T) -> Option<&T> {
        if self.task_ext.is_empty() {
            self.task_ext.write(data).map(|data| &*data)
        } else {
            None
        }
    }

    /// Returns a mutable reference to the task context.
    #[inline]
    pub const fn ctx_mut(&mut self) -> &mut TaskContext {
        self.ctx.get_mut()
    }

    /// Returns the top address of the kernel stack.
    #[inline]
    pub const fn kernel_stack_top(&self) -> Option<VirtAddr> {
        match &self.kstack {
            Some(s) => Some(s.top()),
            None => None,
        }
    }

    /// Returns the CPU ID where the task is running or will run.
    ///
    /// Note: the task may not actually be running on that CPU; it may just be sitting in its run queue.
    #[inline]
    pub fn cpu_id(&self) -> u32 {
        self.cpu_id.load(Ordering::Acquire)
    }

    /// Gets the CPU affinity mask of the task.
    ///
    /// Returns the CPU affinity mask of the task in type [`AxCpuMask`].
    #[inline]
    pub fn cpumask(&self) -> AxCpuMask {
        *self.cpumask.lock()
    }

    /// Sets the CPU affinity mask of the task.
    ///
    /// # Arguments
    ///
    /// * `cpumask` - The CPU affinity mask to be set, in type [`AxCpuMask`].
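    ///
    /// # Examples
    ///
    /// A sketch of pinning a task to CPU 0. The mask-building methods shown
    /// here are assumptions about the [`AxCpuMask`] API; consult the
    /// underlying `cpumask` crate for the exact constructors.
    ///
    /// ```ignore
    /// let task = TaskInner::new(|| {}, "pinned".into(), 0x4000);
    /// let mut mask = AxCpuMask::new(); // assumed: empty-mask constructor
    /// mask.set(0, true);               // assumed: enable the bit for CPU 0
    /// task.set_cpumask(mask);
    /// ```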
    #[inline]
    pub fn set_cpumask(&self, cpumask: AxCpuMask) {
        *self.cpumask.lock() = cpumask
    }
}

// private methods
impl TaskInner {
    fn new_common(id: TaskId, name: String) -> Self {
        let cpumask = crate::api::cpu_mask_full();

        Self {
            id,
            name,
            is_idle: false,
            is_init: false,
            entry: None,
            state: AtomicU8::new(TaskState::Ready as u8),
            // By default, the task is allowed to run on all CPUs.
            cpumask: SpinNoIrq::new(cpumask),
            in_wait_queue: AtomicBool::new(false),
            #[cfg(feature = "irq")]
            timer_ticket_id: AtomicU64::new(0),
            cpu_id: AtomicU32::new(0),
            #[cfg(feature = "smp")]
            on_cpu: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            need_resched: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            preempt_disable_count: AtomicUsize::new(0),
            exit_code: AtomicI32::new(0),
            wait_for_exit: WaitQueue::new(),
            kstack: None,
            ctx: UnsafeCell::new(TaskContext::new()),
            task_ext: AxTaskExt::empty(),
            #[cfg(feature = "tls")]
            tls: TlsArea::alloc(),
        }
    }

    /// Creates an "init task" using the current CPU states, to use as the
    /// current task.
    ///
    /// As it is the current task, no other task can switch to it until it
    /// switches out.
    ///
    /// There is no need to set the `entry`, `kstack` or `tls` fields, as
    /// they will be filled in automatically when the task switches out.
    pub(crate) fn new_init(name: String) -> Self {
        let mut t = Self::new_common(TaskId::new(), name);
        t.is_init = true;
        #[cfg(feature = "smp")]
        t.set_on_cpu(true);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    pub(crate) fn into_arc(self) -> AxTaskRef {
        Arc::new(AxTask::new(self))
    }

    #[inline]
    pub(crate) fn state(&self) -> TaskState {
        self.state.load(Ordering::Acquire).into()
    }

    #[inline]
    pub(crate) fn set_state(&self, state: TaskState) {
        self.state.store(state as u8, Ordering::Release)
    }

    /// Transitions the task state from `current_state` to `new_state`.
    ///
    /// Returns `true` if the current state is `current_state` and the state is
    /// successfully set to `new_state`, otherwise returns `false`.
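    ///
    /// A typical (sketched) use is the wake-up path, where the compare-and-swap
    /// ensures that only one caller gets to re-queue the task:
    ///
    /// ```ignore
    /// if task.transition_state(TaskState::Blocked, TaskState::Ready) {
    ///     // We won the race: only this caller may put the task on a run queue.
    /// } else {
    ///     // The state already changed (e.g. the task was woken elsewhere); do nothing.
    /// }
    /// ```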
    #[inline]
    pub(crate) fn transition_state(&self, current_state: TaskState, new_state: TaskState) -> bool {
        self.state
            .compare_exchange(
                current_state as u8,
                new_state as u8,
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .is_ok()
    }

    #[inline]
    pub(crate) fn is_running(&self) -> bool {
        matches!(self.state(), TaskState::Running)
    }

    #[inline]
    pub(crate) fn is_ready(&self) -> bool {
        matches!(self.state(), TaskState::Ready)
    }

    #[inline]
    pub(crate) const fn is_init(&self) -> bool {
        self.is_init
    }

    #[inline]
    pub(crate) const fn is_idle(&self) -> bool {
        self.is_idle
    }

    #[inline]
    pub(crate) fn in_wait_queue(&self) -> bool {
        self.in_wait_queue.load(Ordering::Acquire)
    }

    #[inline]
    pub(crate) fn set_in_wait_queue(&self, in_wait_queue: bool) {
        self.in_wait_queue.store(in_wait_queue, Ordering::Release);
    }

    /// Returns the task's current timer ticket ID.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket(&self) -> u64 {
        self.timer_ticket_id.load(Ordering::Acquire)
    }

    /// Sets the timer ticket ID.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn set_timer_ticket(&self, timer_ticket_id: u64) {
        // `timer_ticket_id` must not be set to 0,
        // because 0 is used to indicate that the timer event has expired.
        assert!(timer_ticket_id != 0);
        self.timer_ticket_id
            .store(timer_ticket_id, Ordering::Release);
    }

    /// Expires the timer ticket ID by setting it to 0,
    /// which indicates that the corresponding timer event has been triggered or cancelled.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket_expired(&self) {
        self.timer_ticket_id.store(0, Ordering::Release);
    }
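
    // A sketch of the ticket protocol implemented by the three methods above
    // (the ticket source and the wake-up call are crate-internal details):
    //
    //     let ticket = /* freshly allocated non-zero ID */;
    //     task.set_timer_ticket(ticket);          // arm a timer event
    //     // ... later, in the timer callback:
    //     if task.timer_ticket() == ticket {
    //         /* the event is still valid: wake the task */
    //     }
    //     // `cancel_events()` calls `timer_ticket_expired()`, so a stale
    //     // callback sees a mismatched (zero) ticket and does nothing.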

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn set_preempt_pending(&self, pending: bool) {
        self.need_resched.store(pending, Ordering::Release)
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn can_preempt(&self, current_disable_count: usize) -> bool {
        self.preempt_disable_count.load(Ordering::Acquire) == current_disable_count
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn disable_preempt(&self) {
        self.preempt_disable_count.fetch_add(1, Ordering::Release);
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn enable_preempt(&self, resched: bool) {
        if self.preempt_disable_count.fetch_sub(1, Ordering::Release) == 1 && resched {
            // If current task is pending to be preempted, do rescheduling.
            Self::current_check_preempt_pending();
        }
    }
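
    // Sketch of how the two methods above are typically paired (roughly what
    // the crate's `kernel_guard` integration does; not a literal quote):
    //
    //     let curr = crate::current();
    //     curr.disable_preempt();        // counter: 0 -> 1, may nest further
    //     // ... preemption-free critical section ...
    //     curr.enable_preempt(true);     // counter back to 0; reschedule if a
    //                                    // preemption became pending meanwhile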

    #[cfg(feature = "preempt")]
    fn current_check_preempt_pending() {
        use kernel_guard::NoPreemptIrqSave;
        let curr = crate::current();
        if curr.need_resched.load(Ordering::Acquire) && curr.can_preempt(0) {
            // Note: if we want to print log messages during `preempt_resched`, we have to
            // disable preemption here, because axlog may itself cause preemption.
            let mut rq = crate::current_run_queue::<NoPreemptIrqSave>();
            if curr.need_resched.load(Ordering::Acquire) {
                rq.preempt_resched()
            }
        }
    }

    /// Notify all tasks that join on this task.
    pub(crate) fn notify_exit(&self, exit_code: i32) {
        self.exit_code.store(exit_code, Ordering::Release);
        self.wait_for_exit.notify_all(false);
    }

    #[inline]
    pub(crate) const unsafe fn ctx_mut_ptr(&self) -> *mut TaskContext {
        self.ctx.get()
    }

    /// Set the CPU ID where the task is running or will run.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_cpu_id(&self, cpu_id: u32) {
        self.cpu_id.store(cpu_id, Ordering::Release);
    }

    /// Returns whether the task is running on a CPU.
    ///
    /// It is used to protect the task from being moved to a different run queue
    /// while it has not yet finished its scheduling process.
    /// The `on_cpu` field is set to `true` when the task is preparing to run on a CPU,
    /// and it is set to `false` when the task has finished its scheduling process in `clear_prev_task_on_cpu()`.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn on_cpu(&self) -> bool {
        self.on_cpu.load(Ordering::Acquire)
    }

    /// Sets whether the task is running on a CPU.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_on_cpu(&self, on_cpu: bool) {
        self.on_cpu.store(on_cpu, Ordering::Release)
    }
}

impl fmt::Debug for TaskInner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TaskInner")
            .field("id", &self.id)
            .field("name", &self.name)
            .field("state", &self.state())
            .finish()
    }
}

impl Drop for TaskInner {
    fn drop(&mut self) {
        debug!("task drop: {}", self.id_name());
    }
}

struct TaskStack {
    ptr: NonNull<u8>,
    layout: Layout,
}

impl TaskStack {
    pub fn alloc(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 16).unwrap();
        Self {
            ptr: NonNull::new(unsafe { alloc::alloc::alloc(layout) }).unwrap(),
            layout,
        }
    }

    pub const fn top(&self) -> VirtAddr {
        unsafe { core::mem::transmute(self.ptr.as_ptr().add(self.layout.size())) }
    }
}

impl Drop for TaskStack {
    fn drop(&mut self) {
        unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) }
    }
}

use core::mem::ManuallyDrop;

/// A wrapper of [`AxTaskRef`] as the current task.
///
/// It won't change the reference count of the task when created or dropped.
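///
/// # Examples
///
/// A small usage sketch (the log macro is illustrative):
///
/// ```ignore
/// let curr = axtask::current();               // yields a `CurrentTask`
/// log::info!("running {}", curr.id_name());   // derefs to `TaskInner`
/// ```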
pub struct CurrentTask(ManuallyDrop<AxTaskRef>);

impl CurrentTask {
    pub(crate) fn try_get() -> Option<Self> {
        let ptr: *const super::AxTask = axhal::percpu::current_task_ptr();
        if !ptr.is_null() {
            Some(Self(unsafe { ManuallyDrop::new(AxTaskRef::from_raw(ptr)) }))
        } else {
            None
        }
    }

    pub(crate) fn get() -> Self {
        Self::try_get().expect("current task is uninitialized")
    }

    /// Converts [`CurrentTask`] to [`AxTaskRef`].
    pub fn as_task_ref(&self) -> &AxTaskRef {
        &self.0
    }

    pub(crate) fn clone(&self) -> AxTaskRef {
        self.0.deref().clone()
    }

    pub(crate) fn ptr_eq(&self, other: &AxTaskRef) -> bool {
        Arc::ptr_eq(&self.0, other)
    }

    pub(crate) unsafe fn init_current(init_task: AxTaskRef) {
        assert!(init_task.is_init());
        #[cfg(feature = "tls")]
        axhal::asm::write_thread_pointer(init_task.tls.tls_ptr() as usize);
        let ptr = Arc::into_raw(init_task);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }

    pub(crate) unsafe fn set_current(prev: Self, next: AxTaskRef) {
        let Self(arc) = prev;
        ManuallyDrop::into_inner(arc); // Call `Arc::drop()` to decrease the previous task's reference count.
        let ptr = Arc::into_raw(next);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }
}

impl Deref for CurrentTask {
    type Target = TaskInner;
    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

extern "C" fn task_entry() -> ! {
    #[cfg(feature = "smp")]
    unsafe {
        // Clear the prev task on CPU before running the task entry function.
        crate::run_queue::clear_prev_task_on_cpu();
    }
    // Enable IRQs (if feature "irq" is enabled) before running the task entry function.
    #[cfg(feature = "irq")]
    axhal::asm::enable_irqs();
    let task = crate::current();
    if let Some(entry) = task.entry {
        unsafe { Box::from_raw(entry)() };
    }
    crate::exit(0);
}