axtask/
task.rs

use alloc::{boxed::Box, string::String, sync::Arc};
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU8, AtomicU32, AtomicU64, Ordering};
use core::{alloc::Layout, cell::UnsafeCell, fmt, ptr::NonNull};

#[cfg(feature = "preempt")]
use core::sync::atomic::AtomicUsize;

use kspin::SpinNoIrq;
use memory_addr::{VirtAddr, align_up_4k};

use axhal::context::TaskContext;
#[cfg(feature = "tls")]
use axhal::tls::TlsArea;

use crate::task_ext::AxTaskExt;
use crate::{AxCpuMask, AxTask, AxTaskRef, WaitQueue};

/// A unique identifier for a thread.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct TaskId(u64);

/// The possible states of a task.
#[repr(u8)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum TaskState {
    /// Task is running on some CPU.
    Running = 1,
    /// Task is ready to run on some scheduler's ready queue.
    Ready = 2,
    /// Task is blocked (in a wait queue or the timer list) and has finished its
    /// scheduling process; it can safely be woken up by `notify()` on any run queue.
    Blocked = 3,
    /// Task has exited and is waiting to be dropped.
    Exited = 4,
}
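
// Typical state transitions (an informal sketch, inferred from the methods
// defined below; not part of the original source):
//
//   Ready   -> Running   picked from a run queue by the scheduler
//   Running -> Ready     preempted or yielded, put back on a run queue
//   Running -> Blocked   parked in a wait queue or on the timer list
//   Blocked -> Ready     woken up by `notify()` or a timer event
//   Running -> Exited    finished; dropped after joiners are notified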

/// The inner task structure.
pub struct TaskInner {
    id: TaskId,
    name: String,
    is_idle: bool,
    is_init: bool,

    entry: Option<*mut dyn FnOnce()>,
    state: AtomicU8,

    /// CPU affinity mask.
    cpumask: SpinNoIrq<AxCpuMask>,

    /// Mark whether the task is in the wait queue.
    in_wait_queue: AtomicBool,

    /// Used to indicate the CPU ID where the task is running or will run.
    cpu_id: AtomicU32,
    /// Used to indicate whether the task is running on a CPU.
    #[cfg(feature = "smp")]
    on_cpu: AtomicBool,

    /// A ticket ID used to identify the timer event.
    /// Set by `set_timer_ticket()` when a timer event is created in `set_alarm_wakeup()`,
    /// and expired by setting it to zero in `timer_ticket_expired()`, which is called by `cancel_events()`.
    #[cfg(feature = "irq")]
    timer_ticket_id: AtomicU64,

    #[cfg(feature = "preempt")]
    need_resched: AtomicBool,
    #[cfg(feature = "preempt")]
    preempt_disable_count: AtomicUsize,

    exit_code: AtomicI32,
    wait_for_exit: WaitQueue,

    kstack: Option<TaskStack>,
    ctx: UnsafeCell<TaskContext>,
    task_ext: AxTaskExt,

    #[cfg(feature = "tls")]
    tls: TlsArea,
}

impl TaskId {
    fn new() -> Self {
        static ID_COUNTER: AtomicU64 = AtomicU64::new(1);
        Self(ID_COUNTER.fetch_add(1, Ordering::Relaxed))
    }

    /// Convert the task ID to a `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }
}

impl From<u8> for TaskState {
    #[inline]
    fn from(state: u8) -> Self {
        match state {
            1 => Self::Running,
            2 => Self::Ready,
            3 => Self::Blocked,
            4 => Self::Exited,
            _ => unreachable!(),
        }
    }
}

unsafe impl Send for TaskInner {}
unsafe impl Sync for TaskInner {}

impl TaskInner {
    /// Create a new task with the given entry function and stack size.
    pub fn new<F>(entry: F, name: String, stack_size: usize) -> Self
    where
        F: FnOnce() + Send + 'static,
    {
        let mut t = Self::new_common(TaskId::new(), name);
        debug!("new task: {}", t.id_name());
        let kstack = TaskStack::alloc(align_up_4k(stack_size));

        #[cfg(feature = "tls")]
        let tls = VirtAddr::from(t.tls.tls_ptr() as usize);
        #[cfg(not(feature = "tls"))]
        let tls = VirtAddr::from(0);

        t.entry = Some(Box::into_raw(Box::new(entry)));
        t.ctx_mut().init(task_entry as usize, kstack.top(), tls);
        t.kstack = Some(kstack);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    /// Gets the ID of the task.
    pub const fn id(&self) -> TaskId {
        self.id
    }

    /// Gets the name of the task.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Get a combined string of the task ID and name.
    pub fn id_name(&self) -> alloc::string::String {
        alloc::format!("Task({}, {:?})", self.id.as_u64(), self.name)
    }

    /// Wait for the task to exit, and return the exit code.
    ///
    /// It will return immediately if the task has already exited (but not dropped).
    pub fn join(&self) -> Option<i32> {
        self.wait_for_exit
            .wait_until(|| self.state() == TaskState::Exited);
        Some(self.exit_code.load(Ordering::Acquire))
    }

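    // Illustrative usage sketch (not part of the original source). It assumes
    // the crate's public spawn API (e.g. `axtask::spawn_raw`) returns an
    // `AxTaskRef` whose methods deref to `TaskInner`:
    //
    //     let task = axtask::spawn_raw(|| { /* work */ }, "worker".into(), 0x4000);
    //     let exit_code = task.join(); // blocks until the task exits
    //     assert_eq!(exit_code, Some(0));
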
    /// Returns the pointer to the user-defined task extended data.
    ///
    /// # Safety
    ///
    /// The caller should not access the pointer directly; use [`TaskExtRef::task_ext`]
    /// or [`TaskExtMut::task_ext_mut`] instead.
    ///
    /// [`TaskExtRef::task_ext`]: crate::task_ext::TaskExtRef::task_ext
    /// [`TaskExtMut::task_ext_mut`]: crate::task_ext::TaskExtMut::task_ext_mut
    pub unsafe fn task_ext_ptr(&self) -> *mut u8 {
        self.task_ext.as_ptr()
    }

    /// Initialize the user-defined task extended data.
    ///
    /// Returns a reference to the task extended data if it has not been
    /// initialized yet (empty), otherwise returns [`None`].
    pub fn init_task_ext<T: Sized>(&mut self, data: T) -> Option<&T> {
        if self.task_ext.is_empty() {
            self.task_ext.write(data).map(|data| &*data)
        } else {
            None
        }
    }

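    // Illustrative sketch (not part of the original source), assuming a
    // hypothetical `ProcessData` type has been registered as the task
    // extension (e.g. via this crate's `def_task_ext!` macro). The first
    // initialization succeeds; a second attempt returns `None`:
    //
    //     let mut task = TaskInner::new(|| {}, "worker".into(), 0x4000);
    //     assert!(task.init_task_ext(ProcessData { pid: 1 }).is_some());
    //     assert!(task.init_task_ext(ProcessData { pid: 2 }).is_none());
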
    /// Returns a mutable reference to the task context.
    #[inline]
    pub const fn ctx_mut(&mut self) -> &mut TaskContext {
        self.ctx.get_mut()
    }

    /// Returns the top address of the kernel stack.
    #[inline]
    pub const fn kernel_stack_top(&self) -> Option<VirtAddr> {
        match &self.kstack {
            Some(s) => Some(s.top()),
            None => None,
        }
    }

    /// Returns the CPU ID where the task is running or will run.
    ///
    /// Note: the task may not actually be running on that CPU; it may just be
    /// waiting in that CPU's run queue.
    #[inline]
    pub fn cpu_id(&self) -> u32 {
        self.cpu_id.load(Ordering::Acquire)
    }

    /// Gets the CPU affinity mask of the task.
    ///
    /// Returns the CPU affinity mask of the task in type [`AxCpuMask`].
    #[inline]
    pub fn cpumask(&self) -> AxCpuMask {
        *self.cpumask.lock()
    }

    /// Sets the CPU affinity mask of the task.
    ///
    /// # Arguments
    /// `cpumask` - The CPU affinity mask to be set, in type [`AxCpuMask`].
    #[inline]
    pub fn set_cpumask(&self, cpumask: AxCpuMask) {
        *self.cpumask.lock() = cpumask
    }
}
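
// Illustrative sketch (not part of the original source): saving and restoring
// a task's affinity, using only the `AxCpuMask` constructor seen in this file:
//
//     let saved = task.cpumask();           // snapshot the current affinity
//     task.set_cpumask(AxCpuMask::full());  // temporarily allow all CPUs
//     task.set_cpumask(saved);              // restore the original mask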

// private methods
impl TaskInner {
    fn new_common(id: TaskId, name: String) -> Self {
        Self {
            id,
            name,
            is_idle: false,
            is_init: false,
            entry: None,
            state: AtomicU8::new(TaskState::Ready as u8),
            // By default, the task is allowed to run on all CPUs.
            cpumask: SpinNoIrq::new(AxCpuMask::full()),
            in_wait_queue: AtomicBool::new(false),
            #[cfg(feature = "irq")]
            timer_ticket_id: AtomicU64::new(0),
            cpu_id: AtomicU32::new(0),
            #[cfg(feature = "smp")]
            on_cpu: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            need_resched: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            preempt_disable_count: AtomicUsize::new(0),
            exit_code: AtomicI32::new(0),
            wait_for_exit: WaitQueue::new(),
            kstack: None,
            ctx: UnsafeCell::new(TaskContext::new()),
            task_ext: AxTaskExt::empty(),
            #[cfg(feature = "tls")]
            tls: TlsArea::alloc(),
        }
    }

    /// Creates an "init task" using the current CPU states, to use as the
    /// current task.
    ///
    /// As it is the current task, no other task can switch to it until it
    /// switches out.
    ///
    /// There is no need to set the `entry`, `kstack` or `tls` fields, as they
    /// will be filled in automatically when the task switches out.
    pub(crate) fn new_init(name: String) -> Self {
        let mut t = Self::new_common(TaskId::new(), name);
        t.is_init = true;
        #[cfg(feature = "smp")]
        t.set_on_cpu(true);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    pub(crate) fn into_arc(self) -> AxTaskRef {
        Arc::new(AxTask::new(self))
    }

    #[inline]
    pub(crate) fn state(&self) -> TaskState {
        self.state.load(Ordering::Acquire).into()
    }

    #[inline]
    pub(crate) fn set_state(&self, state: TaskState) {
        self.state.store(state as u8, Ordering::Release)
    }

    /// Transitions the task state from `current_state` to `new_state`.
    ///
    /// Returns `true` if the current state is `current_state` and it is
    /// successfully set to `new_state`; otherwise returns `false`.
    #[inline]
    pub(crate) fn transition_state(&self, current_state: TaskState, new_state: TaskState) -> bool {
        self.state
            .compare_exchange(
                current_state as u8,
                new_state as u8,
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .is_ok()
    }

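    // Illustrative sketch (not part of the original source): how a wakeup path
    // might use `transition_state` so that exactly one waker wins the race to
    // requeue a blocked task:
    //
    //     if task.transition_state(TaskState::Blocked, TaskState::Ready) {
    //         // The CAS succeeded, so no other waker can also get here;
    //         // it is now safe to push the task onto a run queue.
    //     }
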
    #[inline]
    pub(crate) fn is_running(&self) -> bool {
        matches!(self.state(), TaskState::Running)
    }

    #[inline]
    pub(crate) fn is_ready(&self) -> bool {
        matches!(self.state(), TaskState::Ready)
    }

    #[inline]
    pub(crate) const fn is_init(&self) -> bool {
        self.is_init
    }

    #[inline]
    pub(crate) const fn is_idle(&self) -> bool {
        self.is_idle
    }

    #[inline]
    pub(crate) fn in_wait_queue(&self) -> bool {
        self.in_wait_queue.load(Ordering::Acquire)
    }

    #[inline]
    pub(crate) fn set_in_wait_queue(&self, in_wait_queue: bool) {
        self.in_wait_queue.store(in_wait_queue, Ordering::Release);
    }

    /// Returns the task's current timer ticket ID.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket(&self) -> u64 {
        self.timer_ticket_id.load(Ordering::Acquire)
    }

    /// Set the timer ticket ID.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn set_timer_ticket(&self, timer_ticket_id: u64) {
        // `timer_ticket_id` must never be set to 0 here,
        // because 0 is reserved to indicate that the timer event has expired.
        assert!(timer_ticket_id != 0);
        self.timer_ticket_id
            .store(timer_ticket_id, Ordering::Release);
    }

    /// Expires the timer ticket ID by setting it to 0,
    /// which marks the corresponding timer event as triggered or expired.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket_expired(&self) {
        self.timer_ticket_id.store(0, Ordering::Release);
    }

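    // Ticket lifecycle (a summary of the three methods above): a nonzero ID is
    // assigned when a timer event is armed; a firing timer event is honored
    // only while its ticket still matches `timer_ticket()`; `cancel_events()`
    // resets the ticket to 0 via `timer_ticket_expired()`, so stale timer
    // events are recognized and ignored.
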
    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn set_preempt_pending(&self, pending: bool) {
        self.need_resched.store(pending, Ordering::Release)
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn can_preempt(&self, current_disable_count: usize) -> bool {
        self.preempt_disable_count.load(Ordering::Acquire) == current_disable_count
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn disable_preempt(&self) {
        self.preempt_disable_count.fetch_add(1, Ordering::Release);
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn enable_preempt(&self, resched: bool) {
        if self.preempt_disable_count.fetch_sub(1, Ordering::Release) == 1 && resched {
            // If the current task is pending preemption, reschedule now.
            Self::current_check_preempt_pending();
        }
    }

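    // Illustrative sketch (not part of the original source): the intended
    // pairing of `disable_preempt` / `enable_preempt` around a critical
    // section, as a hypothetical RAII-style guard might drive it:
    //
    //     let curr = crate::current();
    //     curr.disable_preempt();
    //     // ... critical section: preemption cannot happen here ...
    //     curr.enable_preempt(true); // may reschedule if one became pending
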
    #[cfg(feature = "preempt")]
    fn current_check_preempt_pending() {
        use kernel_guard::NoPreemptIrqSave;
        let curr = crate::current();
        if curr.need_resched.load(Ordering::Acquire) && curr.can_preempt(0) {
            // Note: if we want to print log messages during `preempt_resched`,
            // we have to disable preemption here, because axlog itself may
            // cause preemption.
            let mut rq = crate::current_run_queue::<NoPreemptIrqSave>();
            if curr.need_resched.load(Ordering::Acquire) {
                rq.preempt_resched()
            }
        }
    }

    /// Notify all tasks that join on this task.
    pub(crate) fn notify_exit(&self, exit_code: i32) {
        self.exit_code.store(exit_code, Ordering::Release);
        self.wait_for_exit.notify_all(false);
    }

    #[inline]
    pub(crate) const unsafe fn ctx_mut_ptr(&self) -> *mut TaskContext {
        self.ctx.get()
    }

    /// Set the CPU ID where the task is running or will run.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_cpu_id(&self, cpu_id: u32) {
        self.cpu_id.store(cpu_id, Ordering::Release);
    }

    /// Returns whether the task is running on a CPU.
    ///
    /// It is used to protect the task from being moved to a different run queue
    /// while it has not yet finished its scheduling process.
    /// The `on_cpu` field is set to `true` when the task is preparing to run on a CPU,
    /// and set to `false` when the task has finished its scheduling process in `clear_prev_task_on_cpu()`.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn on_cpu(&self) -> bool {
        self.on_cpu.load(Ordering::Acquire)
    }

    /// Sets whether the task is running on a CPU.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_on_cpu(&self, on_cpu: bool) {
        self.on_cpu.store(on_cpu, Ordering::Release)
    }
}

impl fmt::Debug for TaskInner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TaskInner")
            .field("id", &self.id)
            .field("name", &self.name)
            .field("state", &self.state())
            .finish()
    }
}

impl Drop for TaskInner {
    fn drop(&mut self) {
        debug!("task drop: {}", self.id_name());
    }
}

struct TaskStack {
    ptr: NonNull<u8>,
    layout: Layout,
}

impl TaskStack {
    pub fn alloc(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 16).unwrap();
        Self {
            ptr: NonNull::new(unsafe { alloc::alloc::alloc(layout) }).unwrap(),
            layout,
        }
    }

    pub const fn top(&self) -> VirtAddr {
        unsafe { core::mem::transmute(self.ptr.as_ptr().add(self.layout.size())) }
    }
}

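// Note on `top()`: it returns the highest address of the allocation
// (`ptr + size`). Stacks grow downward, so this address is what `new()` passes
// to `TaskContext::init` as the initial stack pointer. The 16-byte alignment
// in `alloc()` matches common ABI stack-alignment requirements.
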
impl Drop for TaskStack {
    fn drop(&mut self) {
        unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) }
    }
}

use core::mem::ManuallyDrop;

/// A wrapper of [`AxTaskRef`] as the current task.
///
/// It won't change the reference count of the task when created or dropped.
pub struct CurrentTask(ManuallyDrop<AxTaskRef>);

impl CurrentTask {
    pub(crate) fn try_get() -> Option<Self> {
        let ptr: *const super::AxTask = axhal::percpu::current_task_ptr();
        if !ptr.is_null() {
            Some(Self(unsafe { ManuallyDrop::new(AxTaskRef::from_raw(ptr)) }))
        } else {
            None
        }
    }

    pub(crate) fn get() -> Self {
        Self::try_get().expect("current task is uninitialized")
    }

    /// Converts [`CurrentTask`] to [`AxTaskRef`].
    pub fn as_task_ref(&self) -> &AxTaskRef {
        &self.0
    }

    pub(crate) fn clone(&self) -> AxTaskRef {
        self.0.deref().clone()
    }

    pub(crate) fn ptr_eq(&self, other: &AxTaskRef) -> bool {
        Arc::ptr_eq(&self.0, other)
    }

    pub(crate) unsafe fn init_current(init_task: AxTaskRef) {
        assert!(init_task.is_init());
        #[cfg(feature = "tls")]
        axhal::asm::write_thread_pointer(init_task.tls.tls_ptr() as usize);
        let ptr = Arc::into_raw(init_task);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }

    pub(crate) unsafe fn set_current(prev: Self, next: AxTaskRef) {
        let Self(arc) = prev;
        ManuallyDrop::into_inner(arc); // call `Arc::drop()` to decrease the previous task's reference count
        let ptr = Arc::into_raw(next);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }
}

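// Reference-count bookkeeping (a summary of the methods above): `init_current`
// leaks one strong reference into the per-CPU pointer via `Arc::into_raw`; on
// every context switch, `set_current` reclaims and drops the previous task's
// leaked reference, then leaks one for the next task. The per-CPU pointer thus
// always owns exactly one strong count of the current task.
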
impl Deref for CurrentTask {
    type Target = TaskInner;
    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

extern "C" fn task_entry() -> ! {
    #[cfg(feature = "smp")]
    unsafe {
        // Clear the prev task on CPU before running the task entry function.
        crate::run_queue::clear_prev_task_on_cpu();
    }
    // Enable IRQs (if the "irq" feature is enabled) before running the task entry function.
    #[cfg(feature = "irq")]
    axhal::asm::enable_irqs();
    let task = crate::current();
    if let Some(entry) = task.entry {
        unsafe { Box::from_raw(entry)() };
    }
    crate::exit(0);
}