use alloc::{boxed::Box, string::String, sync::Arc};
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU8, AtomicU32, AtomicU64, Ordering};
use core::{alloc::Layout, cell::UnsafeCell, fmt, mem::ManuallyDrop, ptr::NonNull};

#[cfg(feature = "preempt")]
use core::sync::atomic::AtomicUsize;

use kspin::SpinNoIrq;
use memory_addr::{VirtAddr, align_up_4k};

use axhal::context::TaskContext;
#[cfg(feature = "tls")]
use axhal::tls::TlsArea;

use crate::task_ext::AxTaskExt;
use crate::{AxCpuMask, AxTask, AxTaskRef, WaitQueue};

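/// A unique identifier for a task, allocated from a global monotonically
/// increasing counter.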
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct TaskId(u64);

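/// The possible scheduling states of a task.
///
/// The discriminant is stored in `TaskInner::state` as a raw `u8` so that the
/// state can be read and updated atomically.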
#[repr(u8)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum TaskState {
    Running = 1,
    Ready = 2,
    Blocked = 3,
    Exited = 4,
}

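/// The inner task structure.
///
/// It holds the task's metadata (ID, name, state), scheduling bookkeeping
/// (CPU affinity, wait-queue and timer flags, preemption counters), the exit
/// code and its wait queue, the kernel stack, and the saved CPU context used
/// for task switching.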
pub struct TaskInner {
    id: TaskId,
    name: String,
    is_idle: bool,
    is_init: bool,

    /// The entry closure, boxed and leaked until the task first runs.
    entry: Option<*mut dyn FnOnce()>,
    /// The task state, stored as a raw `u8` (see [`TaskState`]).
    state: AtomicU8,

    /// CPU affinity mask: the set of CPUs this task is allowed to run on.
    cpumask: SpinNoIrq<AxCpuMask>,

    /// Whether the task is currently linked into a wait queue.
    in_wait_queue: AtomicBool,

    /// The ID of the CPU this task is running on.
    cpu_id: AtomicU32,
    /// Whether the task is running on (or being switched out of) a CPU.
    #[cfg(feature = "smp")]
    on_cpu: AtomicBool,

    /// The ticket of the currently registered timer event, `0` if none.
    #[cfg(feature = "irq")]
    timer_ticket_id: AtomicU64,

    #[cfg(feature = "preempt")]
    need_resched: AtomicBool,
    #[cfg(feature = "preempt")]
    preempt_disable_count: AtomicUsize,

    exit_code: AtomicI32,
    wait_for_exit: WaitQueue,

    kstack: Option<TaskStack>,
    ctx: UnsafeCell<TaskContext>,
    task_ext: AxTaskExt,

    #[cfg(feature = "tls")]
    tls: TlsArea,
}

impl TaskId {
    /// Allocates a fresh, unique task ID.
    fn new() -> Self {
        static ID_COUNTER: AtomicU64 = AtomicU64::new(1);
        Self(ID_COUNTER.fetch_add(1, Ordering::Relaxed))
    }

    /// Converts the task ID to a `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }
}

impl From<u8> for TaskState {
    #[inline]
    fn from(state: u8) -> Self {
        match state {
            1 => Self::Running,
            2 => Self::Ready,
            3 => Self::Blocked,
            4 => Self::Exited,
            _ => unreachable!(),
        }
    }
}

unsafe impl Send for TaskInner {}
unsafe impl Sync for TaskInner {}

impl TaskInner {
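    /// Creates a new task with the given entry function, name, and kernel
    /// stack size (rounded up to a multiple of 4 KiB). A task named `"idle"`
    /// is marked as the idle task.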
    pub fn new<F>(entry: F, name: String, stack_size: usize) -> Self
    where
        F: FnOnce() + Send + 'static,
    {
        let mut t = Self::new_common(TaskId::new(), name);
        debug!("new task: {}", t.id_name());
        let kstack = TaskStack::alloc(align_up_4k(stack_size));

        #[cfg(feature = "tls")]
        let tls = VirtAddr::from(t.tls.tls_ptr() as usize);
        #[cfg(not(feature = "tls"))]
        let tls = VirtAddr::from(0);

        t.entry = Some(Box::into_raw(Box::new(entry)));
        t.ctx_mut().init(task_entry as usize, kstack.top(), tls);
        t.kstack = Some(kstack);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    /// Gets the ID of the task.
    pub const fn id(&self) -> TaskId {
        self.id
    }

    /// Gets the name of the task.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Gets a combined string of the task ID and name, e.g. `Task(1, "idle")`.
    pub fn id_name(&self) -> alloc::string::String {
        alloc::format!("Task({}, {:?})", self.id.as_u64(), self.name)
    }

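    /// Waits for the task to exit and returns its exit code.
    ///
    /// The calling task blocks on this task's exit wait queue until the state
    /// becomes [`TaskState::Exited`].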
    pub fn join(&self) -> Option<i32> {
        self.wait_for_exit
            .wait_until(|| self.state() == TaskState::Exited);
        Some(self.exit_code.load(Ordering::Acquire))
    }

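    /// Returns a raw pointer to the user-defined task extension storage.
    ///
    /// # Safety
    ///
    /// The caller must ensure the extension has been initialized (see
    /// [`TaskInner::init_task_ext`]) before dereferencing the pointer with a
    /// concrete type.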
    pub unsafe fn task_ext_ptr(&self) -> *mut u8 {
        self.task_ext.as_ptr()
    }

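    /// Initializes the user-defined task extension with `data`.
    ///
    /// Returns a reference to the stored value on first initialization, or
    /// `None` if the extension has already been initialized.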
    pub fn init_task_ext<T: Sized>(&mut self, data: T) -> Option<&T> {
        if self.task_ext.is_empty() {
            self.task_ext.write(data).map(|data| &*data)
        } else {
            None
        }
    }

    /// Returns a mutable reference to the task's saved context.
    #[inline]
    pub const fn ctx_mut(&mut self) -> &mut TaskContext {
        self.ctx.get_mut()
    }

    /// Returns the top address of the kernel stack, if one is allocated.
    #[inline]
    pub const fn kernel_stack_top(&self) -> Option<VirtAddr> {
        match &self.kstack {
            Some(s) => Some(s.top()),
            None => None,
        }
    }

    /// Gets the ID of the CPU the task is running on.
    #[inline]
    pub fn cpu_id(&self) -> u32 {
        self.cpu_id.load(Ordering::Acquire)
    }

    /// Gets the CPU affinity mask of the task.
    #[inline]
    pub fn cpumask(&self) -> AxCpuMask {
        *self.cpumask.lock()
    }

    /// Sets the CPU affinity mask of the task.
    #[inline]
    pub fn set_cpumask(&self, cpumask: AxCpuMask) {
        *self.cpumask.lock() = cpumask
    }
}

impl TaskInner {
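    /// Common field initialization shared by [`TaskInner::new`] and
    /// [`TaskInner::new_init`]: the task starts in the [`TaskState::Ready`]
    /// state with a full CPU affinity mask and no kernel stack allocated yet.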
    fn new_common(id: TaskId, name: String) -> Self {
        let cpumask = crate::api::cpu_mask_full();

        Self {
            id,
            name,
            is_idle: false,
            is_init: false,
            entry: None,
            state: AtomicU8::new(TaskState::Ready as u8),
            cpumask: SpinNoIrq::new(cpumask),
            in_wait_queue: AtomicBool::new(false),
            #[cfg(feature = "irq")]
            timer_ticket_id: AtomicU64::new(0),
            cpu_id: AtomicU32::new(0),
            #[cfg(feature = "smp")]
            on_cpu: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            need_resched: AtomicBool::new(false),
            #[cfg(feature = "preempt")]
            preempt_disable_count: AtomicUsize::new(0),
            exit_code: AtomicI32::new(0),
            wait_for_exit: WaitQueue::new(),
            kstack: None,
            ctx: UnsafeCell::new(TaskContext::new()),
            task_ext: AxTaskExt::empty(),
            #[cfg(feature = "tls")]
            tls: TlsArea::alloc(),
        }
    }

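    /// Creates an "init task" representing the code already running on the
    /// current CPU when the scheduler is brought up. A task named `"idle"` is
    /// additionally marked as the idle task.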
    pub(crate) fn new_init(name: String) -> Self {
        let mut t = Self::new_common(TaskId::new(), name);
        t.is_init = true;
        #[cfg(feature = "smp")]
        t.set_on_cpu(true);
        if t.name == "idle" {
            t.is_idle = true;
        }
        t
    }

    /// Wraps the task in an [`Arc`], producing an [`AxTaskRef`].
    pub(crate) fn into_arc(self) -> AxTaskRef {
        Arc::new(AxTask::new(self))
    }

    #[inline]
    pub(crate) fn state(&self) -> TaskState {
        self.state.load(Ordering::Acquire).into()
    }

    #[inline]
    pub(crate) fn set_state(&self, state: TaskState) {
        self.state.store(state as u8, Ordering::Release)
    }

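    /// Atomically transitions the task state from `current_state` to
    /// `new_state` with a compare-and-exchange.
    ///
    /// Returns `true` if the transition succeeded, or `false` if the task was
    /// not in `current_state`.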
    #[inline]
    pub(crate) fn transition_state(&self, current_state: TaskState, new_state: TaskState) -> bool {
        self.state
            .compare_exchange(
                current_state as u8,
                new_state as u8,
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .is_ok()
    }

    #[inline]
    pub(crate) fn is_running(&self) -> bool {
        matches!(self.state(), TaskState::Running)
    }

    #[inline]
    pub(crate) fn is_ready(&self) -> bool {
        matches!(self.state(), TaskState::Ready)
    }

    #[inline]
    pub(crate) const fn is_init(&self) -> bool {
        self.is_init
    }

    #[inline]
    pub(crate) const fn is_idle(&self) -> bool {
        self.is_idle
    }

    #[inline]
    pub(crate) fn in_wait_queue(&self) -> bool {
        self.in_wait_queue.load(Ordering::Acquire)
    }

    #[inline]
    pub(crate) fn set_in_wait_queue(&self, in_wait_queue: bool) {
        self.in_wait_queue.store(in_wait_queue, Ordering::Release);
    }

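    /// Returns the task's current timer ticket ID.
    ///
    /// The ticket identifies the timer event registered for this task; a
    /// firing timer event is only honored if its ticket matches this value,
    /// so stale events can be ignored (see `timer_ticket_expired`).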
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket(&self) -> u64 {
        self.timer_ticket_id.load(Ordering::Acquire)
    }

    /// Registers a new timer ticket for the task.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn set_timer_ticket(&self, timer_ticket_id: u64) {
        // A ticket ID of 0 is reserved to mean "no valid timer event".
        assert!(timer_ticket_id != 0);
        self.timer_ticket_id
            .store(timer_ticket_id, Ordering::Release);
    }

    /// Marks the task's current timer event as expired by clearing the ticket.
    #[inline]
    #[cfg(feature = "irq")]
    pub(crate) fn timer_ticket_expired(&self) {
        self.timer_ticket_id.store(0, Ordering::Release);
    }

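    /// Sets or clears the pending-preemption flag checked at preemption
    /// points.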
    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn set_preempt_pending(&self, pending: bool) {
        self.need_resched.store(pending, Ordering::Release)
    }

    /// Returns whether the task can be preempted, i.e. whether its preemption
    /// disable count equals `current_disable_count`.
    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn can_preempt(&self, current_disable_count: usize) -> bool {
        self.preempt_disable_count.load(Ordering::Acquire) == current_disable_count
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn disable_preempt(&self) {
        self.preempt_disable_count.fetch_add(1, Ordering::Release);
    }

    #[inline]
    #[cfg(feature = "preempt")]
    pub(crate) fn enable_preempt(&self, resched: bool) {
        // Only when the last nested disable is released (count drops to 0) and
        // rescheduling is requested do we check for a pending preemption.
        if self.preempt_disable_count.fetch_sub(1, Ordering::Release) == 1 && resched {
            Self::current_check_preempt_pending();
        }
    }

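    /// If the current task has a pending preemption request and preemption is
    /// allowed (disable count is 0), performs the rescheduling on the current
    /// run queue.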
    #[cfg(feature = "preempt")]
    fn current_check_preempt_pending() {
        use kernel_guard::NoPreemptIrqSave;
        let curr = crate::current();
        if curr.need_resched.load(Ordering::Acquire) && curr.can_preempt(0) {
            let mut rq = crate::current_run_queue::<NoPreemptIrqSave>();
            // Re-check under the run-queue guard (IRQs and preemption are now
            // disabled), as the flag may have been cleared in the meantime.
            if curr.need_resched.load(Ordering::Acquire) {
                rq.preempt_resched()
            }
        }
    }

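    /// Stores the exit code and wakes up all tasks blocked in
    /// [`TaskInner::join`] waiting for this task to exit.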
    pub(crate) fn notify_exit(&self, exit_code: i32) {
        self.exit_code.store(exit_code, Ordering::Release);
        self.wait_for_exit.notify_all(false);
    }

    /// Returns a raw pointer to the task's saved context, used during context
    /// switching.
    #[inline]
    pub(crate) const unsafe fn ctx_mut_ptr(&self) -> *mut TaskContext {
        self.ctx.get()
    }

    /// Records the ID of the CPU the task is scheduled onto.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_cpu_id(&self, cpu_id: u32) {
        self.cpu_id.store(cpu_id, Ordering::Release);
    }

    /// Returns whether the task is still running on (or being switched out
    /// of) some CPU.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn on_cpu(&self) -> bool {
        self.on_cpu.load(Ordering::Acquire)
    }

    /// Marks whether the task is running on a CPU.
    #[cfg(feature = "smp")]
    #[inline]
    pub(crate) fn set_on_cpu(&self, on_cpu: bool) {
        self.on_cpu.store(on_cpu, Ordering::Release)
    }
}

impl fmt::Debug for TaskInner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TaskInner")
            .field("id", &self.id)
            .field("name", &self.name)
            .field("state", &self.state())
            .finish()
    }
}

impl Drop for TaskInner {
    fn drop(&mut self) {
        debug!("task drop: {}", self.id_name());
    }
}

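/// A heap-allocated kernel stack with 16-byte alignment. The backing memory is
/// freed when the stack is dropped (i.e. when the owning task is destroyed).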
struct TaskStack {
    ptr: NonNull<u8>,
    layout: Layout,
}

impl TaskStack {
    pub fn alloc(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 16).unwrap();
        Self {
            ptr: NonNull::new(unsafe { alloc::alloc::alloc(layout) }).unwrap(),
            layout,
        }
    }

    /// The top of the stack (one byte past the end of the allocation), since
    /// the stack grows downwards.
    pub const fn top(&self) -> VirtAddr {
        unsafe { core::mem::transmute(self.ptr.as_ptr().add(self.layout.size())) }
    }
}

impl Drop for TaskStack {
    fn drop(&mut self) {
        unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) }
    }
}

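/// A wrapper of [`AxTaskRef`] for the task currently running on this CPU.
///
/// The inner [`ManuallyDrop`] ensures that creating or dropping a
/// `CurrentTask` does not change the task's reference count; the reference is
/// owned by the per-CPU current-task pointer.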
pub struct CurrentTask(ManuallyDrop<AxTaskRef>);

impl CurrentTask {
    /// Returns the current task if the per-CPU pointer has been initialized.
    pub(crate) fn try_get() -> Option<Self> {
        let ptr: *const super::AxTask = axhal::percpu::current_task_ptr();
        if !ptr.is_null() {
            Some(Self(unsafe { ManuallyDrop::new(AxTaskRef::from_raw(ptr)) }))
        } else {
            None
        }
    }

    /// Returns the current task, panicking if it has not been initialized.
    pub(crate) fn get() -> Self {
        Self::try_get().expect("current task is uninitialized")
    }

    /// Gets a reference to the underlying [`AxTaskRef`].
    pub fn as_task_ref(&self) -> &AxTaskRef {
        &self.0
    }

    pub(crate) fn clone(&self) -> AxTaskRef {
        self.0.deref().clone()
    }

    pub(crate) fn ptr_eq(&self, other: &AxTaskRef) -> bool {
        Arc::ptr_eq(&self.0, other)
    }

    /// Initializes the per-CPU current-task pointer with the init task, and
    /// sets up the thread pointer for TLS if enabled.
    pub(crate) unsafe fn init_current(init_task: AxTaskRef) {
        assert!(init_task.is_init());
        #[cfg(feature = "tls")]
        axhal::asm::write_thread_pointer(init_task.tls.tls_ptr() as usize);
        let ptr = Arc::into_raw(init_task);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }

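    /// Switches the per-CPU current-task pointer from `prev` to `next`.
    ///
    /// Consuming `prev` drops the reference previously held by the per-CPU
    /// pointer, while `Arc::into_raw` transfers ownership of one reference to
    /// `next` into that pointer.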
    pub(crate) unsafe fn set_current(prev: Self, next: AxTaskRef) {
        let Self(arc) = prev;
        ManuallyDrop::into_inner(arc);
        let ptr = Arc::into_raw(next);
        unsafe {
            axhal::percpu::set_current_task_ptr(ptr);
        }
    }
}

impl Deref for CurrentTask {
    type Target = TaskInner;
    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

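/// The common entry point of all tasks created by [`TaskInner::new`].
///
/// It finishes the run-queue bookkeeping for the just-completed context switch
/// (SMP only), re-enables IRQs if the `irq` feature is on, runs the task's
/// entry closure, and finally exits the task with code 0.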
extern "C" fn task_entry() -> ! {
    #[cfg(feature = "smp")]
    unsafe {
        // Mark the previous task as fully switched out so other CPUs may run it.
        crate::run_queue::clear_prev_task_on_cpu();
    }
    #[cfg(feature = "irq")]
    axhal::asm::enable_irqs();
    let task = crate::current();
    if let Some(entry) = task.entry {
        // Re-box and call the entry closure leaked in `TaskInner::new`.
        unsafe { Box::from_raw(entry)() };
    }
    crate::exit(0);
}