use alloc::{string::String, sync::Arc};
use kernel_guard::NoPreemptIrqSave;
pub(crate) use crate::run_queue::{current_run_queue, select_run_queue};
#[doc(cfg(feature = "multitask"))]
pub use crate::task::{CurrentTask, TaskId, TaskInner};
#[doc(cfg(feature = "multitask"))]
pub use crate::task_ext::{TaskExtMut, TaskExtRef};
#[doc(cfg(feature = "multitask"))]
pub use crate::wait_queue::WaitQueue;
/// A reference-counted, shareable handle to a task ([`AxTask`]).
pub type AxTaskRef = Arc<AxTask>;

/// A bitmask of CPUs, sized by the configured CPU count (`axconfig::SMP`).
pub type AxCpuMask = cpumask::CpuMask<{ axconfig::SMP }>;
// Selects the task wrapper and scheduler implementation at compile time,
// driven by the `sched_*` cargo features; FIFO is the fallback when no
// scheduling feature is enabled.
cfg_if::cfg_if! {
    if #[cfg(feature = "sched_rr")] {
        // Round-robin scheduling with a time slice of `MAX_TIME_SLICE`
        // timer ticks (see the `scheduler` crate for exact semantics).
        const MAX_TIME_SLICE: usize = 5;
        pub(crate) type AxTask = scheduler::RRTask<TaskInner, MAX_TIME_SLICE>;
        pub(crate) type Scheduler = scheduler::RRScheduler<TaskInner, MAX_TIME_SLICE>;
    } else if #[cfg(feature = "sched_cfs")] {
        // Completely Fair Scheduler.
        pub(crate) type AxTask = scheduler::CFSTask<TaskInner>;
        pub(crate) type Scheduler = scheduler::CFScheduler<TaskInner>;
    } else {
        // Default: cooperative first-in-first-out scheduling.
        pub(crate) type AxTask = scheduler::FifoTask<TaskInner>;
        pub(crate) type Scheduler = scheduler::FifoScheduler<TaskInner>;
    }
}
/// Marker type wiring this crate into [`kernel_guard::KernelGuardIf`], so
/// that `kernel_guard` critical sections can toggle task preemption.
#[cfg(feature = "preempt")]
struct KernelGuardIfImpl;

#[cfg(feature = "preempt")]
#[crate_interface::impl_interface]
impl kernel_guard::KernelGuardIf for KernelGuardIfImpl {
    fn disable_preempt() {
        // Uses `current_may_uninit` and silently does nothing when there is
        // no current task yet — guards may be taken before the scheduler
        // is initialized.
        if let Some(curr) = current_may_uninit() {
            curr.disable_preempt();
        }
    }
    fn enable_preempt() {
        // The `true` flag is forwarded to `TaskInner::enable_preempt`;
        // NOTE(review): presumably it requests an immediate reschedule
        // check when the preempt count drops to zero — confirm in task.rs.
        if let Some(curr) = current_may_uninit() {
            curr.enable_preempt(true);
        }
    }
}
/// Returns a handle to the current task, or [`None`] if the current task
/// is not initialized yet.
pub fn current_may_uninit() -> Option<CurrentTask> {
    CurrentTask::try_get()
}
/// Returns a handle to the current task.
///
/// Delegates to [`CurrentTask::get`]; unlike [`current_may_uninit`], this
/// is expected to panic if the current task is not initialized — confirm
/// the exact behavior in `CurrentTask::get`.
pub fn current() -> CurrentTask {
    CurrentTask::get()
}
/// Initializes the task scheduling subsystem on the primary CPU.
///
/// Sets up the run queue(s) and, when the `irq` feature is enabled, the
/// timer event subsystem, then logs the selected scheduler.
pub fn init_scheduler() {
    info!("Initialize scheduling...");
    crate::run_queue::init();
    #[cfg(feature = "irq")]
    crate::timers::init();
    info!("  use {} scheduler.", Scheduler::scheduler_name());
}
/// Initializes scheduling structures for a secondary (non-boot) CPU.
pub fn init_scheduler_secondary() {
    crate::run_queue::init_secondary();
    #[cfg(feature = "irq")]
    crate::timers::init();
}
/// Handles periodic timer ticks: fires any expired timer events, then
/// forwards the tick to the current run queue's scheduler.
#[cfg(feature = "irq")]
#[doc(cfg(feature = "irq"))]
pub fn on_timer_tick() {
    use kernel_guard::NoOp;
    crate::timers::check_events();
    // NOTE(review): the `NoOp` guard suggests this is expected to run in
    // IRQ context where interrupts/preemption are already disabled —
    // confirm at the call site before changing the guard type.
    current_run_queue::<NoOp>().scheduler_timer_tick();
}
/// Wraps the given task in an [`AxTaskRef`], pushes it onto the run queue
/// chosen for it, and returns the reference to the caller.
pub fn spawn_task(task: TaskInner) -> AxTaskRef {
    let new_task = task.into_arc();
    let rq = select_run_queue::<NoPreemptIrqSave>(&new_task);
    rq.add_task(new_task.clone());
    new_task
}
/// Creates a new task from an entry closure, a name, and a stack size,
/// then spawns it onto a run queue, returning its [`AxTaskRef`].
pub fn spawn_raw<F>(f: F, name: String, stack_size: usize) -> AxTaskRef
where
    F: FnOnce() + Send + 'static,
{
    let inner = TaskInner::new(f, name, stack_size);
    spawn_task(inner)
}
/// Spawns a task with an empty name and the default stack size
/// (`axconfig::TASK_STACK_SIZE`), returning its [`AxTaskRef`].
pub fn spawn<F>(f: F) -> AxTaskRef
where
    F: FnOnce() + Send + 'static,
{
    spawn_raw(f, String::new(), axconfig::TASK_STACK_SIZE)
}
/// Sets the scheduling priority of the current task.
///
/// Returns `true` on success, `false` otherwise; the meaning and valid
/// range of `prio` depend on the compiled-in scheduler (see the
/// `scheduler` crate).
pub fn set_priority(prio: isize) -> bool {
    current_run_queue::<NoPreemptIrqSave>().set_current_priority(prio)
}
/// Restricts the current task to the CPUs in `cpumask`.
///
/// Returns `false` if the mask is empty (the task must be runnable
/// somewhere); otherwise stores the mask and returns `true`. On SMP
/// builds, if the current CPU is no longer allowed, the task is migrated
/// away via a short-lived helper task.
pub fn set_current_affinity(cpumask: AxCpuMask) -> bool {
    // An empty mask would leave the task with nowhere to run.
    if cpumask.is_empty() {
        return false;
    }

    let curr = current().clone();
    curr.set_cpumask(cpumask);

    // If this CPU was excluded from the new mask, hand the current task
    // over to a migration helper that moves it to a permitted CPU.
    #[cfg(feature = "smp")]
    if !cpumask.get(axhal::cpu::this_cpu_id()) {
        const MIGRATION_TASK_STACK_SIZE: usize = 4096;
        let migration_task = TaskInner::new(
            move || crate::run_queue::migrate_entry(curr),
            "migration-task".into(),
            MIGRATION_TASK_STACK_SIZE,
        )
        .into_arc();
        current_run_queue::<NoPreemptIrqSave>().migrate_current(migration_task);

        // After migration we must be running on an allowed CPU.
        assert!(cpumask.get(axhal::cpu::this_cpu_id()), "Migration failed");
    }

    true
}
/// Voluntarily gives up the CPU, letting the scheduler pick another task.
pub fn yield_now() {
    current_run_queue::<NoPreemptIrqSave>().yield_current()
}
/// Puts the current task to sleep for at least the given duration,
/// measured against the wall clock.
pub fn sleep(dur: core::time::Duration) {
    let deadline = axhal::time::wall_time() + dur;
    sleep_until(deadline);
}
/// Blocks the current task until the given wall-clock deadline.
///
/// Without the `irq` feature there is no timer interrupt to wake the
/// task, so this falls back to busy-waiting on the clock.
pub fn sleep_until(deadline: axhal::time::TimeValue) {
    #[cfg(feature = "irq")]
    current_run_queue::<NoPreemptIrqSave>().sleep_until(deadline);
    #[cfg(not(feature = "irq"))]
    axhal::time::busy_wait_until(deadline);
}
/// Terminates the current task with the given exit code; never returns.
pub fn exit(exit_code: i32) -> ! {
    current_run_queue::<NoPreemptIrqSave>().exit_current(exit_code)
}
/// The idle loop: repeatedly yields to any runnable task and, when the
/// `irq` feature is enabled, halts the CPU until the next interrupt.
/// Without `irq`, it degenerates into a busy yield loop.
pub fn run_idle() -> ! {
    loop {
        yield_now();
        debug!("idle task: waiting for IRQs...");
        #[cfg(feature = "irq")]
        axhal::arch::wait_for_irqs();
    }
}