axhal/arch/x86_64/context.rs

use core::{arch::naked_asm, fmt};
use memory_addr::VirtAddr;

/// Saved registers when a trap (interrupt or exception) occurs.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct TrapFrame {
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,

    // Pushed by `trap.S`
    pub vector: u64,
    pub error_code: u64,

    // Pushed by CPU
    pub rip: u64,
    pub cs: u64,
    pub rflags: u64,
    pub rsp: u64,
    pub ss: u64,
}

impl TrapFrame {
    /// Gets the 0th syscall argument.
    pub const fn arg0(&self) -> usize {
        self.rdi as _
    }

    /// Gets the 1st syscall argument.
    pub const fn arg1(&self) -> usize {
        self.rsi as _
    }

    /// Gets the 2nd syscall argument.
    pub const fn arg2(&self) -> usize {
        self.rdx as _
    }

    /// Gets the 3rd syscall argument.
    pub const fn arg3(&self) -> usize {
        self.r10 as _
    }

    /// Gets the 4th syscall argument.
    pub const fn arg4(&self) -> usize {
        self.r8 as _
    }

    /// Gets the 5th syscall argument.
    pub const fn arg5(&self) -> usize {
        self.r9 as _
    }

    /// Whether the trap is from userspace.
    pub const fn is_user(&self) -> bool {
        self.cs & 0b11 == 3
    }
}
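
// Example (illustrative sketch, not part of the original file): a hypothetical
// syscall handler could use the getters above to decode its arguments and write
// the return value back into `rax`. The `dispatch_syscall_example` name and the
// use of `rax` as the syscall-number register follow the common x86_64 syscall
// convention and are assumptions for illustration only.
#[allow(dead_code)]
fn dispatch_syscall_example(tf: &mut TrapFrame) {
    let sysno = tf.rax as usize; // syscall number (by convention)
    let args = [tf.arg0(), tf.arg1(), tf.arg2(), tf.arg3(), tf.arg4(), tf.arg5()];
    // ... dispatch on `sysno` using `args`, then report the result ...
    let _ = (sysno, args);
    tf.rax = 0; // return value seen by user space after the trap returns
}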

/// Context to enter user space.
#[cfg(feature = "uspace")]
pub struct UspaceContext(TrapFrame);

#[cfg(feature = "uspace")]
impl UspaceContext {
    /// Creates an empty context with all registers set to zero.
    pub const fn empty() -> Self {
        unsafe { core::mem::MaybeUninit::zeroed().assume_init() }
    }

    /// Creates a new context with the given entry point, user stack pointer,
    /// and the argument.
    pub fn new(entry: usize, ustack_top: VirtAddr, arg0: usize) -> Self {
        use crate::arch::GdtStruct;
        use x86_64::registers::rflags::RFlags;
        Self(TrapFrame {
            rdi: arg0 as _,
            rip: entry as _,
            cs: GdtStruct::UCODE64_SELECTOR.0 as _,
            #[cfg(feature = "irq")]
            rflags: RFlags::INTERRUPT_FLAG.bits(), // IOPL = 0, IF = 1
            rsp: ustack_top.as_usize() as _,
            ss: GdtStruct::UDATA_SELECTOR.0 as _,
            ..Default::default()
        })
    }

    /// Creates a new context from the given [`TrapFrame`].
    ///
    /// It copies all registers except `CS` and `SS`, which are set to the
    /// user segment selectors.
    pub const fn from(tf: &TrapFrame) -> Self {
        use crate::arch::GdtStruct;
        let mut tf = *tf;
        tf.cs = GdtStruct::UCODE64_SELECTOR.0 as _;
        tf.ss = GdtStruct::UDATA_SELECTOR.0 as _;
        Self(tf)
    }

    /// Gets the instruction pointer.
    pub const fn get_ip(&self) -> usize {
        self.0.rip as _
    }

    /// Gets the stack pointer.
    pub const fn get_sp(&self) -> usize {
        self.0.rsp as _
    }

    /// Sets the instruction pointer.
    pub const fn set_ip(&mut self, rip: usize) {
        self.0.rip = rip as _;
    }

    /// Sets the stack pointer.
    pub const fn set_sp(&mut self, rsp: usize) {
        self.0.rsp = rsp as _;
    }

    /// Sets the return value register.
    pub const fn set_retval(&mut self, rax: usize) {
        self.0.rax = rax as _;
    }

    /// Enters user space.
    ///
    /// It restores the user registers and jumps to the user entry point
    /// (saved in `rip`).
    /// When an exception or syscall occurs, the kernel stack pointer is
    /// switched to `kstack_top`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it changes processor mode and the stack.
    pub unsafe fn enter_uspace(&self, kstack_top: VirtAddr) -> ! {
        super::disable_irqs();
        assert_eq!(super::tss_get_rsp0(), kstack_top);
        unsafe {
            core::arch::asm!("
                mov     rsp, {tf}
                pop     rax
                pop     rcx
                pop     rdx
                pop     rbx
                pop     rbp
                pop     rsi
                pop     rdi
                pop     r8
                pop     r9
                pop     r10
                pop     r11
                pop     r12
                pop     r13
                pop     r14
                pop     r15
                add     rsp, 16     // skip vector, error_code
                swapgs
                iretq",
                tf = in(reg) &self.0,
                options(noreturn),
            )
        }
    }
}
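
// Example (illustrative sketch, not part of the original file): building a user
// context and entering user space. `USER_ENTRY` is a placeholder address, and
// `ustack_top`/`kstack_top` would come from the task's user and kernel stacks;
// the caller must have set `TSS.RSP0` to `kstack_top` beforehand (checked by
// the assertion in `enter_uspace`).
#[cfg(feature = "uspace")]
#[allow(dead_code)]
unsafe fn run_user_example(ustack_top: VirtAddr, kstack_top: VirtAddr) -> ! {
    const USER_ENTRY: usize = 0x1000; // placeholder user-space entry point
    let uctx = UspaceContext::new(USER_ENTRY, ustack_top, 0);
    unsafe { uctx.enter_uspace(kstack_top) }
}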

/// Callee-saved registers saved on the kernel stack by [`context_switch`].
///
/// The field order (lowest address first) matches the order in which
/// [`context_switch`] pops them, with `rip` consumed last by its final `ret`.
#[repr(C)]
#[derive(Debug, Default)]
struct ContextSwitchFrame {
    r15: u64,
    r14: u64,
    r13: u64,
    r12: u64,
    rbx: u64,
    rbp: u64,
    rip: u64,
}

/// A 512-byte memory region for the FXSAVE/FXRSTOR instruction to save and
/// restore the x87 FPU, MMX, XMM, and MXCSR registers.
///
/// See <https://www.felixcloutier.com/x86/fxsave> for more details.
#[allow(missing_docs)]
#[repr(C, align(16))]
#[derive(Debug)]
pub struct FxsaveArea {
    pub fcw: u16,
    pub fsw: u16,
    pub ftw: u16,
    pub fop: u16,
    pub fip: u64,
    pub fdp: u64,
    pub mxcsr: u32,
    pub mxcsr_mask: u32,
    pub st: [u64; 16],
    pub xmm: [u64; 32],
    _padding: [u64; 12],
}

static_assertions::const_assert_eq!(core::mem::size_of::<FxsaveArea>(), 512);

/// Extended state of a task, such as FP/SIMD states.
pub struct ExtendedState {
    /// Memory region for the FXSAVE/FXRSTOR instruction.
    pub fxsave_area: FxsaveArea,
}

#[cfg(feature = "fp_simd")]
impl ExtendedState {
    #[inline]
    fn save(&mut self) {
        unsafe { core::arch::x86_64::_fxsave64(&mut self.fxsave_area as *mut _ as *mut u8) }
    }

    #[inline]
    fn restore(&self) {
        unsafe { core::arch::x86_64::_fxrstor64(&self.fxsave_area as *const _ as *const u8) }
    }

    const fn default() -> Self {
        let mut area: FxsaveArea = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
        area.fcw = 0x37f; // x87 control word after `FINIT`: all exceptions masked
        area.ftw = 0xffff; // x87 tag word: all registers marked empty
        area.mxcsr = 0x1f80; // MXCSR default: all SSE exceptions masked
        Self { fxsave_area: area }
    }
}

impl fmt::Debug for ExtendedState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ExtendedState")
            .field("fxsave_area", &self.fxsave_area)
            .finish()
    }
}

/// Saved hardware states of a task.
///
/// The context usually includes:
///
/// - Callee-saved registers
/// - Stack pointer register
/// - Thread pointer register (for thread-local storage)
/// - FP/SIMD registers
///
/// On a context switch, the current task saves its context from the CPU to
/// memory, and the next task restores its context from memory to the CPU.
///
/// On x86_64, callee-saved registers are saved to the kernel stack by the
/// `PUSH` instruction, so [`rsp`] is the `RSP` value after the callee-saved
/// registers have been pushed, and [`kstack_top`] is the top of the kernel
/// stack (`RSP` before any push).
///
/// [`rsp`]: TaskContext::rsp
/// [`kstack_top`]: TaskContext::kstack_top
#[derive(Debug)]
pub struct TaskContext {
    /// The kernel stack top of the task.
    pub kstack_top: VirtAddr,
    /// `RSP` after all callee-saved registers are pushed.
    pub rsp: u64,
    /// The `fs_base` register value (for thread-local storage).
    pub fs_base: usize,
    /// The `gs_base` register value.
    #[cfg(feature = "uspace")]
    pub gs_base: usize,
    /// Extended states, i.e., FP/SIMD states.
    #[cfg(feature = "fp_simd")]
    pub ext_state: ExtendedState,
    /// The `CR3` register value, i.e., the page table root.
    #[cfg(feature = "uspace")]
    pub cr3: memory_addr::PhysAddr,
}

impl TaskContext {
    /// Creates a dummy context for a new task.
    ///
    /// Note that the context is not initialized; it will be filled by the
    /// [`switch_to`] (for initial tasks) and [`init`] (for regular tasks)
    /// methods.
    ///
    /// [`init`]: TaskContext::init
    /// [`switch_to`]: TaskContext::switch_to
    pub fn new() -> Self {
        Self {
            kstack_top: va!(0),
            rsp: 0,
            fs_base: 0,
            #[cfg(feature = "uspace")]
            cr3: crate::paging::kernel_page_table_root(),
            #[cfg(feature = "fp_simd")]
            ext_state: ExtendedState::default(),
            #[cfg(feature = "uspace")]
            gs_base: 0,
        }
    }

    /// Initializes the context for a new task, with the given entry point,
    /// kernel stack, and thread-local storage area.
    pub fn init(&mut self, entry: usize, kstack_top: VirtAddr, tls_area: VirtAddr) {
        unsafe {
            // x86_64 calling convention: the stack must be 16-byte aligned before
            // calling a function. That means when entering a new task (i.e., when the
            // `ret` in `context_switch` is executed), (stack pointer + 8) should be
            // 16-byte aligned (see the size check at the end of this file).
            let frame_ptr = (kstack_top.as_mut_ptr() as *mut u64).sub(1);
            let frame_ptr = (frame_ptr as *mut ContextSwitchFrame).sub(1);
            core::ptr::write(
                frame_ptr,
                ContextSwitchFrame {
                    rip: entry as _,
                    ..Default::default()
                },
            );
            self.rsp = frame_ptr as u64;
        }
        self.kstack_top = kstack_top;
        self.fs_base = tls_area.as_usize();
    }

    /// Changes the page table root (`CR3` register for x86_64).
    ///
    /// If not set, the kernel page table root is used (obtained by
    /// [`axhal::paging::kernel_page_table_root`][1]).
    ///
    /// [1]: crate::paging::kernel_page_table_root
    #[cfg(feature = "uspace")]
    pub fn set_page_table_root(&mut self, cr3: memory_addr::PhysAddr) {
        self.cr3 = cr3;
    }

    /// Switches to another task.
    ///
    /// It first saves the current task's context from the CPU into `self`, and
    /// then restores the next task's context from `next_ctx` to the CPU.
    pub fn switch_to(&mut self, next_ctx: &Self) {
        #[cfg(feature = "fp_simd")]
        {
            self.ext_state.save();
            next_ctx.ext_state.restore();
        }
        #[cfg(any(feature = "tls", feature = "uspace"))]
        unsafe {
            self.fs_base = super::read_thread_pointer();
            super::write_thread_pointer(next_ctx.fs_base);
        }
        #[cfg(feature = "uspace")]
        unsafe {
            // Switch gs base for user space.
            self.gs_base = x86::msr::rdmsr(x86::msr::IA32_KERNEL_GSBASE) as usize;
            x86::msr::wrmsr(x86::msr::IA32_KERNEL_GSBASE, next_ctx.gs_base as u64);
            super::tss_set_rsp0(next_ctx.kstack_top);
            if next_ctx.cr3 != self.cr3 {
                super::write_page_table_root(next_ctx.cr3);
            }
        }
        unsafe { context_switch(&mut self.rsp, &next_ctx.rsp) }
    }
}
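
// Example (illustrative sketch, not part of the original file): preparing a new
// task's context and switching to it. `task_entry` and the stack/TLS addresses
// are placeholders; in practice the scheduler owns the `TaskContext`s and calls
// `switch_to` on a real kernel stack.
#[allow(dead_code)]
fn switch_example(prev: &mut TaskContext, kstack_top: VirtAddr, tls_area: VirtAddr) {
    extern "C" fn task_entry() {
        // placeholder entry point of the new task
    }
    let mut next = TaskContext::new();
    next.init(task_entry as usize, kstack_top, tls_area);
    // Saves `prev`'s callee-saved registers and resumes execution at `task_entry`
    // on the new kernel stack (via `context_switch` below).
    prev.switch_to(&next);
}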

/// Saves the current task's callee-saved registers onto its kernel stack and
/// stores the resulting `RSP` in `*_current_stack`, then loads `*_next_stack`
/// into `RSP`, restores the next task's callee-saved registers, and `ret`s to
/// its saved `rip`.
#[unsafe(naked)]
unsafe extern "C" fn context_switch(_current_stack: &mut u64, _next_stack: &u64) {
    naked_asm!(
        "
        .code64
        push    rbp
        push    rbx
        push    r12
        push    r13
        push    r14
        push    r15
        mov     [rdi], rsp

        mov     rsp, [rsi]
        pop     r15
        pop     r14
        pop     r13
        pop     r12
        pop     rbx
        pop     rbp
        ret",
    )
}
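
// Size check (sketch, not part of the original file): `ContextSwitchFrame` is
// 7 * 8 = 56 bytes. With the extra 8-byte slot reserved in `TaskContext::init`,
// the new task starts with `RSP == kstack_top - 64 + 56 == kstack_top - 8`, so
// `RSP + 8` is 16-byte aligned whenever `kstack_top` is, as the calling
// convention requires at function entry.
static_assertions::const_assert_eq!(core::mem::size_of::<ContextSwitchFrame>(), 56);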