1use axalloc::global_allocator;
4use page_table_multiarch::PagingHandler;
5
6use crate::mem::{MemRegionFlags, PAGE_SIZE_4K, PhysAddr, VirtAddr, phys_to_virt, virt_to_phys};
7
8#[doc(no_inline)]
9pub use page_table_multiarch::{MappingFlags, PageSize, PagingError, PagingResult};
10
11impl From<MemRegionFlags> for MappingFlags {
12 fn from(f: MemRegionFlags) -> Self {
13 let mut ret = Self::empty();
14 if f.contains(MemRegionFlags::READ) {
15 ret |= Self::READ;
16 }
17 if f.contains(MemRegionFlags::WRITE) {
18 ret |= Self::WRITE;
19 }
20 if f.contains(MemRegionFlags::EXECUTE) {
21 ret |= Self::EXECUTE;
22 }
23 if f.contains(MemRegionFlags::DEVICE) {
24 ret |= Self::DEVICE;
25 }
26 if f.contains(MemRegionFlags::UNCACHED) {
27 ret |= Self::UNCACHED;
28 }
29 ret
30 }
31}
32
33pub struct PagingHandlerImpl;
36
37impl PagingHandler for PagingHandlerImpl {
38 fn alloc_frame() -> Option<PhysAddr> {
39 global_allocator()
40 .alloc_pages(1, PAGE_SIZE_4K)
41 .map(|vaddr| virt_to_phys(vaddr.into()))
42 .ok()
43 }
44
45 fn dealloc_frame(paddr: PhysAddr) {
46 global_allocator().dealloc_pages(phys_to_virt(paddr).as_usize(), 1)
47 }
48
49 #[inline]
50 fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
51 phys_to_virt(paddr)
52 }
53}
54
// Select the architecture-specific page-table type at compile time.
// NOTE(review): there is no `else` fallback, so `PageTable` is undefined
// on any other target architecture — presumably intentional, since the
// crate only supports these four; confirm if a new arch is added.
cfg_if::cfg_if! {
    if #[cfg(target_arch = "x86_64")] {
        /// The architecture-specific page table (4-level x86_64).
        pub type PageTable = page_table_multiarch::x86_64::X64PageTable<PagingHandlerImpl>;
    } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
        /// The architecture-specific page table (RISC-V Sv39).
        pub type PageTable = page_table_multiarch::riscv::Sv39PageTable<PagingHandlerImpl>;
    } else if #[cfg(target_arch = "aarch64")]{
        /// The architecture-specific page table (AArch64 VMSAv8-64).
        pub type PageTable = page_table_multiarch::aarch64::A64PageTable<PagingHandlerImpl>;
    } else if #[cfg(target_arch = "loongarch64")] {
        /// The architecture-specific page table (LoongArch64).
        pub type PageTable = page_table_multiarch::loongarch64::LA64PageTable<PagingHandlerImpl>;
    }
}