1#![no_std]
4
5#[macro_use]
6extern crate log;
7extern crate alloc;
8
9mod aspace;
10mod backend;
11
12pub use self::aspace::AddrSpace;
13pub use self::backend::Backend;
14
15use axerrno::{AxError, AxResult};
16use axhal::mem::{MemRegionFlags, phys_to_virt};
17use axhal::paging::MappingFlags;
18use kspin::SpinNoIrq;
19use lazyinit::LazyInit;
20use memory_addr::{MemoryAddr, PhysAddr, VirtAddr, va};
21use memory_set::MappingError;
22
/// The global kernel address space, initialized once by
/// [`init_memory_management`] on the primary CPU and then shared read/write
/// (behind the spinlock) by all CPUs.
/// NOTE(review): `LazyInit` presumably panics if dereferenced before
/// `init_once` — confirm against the `lazyinit` crate docs.
static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
24
25fn mapping_err_to_ax_err(err: MappingError) -> AxError {
26 if !matches!(err, MappingError::AlreadyExists) {
27 warn!("Mapping error: {err:?}");
28 }
29 match err {
30 MappingError::InvalidParam => AxError::InvalidInput,
31 MappingError::AlreadyExists => AxError::AlreadyExists,
32 MappingError::BadState => AxError::BadState,
33 }
34}
35
36fn reg_flag_to_map_flag(f: MemRegionFlags) -> MappingFlags {
37 let mut ret = MappingFlags::empty();
38 if f.contains(MemRegionFlags::READ) {
39 ret |= MappingFlags::READ;
40 }
41 if f.contains(MemRegionFlags::WRITE) {
42 ret |= MappingFlags::WRITE;
43 }
44 if f.contains(MemRegionFlags::EXECUTE) {
45 ret |= MappingFlags::EXECUTE;
46 }
47 if f.contains(MemRegionFlags::DEVICE) {
48 ret |= MappingFlags::DEVICE;
49 }
50 if f.contains(MemRegionFlags::UNCACHED) {
51 ret |= MappingFlags::UNCACHED;
52 }
53 ret
54}
55
56pub fn new_kernel_aspace() -> AxResult<AddrSpace> {
58 let mut aspace = AddrSpace::new_empty(
59 va!(axconfig::plat::KERNEL_ASPACE_BASE),
60 axconfig::plat::KERNEL_ASPACE_SIZE,
61 )?;
62 for r in axhal::mem::memory_regions() {
63 let start = r.paddr.align_down_4k();
65 let end = (r.paddr + r.size).align_up_4k();
66 aspace.map_linear(
67 phys_to_virt(start),
68 start,
69 end - start,
70 reg_flag_to_map_flag(r.flags),
71 )?;
72 }
73 Ok(aspace)
74}
75
76pub fn new_user_aspace(base: VirtAddr, size: usize) -> AxResult<AddrSpace> {
78 let mut aspace = AddrSpace::new_empty(base, size)?;
79 if !cfg!(target_arch = "aarch64") && !cfg!(target_arch = "loongarch64") {
80 aspace.copy_mappings_from(&kernel_aspace().lock())?;
84 }
85 Ok(aspace)
86}
87
/// Returns a reference to the global kernel address space, wrapped in its
/// spinlock.
///
/// NOTE(review): must only be called after [`init_memory_management`] has
/// run — `LazyInit` presumably panics on access before `init_once`; confirm.
pub fn kernel_aspace() -> &'static SpinNoIrq<AddrSpace> {
    &KERNEL_ASPACE
}
92
93pub fn kernel_page_table_root() -> PhysAddr {
95 KERNEL_ASPACE.lock().page_table_root()
96}
97
98pub fn init_memory_management() {
103 info!("Initialize virtual memory management...");
104
105 let kernel_aspace = new_kernel_aspace().expect("failed to initialize kernel address space");
106 debug!("kernel address space init OK: {:#x?}", kernel_aspace);
107 KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace));
108 unsafe { axhal::asm::write_kernel_page_table(kernel_page_table_root()) };
109}
110
111pub fn init_memory_management_secondary() {
113 unsafe { axhal::asm::write_kernel_page_table(kernel_page_table_root()) };
114}