axmm/backend/alloc.rs

use axalloc::global_allocator;
use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use log::debug;
use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr};

use super::Backend;

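/// Allocates a single 4K physical frame from the global allocator, optionally
/// zero-filled, and returns its physical address (`None` if out of memory).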
fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
    let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?);
    if zeroed {
        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) };
    }
    let paddr = virt_to_phys(vaddr);
    Some(paddr)
}

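/// Returns a previously allocated 4K physical frame to the global allocator.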
fn dealloc_frame(frame: PhysAddr) {
    let vaddr = phys_to_virt(frame);
    global_allocator().dealloc_pages(vaddr.as_usize(), 1);
}

impl Backend {
    /// Creates a new allocation mapping backend.
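    ///
    /// If `populate` is `true`, physical frames are allocated eagerly when
    /// the mapping is created; otherwise they are allocated lazily on the
    /// first page fault.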
    pub const fn new_alloc(populate: bool) -> Self {
        Self::Alloc { populate }
    }

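    /// Maps the region `[start, start + size)` in the given page table.
    ///
    /// If `populate` is `true`, a zeroed 4K frame is allocated and mapped for
    /// every page up front; otherwise the region is mapped to empty entries
    /// so that the first access triggers `handle_page_fault_alloc`.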
    pub(crate) fn map_alloc(
        &self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
    ) -> bool {
        debug!(
            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
            start,
            start + size,
            flags,
            populate
        );
        if populate {
            // Eagerly allocate and map a physical frame for every page in
            // the region.
            for addr in PageIter4K::new(start, start + size).unwrap() {
                if let Some(frame) = alloc_frame(true) {
                    if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) {
                        tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
                    } else {
                        return false;
                    }
                } else {
                    // Allocation failed: a populated mapping must not be left
                    // with unmapped holes, so report failure.
                    return false;
                }
            }
            true
        } else {
            // Map the region to empty entries for on-demand mapping; physical
            // frames are allocated later in `handle_page_fault_alloc`.
            let flags = MappingFlags::empty();
            pt.map_region(start, |_| 0.into(), size, flags, false, false)
                .map(|tlb| tlb.ignore())
                .is_ok()
        }
    }

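    /// Unmaps the region `[start, start + size)`, returning every mapped 4K
    /// frame to the global allocator. Unmapped pages (lazily-mapped pages
    /// that were never touched) are skipped; encountering a huge-page
    /// mapping fails.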
    pub(crate) fn unmap_alloc(
        &self,
        start: VirtAddr,
        size: usize,
        pt: &mut PageTable,
        _populate: bool,
    ) -> bool {
        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
        for addr in PageIter4K::new(start, start + size).unwrap() {
            if let Ok((frame, page_size, tlb)) = pt.unmap(addr) {
                // Deallocate the physical frame if there is a mapping in the
                // page table.
                if page_size.is_huge() {
                    return false;
                }
                tlb.flush();
                dealloc_frame(frame);
            } else {
                // Deallocation is not needed if the page is not mapped.
            }
        }
        true
    }

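    /// Handles a page fault at `vaddr` for a lazily-populated mapping by
    /// allocating a zeroed frame and remapping the faulting page with its
    /// original flags. Returns `false` for populated mappings, which should
    /// never fault.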
    pub(crate) fn handle_page_fault_alloc(
        &self,
        vaddr: VirtAddr,
        orig_flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
    ) -> bool {
        if populate {
            false // Populated mappings should not trigger page faults.
        } else if let Some(frame) = alloc_frame(true) {
            // Allocate a physical frame lazily and map it to the fault address.
            // `vaddr` does not need to be aligned. It will be automatically
            // aligned during `pt.remap` regardless of the page size.
            pt.remap(vaddr, frame, orig_flags)
                .map(|(_, tlb)| tlb.flush())
                .is_ok()
        } else {
            false
        }
    }
}
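
// A minimal usage sketch (illustrative only): `pt`, the address, and the
// flags below are hypothetical, and callers normally drive this backend
// through the crate's higher-level address-space API rather than directly.
//
//     let backend = Backend::new_alloc(false); // lazy, on-demand mapping
//     backend.map_alloc(
//         VirtAddr::from(0x8000_0000),
//         4 * PAGE_SIZE_4K,
//         MappingFlags::READ | MappingFlags::WRITE,
//         &mut pt,
//         false,
//     );
//     // The first access to any page in the range faults, and the fault
//     // handler then resolves it with:
//     // backend.handle_page_fault_alloc(fault_vaddr, flags, &mut pt, false);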