axmm/aspace.rs

use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use memory_addr::{
    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
    /// Returns the address space base.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the address space end.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the address space size.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns the reference to the inner page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the root physical address of the inner page table.
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Checks if the address space contains the given address range.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

    /// Creates a new empty address space.
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

    /// Copies page table mappings from another address space.
    ///
    /// It copies the page table entries only rather than the memory regions,
    /// usually used to copy a portion of the kernel space mapping to the
    /// user space.
    ///
    /// Returns an error if the two address spaces overlap.
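    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest); `kernel_aspace` and
    /// `user_aspace` are assumed to be two non-overlapping address spaces
    /// obtained elsewhere:
    ///
    /// ```ignore
    /// // Mirror the kernel portion of the page table into the user address
    /// // space, so the kernel stays addressable after switching page tables.
    /// user_aspace.copy_mappings_from(&kernel_aspace)?;
    /// ```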
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

    /// Finds a free area that can accommodate the given size.
    ///
    /// The search starts from the given hint address, and the area should be within the given limit range.
    ///
    /// Returns the start address of the free area. Returns `None` if no such area is found.
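    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); `aspace` is assumed
    /// to be an existing [`AddrSpace`] and the hint is arbitrary:
    ///
    /// ```ignore
    /// use memory_addr::{VirtAddr, VirtAddrRange};
    ///
    /// // Look for 16 KiB of free space at or above the hint, anywhere in the space.
    /// let hint = VirtAddr::from(0x1000_0000);
    /// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
    /// if let Some(start) = aspace.find_free_area(hint, 0x4000, limit) {
    ///     // `start` can then be passed to `map_alloc` or `map_linear`.
    /// }
    /// ```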
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit)
    }

    /// Adds a new linear mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
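    ///
    /// # Example
    ///
    /// A hedged sketch (not compiled as a doctest); the MMIO physical address
    /// below is made up for illustration, and `aspace` is assumed to contain
    /// the chosen virtual range:
    ///
    /// ```ignore
    /// use axhal::paging::MappingFlags;
    /// use memory_addr::{PhysAddr, VirtAddr};
    ///
    /// // Map one 4 KiB MMIO page at a fixed physical address into this space.
    /// aspace.map_linear(
    ///     VirtAddr::from(0xffff_0000_1000_0000),
    ///     PhysAddr::from(0x1000_0000),
    ///     0x1000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE,
    /// )?;
    /// ```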
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Adds a new allocation mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    /// If `populate` is `true`, physical frames are allocated and mapped eagerly;
    /// otherwise they are allocated lazily when the pages are first accessed.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
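    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest); `aspace` is assumed to be
    /// a user address space in which the chosen range is free:
    ///
    /// ```ignore
    /// use axhal::paging::MappingFlags;
    /// use memory_addr::VirtAddr;
    ///
    /// // Reserve 16 KiB backed by freshly allocated frames, mapped eagerly
    /// // (`populate = true`), with user read/write permissions.
    /// aspace.map_alloc(
    ///     VirtAddr::from(0x1000_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
    ///     true,
    /// )?;
    /// ```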
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
    ) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Removes mappings within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
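    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest), undoing a previous 16 KiB
    /// mapping at an assumed address:
    ///
    /// ```ignore
    /// use memory_addr::VirtAddr;
    ///
    /// aspace.unmap(VirtAddr::from(0x1000_0000), 0x4000)?;
    /// ```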
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Processes data in the given virtual address range with the given function.
    ///
    /// The function `f` is called once per physical chunk with the kernel virtual
    /// address of the chunk, the current offset into the range, and the chunk size.
    /// It is currently used to implement [`Self::read`] and [`Self::write`].
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // Align the range outward to 4K page boundaries so the iterator visits
        // every page that intersects `[start, start + size)`.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

    /// Reads data from the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to read.
    /// * `buf` - The buffer to store the data.
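    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); the source range is
    /// assumed to be mapped in `aspace`:
    ///
    /// ```ignore
    /// use memory_addr::VirtAddr;
    ///
    /// let mut buf = [0u8; 16];
    /// aspace.read(VirtAddr::from(0x1000_0000), &mut buf)?;
    /// ```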
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

    /// Writes data to the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to write.
    /// * `buf` - The buffer to write to the address space.
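    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled as a doctest); the destination
    /// range is assumed to be mapped and writable in `aspace`:
    ///
    /// ```ignore
    /// use memory_addr::VirtAddr;
    ///
    /// aspace.write(VirtAddr::from(0x1000_0000), b"hello")?;
    /// ```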
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

    /// Updates the mapping flags within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
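    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest); the range is assumed to be
    /// already mapped in `aspace`:
    ///
    /// ```ignore
    /// use axhal::paging::MappingFlags;
    /// use memory_addr::VirtAddr;
    ///
    /// // Drop write permission, e.g. after loading code into the region.
    /// aspace.protect(
    ///     VirtAddr::from(0x1000_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::EXECUTE | MappingFlags::USER,
    /// )?;
    /// ```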
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        // TODO
        self.pt
            .protect_region(start, size, flags, true)
            .map_err(|_| AxError::BadState)?
            .ignore();
        Ok(())
    }

    /// Removes all mappings in the address space.
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

    /// Handles a page fault at the given address.
    ///
    /// `access_flags` indicates the access type that caused the page fault.
    ///
    /// Returns `true` if the page fault is handled successfully (not a real
    /// fault).
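    ///
    /// # Example
    ///
    /// An illustrative sketch of how a trap handler might call this method; the
    /// surrounding handler, `aspace`, `fault_vaddr`, and the derived access
    /// flags are assumptions, not part of this crate:
    ///
    /// ```ignore
    /// use axhal::paging::MappingFlags;
    ///
    /// // Inside a page-fault trap handler, after decoding the faulting access:
    /// if !aspace.handle_page_fault(fault_vaddr, MappingFlags::WRITE) {
    ///     // A real fault: report it to the task (e.g. terminate it).
    /// }
    /// ```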
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            if orig_flags.contains(access_flags) {
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}