axmm/aspace.rs

use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use axhal::trap::PageFaultFlags;
use memory_addr::{
    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
    /// Returns the address space base.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the address space end.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the address space size.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns a reference to the inner page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the root physical address of the inner page table.
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Checks if the address space contains the given address range.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

    /// Creates a new empty address space.
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

    /// Copies page table mappings from another address space.
    ///
    /// It copies only the page table entries, not the backing memory areas.
    /// This is typically used to share a portion of the kernel space mapping
    /// with a user address space.
    ///
    /// Returns an error if the two address spaces overlap.
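    ///
    /// # Example
    ///
    /// A minimal sketch of the intended use (illustrative only; it assumes a
    /// `kernel_aspace` already exists and does not overlap the user range;
    /// `new_empty` is crate-private, so this runs inside `axmm`):
    ///
    /// ```rust,ignore
    /// let mut user_aspace = AddrSpace::new_empty(va!(0x1000), 0x7fff_f000)?;
    /// // Mirror the kernel's page table entries into the user page table so
    /// // that kernel code stays mapped after a page table switch.
    /// user_aspace.copy_mappings_from(&kernel_aspace)?;
    /// ```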
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

    /// Finds a free area that can accommodate the given size.
    ///
    /// The search starts from the given hint address, and the resulting area
    /// must lie within the given limit range.
    ///
    /// Returns the start address of the free area, or `None` if no such area
    /// is found.
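    ///
    /// A sketch of a typical `mmap`-style caller (illustrative; the hint and
    /// size are made up):
    ///
    /// ```rust,ignore
    /// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
    /// if let Some(start) = aspace.find_free_area(va!(0x1000_0000), 0x4000, limit) {
    ///     // `start` is 4K-aligned and the returned range does not overlap
    ///     // any existing area.
    ///     aspace.map_alloc(start, 0x4000, MappingFlags::READ | MappingFlags::WRITE, false)?;
    /// }
    /// ```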
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit, PAGE_SIZE_4K)
    }

    /// Adds a new linear mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
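    ///
    /// # Example
    ///
    /// A sketch of mapping a device MMIO region (illustrative; the addresses
    /// and the `DEVICE` attribute flag are assumptions for the example):
    ///
    /// ```rust,ignore
    /// // A linear mapping keeps a fixed vaddr-to-paddr offset, so no frames
    /// // are allocated; the physical range must already be valid.
    /// aspace.map_linear(
    ///     va!(0xffff_0000_1000_0000),
    ///     pa!(0x1000_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE,
    /// )?;
    /// ```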
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Adds a new allocation mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
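    ///
    /// # Example
    ///
    /// A sketch showing the effect of `populate` (illustrative; the addresses
    /// are made up):
    ///
    /// ```rust,ignore
    /// let flags = MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER;
    /// // Eager: physical frames are allocated and mapped immediately.
    /// aspace.map_alloc(va!(0x2000_0000), 0x8000, flags, true)?;
    /// // Lazy: frames are allocated on the first page fault in the area.
    /// aspace.map_alloc(va!(0x3000_0000), 0x8000, flags, false)?;
    /// ```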
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
    ) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Removes mappings within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Processes data in the given virtual address range with the provided
    /// function.
    ///
    /// Currently it is used to implement reading and writing data within the
    /// given range.
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // If `start` is 4K-aligned, `start.align_down_4k()` equals `start.align_up_4k()`.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

    /// Reads data from the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to read.
    /// * `buf` - The buffer to store the data.
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

    /// Writes data to the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to write.
    /// * `buf` - The buffer to write to the address space.
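    ///
    /// # Example
    ///
    /// A round-trip sketch using [`AddrSpace::read`] and this method
    /// (illustrative; it assumes the range starting at `0x1000_0000` is
    /// mapped, readable, and writable):
    ///
    /// ```rust,ignore
    /// aspace.write(va!(0x1000_0000), b"hello")?;
    /// let mut buf = [0u8; 5];
    /// aspace.read(va!(0x1000_0000), &mut buf)?;
    /// assert_eq!(&buf, b"hello");
    /// ```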
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

    /// Updates the mapping flags within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
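    ///
    /// # Example
    ///
    /// A sketch in the spirit of `mprotect`, dropping write permission from a
    /// previously mapped region (illustrative):
    ///
    /// ```rust,ignore
    /// aspace.map_alloc(va!(0x1000_0000), 0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE, true)?;
    /// // Make the region read-only; subsequent writes will page-fault.
    /// aspace.protect(va!(0x1000_0000), 0x4000, MappingFlags::READ)?;
    /// ```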
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        // TODO
        self.pt
            .protect_region(start, size, flags)
            .map_err(|_| AxError::BadState)?;
        Ok(())
    }

    /// Removes all mappings in the address space.
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

    /// Checks whether an access to the specified memory region is valid.
    ///
    /// Returns `true` if the memory region given by `range` is all mapped and
    /// has proper permission flags (i.e. containing `access_flags`).
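    ///
    /// A sketch of validating a user buffer before a syscall touches it
    /// (illustrative; `user_ptr` and `len` are assumed to come from the
    /// syscall arguments):
    ///
    /// ```rust,ignore
    /// if !aspace.can_access_range(user_ptr, len, MappingFlags::READ | MappingFlags::USER) {
    ///     return ax_err!(BadAddress, "invalid user buffer");
    /// }
    /// ```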
    pub fn can_access_range(
        &self,
        start: VirtAddr,
        size: usize,
        access_flags: MappingFlags,
    ) -> bool {
        let mut range = VirtAddrRange::from_start_size(start, size);
        for area in self.areas.iter() {
            if area.end() <= range.start {
                continue;
            }
            if area.start() > range.start {
                return false;
            }

            // This area overlaps with the memory region.
            if !area.flags().contains(access_flags) {
                return false;
            }

            range.start = area.end();
            if range.is_empty() {
                return true;
            }
        }

        false
    }

    /// Handles a page fault at the given address.
    ///
    /// `access_flags` indicates the access type that caused the page fault.
    ///
    /// Returns `true` if the page fault is handled successfully (not a real
    /// fault).
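    ///
    /// A sketch of how a trap handler might drive this (illustrative; the
    /// `current_aspace` accessor is hypothetical):
    ///
    /// ```rust,ignore
    /// fn on_page_fault(vaddr: VirtAddr, flags: PageFaultFlags) {
    ///     let mut aspace = current_aspace().lock(); // hypothetical accessor
    ///     if !aspace.handle_page_fault(vaddr, flags) {
    ///         // A real fault: the address is unmapped or the access is not
    ///         // permitted by the area's flags.
    ///         panic!("unhandled page fault at {:#x?}", vaddr);
    ///     }
    /// }
    /// ```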
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: PageFaultFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            let access_flags = MappingFlags::from_bits_truncate(access_flags.bits());
            if orig_flags.contains(access_flags) {
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}