axmm/aspace.rs

use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use axhal::trap::PageFaultFlags;
use memory_addr::{
    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
    /// Returns the address space base.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the address space end.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the address space size.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns the reference to the inner page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the root physical address of the inner page table.
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Checks if the address space contains the given address range.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

    /// Creates a new empty address space.
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

    /// Copies page table mappings from another address space.
    ///
    /// It copies the page table entries only rather than the memory regions,
    /// usually used to copy a portion of the kernel space mapping to the
    /// user space.
    ///
    /// Returns an error if the two address spaces overlap.
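    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; `kernel_aspace` and `user_aspace`
    /// are hypothetical, non-overlapping address spaces):
    ///
    /// ```ignore
    /// // Hypothetical spaces: share the kernel portion of the page table
    /// // with a newly created user space.
    /// user_aspace.copy_mappings_from(&kernel_aspace)?;
    /// ```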
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

    /// Finds a free area that can accommodate the given size.
    ///
    /// The search starts from the given hint address, and the area should be within the given limit range.
    ///
    /// Returns the start address of the free area. Returns `None` if no such area is found.
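    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; `aspace`, the hint and the size
    /// are hypothetical):
    ///
    /// ```ignore
    /// // Search the whole address space for a free 16 KiB slot near the hint.
    /// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
    /// if let Some(start) = aspace.find_free_area(VirtAddr::from(0x1000_0000), 0x4000, limit) {
    ///     // `start` is the beginning of an unmapped range of at least 16 KiB.
    /// }
    /// ```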
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit, PAGE_SIZE_4K)
    }

    /// Add a new linear mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
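    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; `aspace` and the addresses are
    /// hypothetical):
    ///
    /// ```ignore
    /// // Map 16 KiB of device memory at example addresses.
    /// aspace.map_linear(
    ///     VirtAddr::from(0xffff_0000_1000_0000),
    ///     PhysAddr::from(0x1000_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE,
    /// )?;
    /// ```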
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Add a new allocation mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
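    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; `aspace`, the address and the size
    /// are hypothetical):
    ///
    /// ```ignore
    /// // Reserve 1 MiB of user memory at an example address; with
    /// // `populate = false`, frames may be allocated lazily on first access.
    /// aspace.map_alloc(
    ///     VirtAddr::from(0x1000_0000),
    ///     0x10_0000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
    ///     false,
    /// )?;
    /// ```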
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
    ) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Removes mappings within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
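    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; assumes the hypothetical `aspace`
    /// previously mapped this range):
    ///
    /// ```ignore
    /// // Unmap 1 MiB at an example address.
    /// aspace.unmap(VirtAddr::from(0x1000_0000), 0x10_0000)?;
    /// ```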
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Processes data in the given virtual address range with the provided function.
    ///
    /// The function `f` receives the kernel-accessible address of each piece of
    /// data, the current offset into the range, and the number of bytes to
    /// process. It is currently used to implement reading from and writing to
    /// the address space.
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // Iterate over whole 4K pages covering `[start, start + size)`; the
        // first and last pages may be only partially covered.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            // For the first page, skip the bytes before `start` within the page.
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

    /// Reads data from the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to read.
    /// * `buf` - The buffer to store the data.
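    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; assumes the hypothetical `aspace`
    /// has this range mapped):
    ///
    /// ```ignore
    /// // Read 64 bytes starting at an example address.
    /// let mut buf = [0u8; 64];
    /// aspace.read(VirtAddr::from(0x1000_0000), &mut buf)?;
    /// ```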
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

    /// Writes data to the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to write.
    /// * `buf` - The buffer to write to the address space.
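    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; assumes the hypothetical `aspace`
    /// has this range mapped):
    ///
    /// ```ignore
    /// // Write a short byte string at an example address.
    /// aspace.write(VirtAddr::from(0x1000_0000), b"hello")?;
    /// ```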
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

    /// Updates the mapping flags within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
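    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; assumes the hypothetical `aspace`
    /// has this page mapped):
    ///
    /// ```ignore
    /// // Make an example page read-only.
    /// aspace.protect(VirtAddr::from(0x1000_0000), 0x1000, MappingFlags::READ)?;
    /// ```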
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        // TODO
        self.pt
            .protect_region(start, size, flags, true)
            .map_err(|_| AxError::BadState)?
            .ignore();
        Ok(())
    }

    /// Removes all mappings in the address space.
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

    /// Checks whether an access to the specified memory region is valid.
    ///
    /// Returns `true` if the memory region given by `start` and `size` is fully
    /// mapped and has the proper permission flags (i.e., contains `access_flags`).
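    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; `aspace`, `user_ptr` and `len` are
    /// hypothetical):
    ///
    /// ```ignore
    /// // Validate a user buffer before reading from it.
    /// if aspace.can_access_range(user_ptr, len, MappingFlags::READ | MappingFlags::USER) {
    ///     // The whole range is mapped with at least READ | USER permissions.
    /// }
    /// ```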
    pub fn can_access_range(
        &self,
        start: VirtAddr,
        size: usize,
        access_flags: MappingFlags,
    ) -> bool {
        let mut range = VirtAddrRange::from_start_size(start, size);
        for area in self.areas.iter() {
            if area.end() <= range.start {
                continue;
            }
            if area.start() > range.start {
                return false;
            }

            // This area overlaps with the memory region
            if !area.flags().contains(access_flags) {
                return false;
            }

            range.start = area.end();
            if range.is_empty() {
                return true;
            }
        }

        false
    }

    /// Handles a page fault at the given address.
    ///
    /// `access_flags` indicates the access type that caused the page fault.
    ///
    /// Returns `true` if the page fault is handled successfully (not a real
    /// fault).
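    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; the trap-handler context, `aspace`,
    /// `fault_vaddr` and `access_flags` are hypothetical):
    ///
    /// ```ignore
    /// // Called from a page-fault trap handler.
    /// if !aspace.handle_page_fault(fault_vaddr, access_flags) {
    ///     // A real fault: the address is unmapped or the access is not permitted.
    /// }
    /// ```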
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: PageFaultFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            if orig_flags.contains(access_flags) {
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}