use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use memory_addr::{
    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;

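/// The virtual memory address space.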
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
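    /// Returns the address space base.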
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

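    /// Returns the address space end.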
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

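    /// Returns the address space size in bytes.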
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

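    /// Returns a reference to the inner page table.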
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

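    /// Returns the root physical address of the inner page table.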
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

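    /// Checks if the address space contains the given address range.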
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

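    /// Creates a new empty address space covering `[base, base + size)`.
    ///
    /// Returns [`AxError::NoMemory`] if the root page table cannot be allocated.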
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

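    /// Copies page table mappings from another address space.
    ///
    /// Only the page table entries are copied; the memory areas of `other`
    /// are not duplicated.
    ///
    /// Returns an error if the two address spaces overlap.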
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

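    /// Searches for a free area of the given size.
    ///
    /// The search starts from `hint` and is restricted to `limit`. Returns
    /// the start address of a suitable area, or `None` if none is found.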
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit)
    }

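    /// Adds a new linear mapping, where `[start_vaddr, start_vaddr + size)`
    /// maps to `[start_paddr, start_paddr + size)` with the given `flags`.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.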
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        // The linear backend keeps the constant virtual-to-physical offset of the mapping.
        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

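    /// Adds a new mapping backed by allocated physical frames.
    ///
    /// If `populate` is `true`, frames are allocated when the mapping is
    /// created; otherwise allocation is deferred until the pages are first
    /// accessed (see [`Self::handle_page_fault`]).
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.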
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
    ) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

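    /// Removes the mappings within the given virtual address range.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.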
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

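    /// Walks the mapped pages covering `[start, start + size)` and calls `f`
    /// for each contiguous chunk, passing the chunk's kernel-accessible
    /// virtual address, its offset into the range, and its length in bytes.
    ///
    /// Used to implement [`Self::read`] and [`Self::write`].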
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // Iterate over every 4K page that overlaps `[start, start + size)`.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            // The first page may be accessed from an unaligned offset within the page.
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

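    /// Reads `buf.len()` bytes from the address space at `start` into `buf`.
    ///
    /// Returns an error if the range is outside the address space or if any
    /// page in the range is not mapped.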
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

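    /// Writes `buf.len()` bytes from `buf` into the address space at `start`.
    ///
    /// Returns an error if the range is outside the address space or if any
    /// page in the range is not mapped.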
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

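    /// Updates the mapping flags of the given virtual address range in the
    /// page table.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.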
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.pt
            .protect_region(start, size, flags, true)
            .map_err(|_| AxError::BadState)?
            .ignore();
        Ok(())
    }

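    /// Removes all memory areas and their mappings from the address space.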
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

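    /// Handles a page fault at the given address with the given access flags.
    ///
    /// Returns `true` if the fault was resolved, i.e., the address lies in a
    /// mapped area whose flags permit the access and whose backend handled
    /// the fault; returns `false` otherwise.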
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            if orig_flags.contains(access_flags) {
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}