1use core::fmt;
2
3use axerrno::{AxError, AxResult, ax_err};
4use axhal::mem::phys_to_virt;
5use axhal::paging::{MappingFlags, PageTable};
6use axhal::trap::PageFaultFlags;
7use memory_addr::{
8 MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
9};
10use memory_set::{MemoryArea, MemorySet};
11
12use crate::backend::Backend;
13use crate::mapping_err_to_ax_err;
14
/// A virtual address space: a contiguous range of virtual addresses, the set
/// of mapped areas inside it, and the hardware page table that backs them.
pub struct AddrSpace {
    /// The virtual address range managed by this address space.
    va_range: VirtAddrRange,
    /// The mapped memory areas, each with a [`Backend`] that implements its
    /// mapping strategy. Kept in sync with `pt` by `map_*`/`unmap`.
    areas: MemorySet<Backend>,
    /// The page table holding the actual hardware mappings.
    pt: PageTable,
}
21
impl AddrSpace {
    /// Returns the lowest virtual address of this address space.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the (exclusive) upper bound of this address space.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the size of this address space in bytes.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns a shared reference to the underlying page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the physical address of the page table root (e.g. for
    /// loading into the MMU's root register).
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Returns `true` if `[start, start + size)` lies entirely within this
    /// address space.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }
53
54 pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
56 Ok(Self {
57 va_range: VirtAddrRange::from_start_size(base, size),
58 areas: MemorySet::new(),
59 pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
60 })
61 }
62
    /// Copies the page-table mappings covering `other`'s entire range into
    /// this address space's page table.
    ///
    /// Note that only page-table entries are copied; `other`'s memory areas
    /// are NOT cloned into `self.areas`. Presumably used to share
    /// kernel-space mappings into a new address space — TODO confirm with
    /// callers.
    ///
    /// # Errors
    ///
    /// Returns [`AxError::InvalidInput`] if the two address spaces overlap.
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt
            .cursor()
            .copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }
79
    /// Finds a free, 4K-aligned gap of at least `size` bytes within `limit`,
    /// preferring an address at or after `hint`.
    ///
    /// Returns the start address of a suitable gap, or `None` if no such
    /// gap exists. The search itself is delegated to
    /// [`MemorySet::find_free_area`].
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit, PAGE_SIZE_4K)
    }
93
94 pub fn map_linear(
103 &mut self,
104 start_vaddr: VirtAddr,
105 start_paddr: PhysAddr,
106 size: usize,
107 flags: MappingFlags,
108 ) -> AxResult {
109 if !self.contains_range(start_vaddr, size) {
110 return ax_err!(InvalidInput, "address out of range");
111 }
112 if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
113 return ax_err!(InvalidInput, "address not aligned");
114 }
115
116 let offset = start_vaddr.as_usize() - start_paddr.as_usize();
117 let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
118 self.areas
119 .map(area, &mut self.pt, false)
120 .map_err(mapping_err_to_ax_err)?;
121 Ok(())
122 }
123
124 pub fn map_alloc(
133 &mut self,
134 start: VirtAddr,
135 size: usize,
136 flags: MappingFlags,
137 populate: bool,
138 ) -> AxResult {
139 if !self.contains_range(start, size) {
140 return ax_err!(InvalidInput, "address out of range");
141 }
142 if !start.is_aligned_4k() || !is_aligned_4k(size) {
143 return ax_err!(InvalidInput, "address not aligned");
144 }
145
146 let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
147 self.areas
148 .map(area, &mut self.pt, false)
149 .map_err(mapping_err_to_ax_err)?;
150 Ok(())
151 }
152
153 pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
158 if !self.contains_range(start, size) {
159 return ax_err!(InvalidInput, "address out of range");
160 }
161 if !start.is_aligned_4k() || !is_aligned_4k(size) {
162 return ax_err!(InvalidInput, "address not aligned");
163 }
164
165 self.areas
166 .unmap(start, size, &mut self.pt)
167 .map_err(mapping_err_to_ax_err)?;
168 Ok(())
169 }
170
    /// Applies `f` to each physical chunk backing the virtual range
    /// `[start, start + size)`.
    ///
    /// For every 4K page overlapping the range, `f` is called as
    /// `f(kaddr, offset, len)`, where `kaddr` is the chunk's address in the
    /// kernel's view of physical memory (via [`phys_to_virt`]), `offset` is
    /// the running byte offset from `start`, and `len` is the chunk length
    /// (at most one page; shorter for a partial first or last page).
    ///
    /// # Errors
    ///
    /// - [`AxError::InvalidInput`] if the range exceeds this address space.
    /// - [`AxError::BadAddress`] if any page in the range is not mapped.
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        // Bytes processed so far; also the `offset` argument passed to `f`.
        let mut cnt = 0;
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            // Translate this page; fails if the page is unmapped.
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            // Remaining bytes, capped to one page per iteration.
            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            // The first page may start mid-page: advance `paddr` to `start`'s
            // in-page offset and shrink the chunk so bytes before `start`
            // are never touched.
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }
204
    /// Reads `buf.len()` bytes from this address space, starting at virtual
    /// address `start`, into `buf`.
    ///
    /// # Errors
    ///
    /// [`AxError::InvalidInput`] if the range exceeds this address space,
    /// [`AxError::BadAddress`] if any page in the range is unmapped.
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            // SAFETY: `src` points at `read_size` readable bytes of a mapped
            // page chunk, and `offset + read_size <= buf.len()` by
            // construction in `process_area_data`. The two regions do not
            // overlap assuming `buf` is not itself inside the mapped range
            // being read — TODO confirm callers uphold this.
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }
216
    /// Writes the contents of `buf` into this address space, starting at
    /// virtual address `start`.
    ///
    /// # Errors
    ///
    /// [`AxError::InvalidInput`] if the range exceeds this address space,
    /// [`AxError::BadAddress`] if any page in the range is unmapped.
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            // SAFETY: `dst` points at `write_size` writable bytes of a mapped
            // page chunk, and `offset + write_size <= buf.len()` by
            // construction in `process_area_data`. The two regions do not
            // overlap assuming `buf` is not itself inside the mapped range
            // being written — TODO confirm callers uphold this.
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }
228
    /// Changes the mapping flags of `[start, start + size)` to `flags` in
    /// the page table.
    ///
    /// NOTE(review): only the page-table entries are updated here; the flags
    /// stored in `self.areas` are left unchanged, so `area.flags()` (as
    /// consulted by `can_access_range` and `handle_page_fault`) may become
    /// stale after this call — confirm this is intended.
    ///
    /// # Errors
    ///
    /// [`AxError::InvalidInput`] for an out-of-range or unaligned request,
    /// [`AxError::BadState`] if the page-table update fails.
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.pt
            .cursor()
            .protect_region(start, size, flags)
            .map_err(|_| AxError::BadState)?;
        Ok(())
    }
248
249 pub fn clear(&mut self) {
251 self.areas.clear(&mut self.pt).unwrap();
252 }
253
254 pub fn can_access_range(
259 &self,
260 start: VirtAddr,
261 size: usize,
262 access_flags: MappingFlags,
263 ) -> bool {
264 let mut range = VirtAddrRange::from_start_size(start, size);
265 for area in self.areas.iter() {
266 if area.end() <= range.start {
267 continue;
268 }
269 if area.start() > range.start {
270 return false;
271 }
272
273 if !area.flags().contains(access_flags) {
275 return false;
276 }
277
278 range.start = area.end();
279 if range.is_empty() {
280 return true;
281 }
282 }
283
284 false
285 }
286
    /// Attempts to resolve a page fault at `vaddr` with the given hardware
    /// access flags.
    ///
    /// Returns `true` if the fault was handled by the owning area's backend
    /// (the faulting access may be retried); `false` if `vaddr` is outside
    /// this address space, not covered by any area, or the access violates
    /// the area's permissions.
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: PageFaultFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            // NOTE(review): this reinterprets the fault's bit pattern as
            // `MappingFlags`, which assumes the two bitflag types share a
            // bit layout — confirm they are kept in sync.
            let access_flags = MappingFlags::from_bits_truncate(access_flags.bits());
            if orig_flags.contains(access_flags) {
                // Permission OK: let the backend materialize the mapping
                // (e.g. lazy allocation).
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}
309
impl fmt::Debug for AddrSpace {
    // Shows the range, the page-table root physical address (not the table's
    // contents), and the area set.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}
319
impl Drop for AddrSpace {
    // Ensure all areas are unmapped from the page table when the address
    // space goes away.
    fn drop(&mut self) {
        self.clear();
    }
}