use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use axhal::trap::PageFaultFlags;
use memory_addr::{
    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;

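/// The virtual memory address space.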
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
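    /// Returns the address space base.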
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

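    /// Returns the address space end.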
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

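    /// Returns the address space size in bytes.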
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

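    /// Returns a reference to the inner page table.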
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

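    /// Returns the root physical address of the inner page table.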
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

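    /// Checks whether the address space fully contains the given address range.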
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

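    /// Creates a new empty address space covering the given range, with a
    /// freshly allocated page table and no memory areas.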
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

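    /// Copies page table mappings from another address space.
    ///
    /// Only the page table entries are copied; the memory areas of `other`
    /// are not duplicated into this address space.
    ///
    /// Returns an error if the two address spaces overlap.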
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

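    /// Searches for a free area of the given size within `limit`, starting
    /// from the `hint` address and using 4K page alignment.
    ///
    /// Returns `None` if no suitable area is found.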
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
    ) -> Option<VirtAddr> {
        self.areas.find_free_area(hint, size, limit, PAGE_SIZE_4K)
    }

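    /// Adds a new linear mapping, i.e., `start_vaddr` maps to `start_paddr`
    /// and the two ranges advance together for `size` bytes.
    ///
    /// The `flags` parameter specifies the mapping permissions and attributes.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.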
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

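    /// Adds a new allocation mapping backed by the allocation backend.
    /// Physical frames are allocated for the whole range up front if
    /// `populate` is `true`; otherwise they are expected to be allocated
    /// lazily when the pages are first accessed.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.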
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
    ) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

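    /// Removes the mappings within the specified virtual address range.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.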
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

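    /// Walks the mapped pages covering `[start, start + size)` and invokes `f`
    /// for each piece with its kernel-accessible virtual address, the number
    /// of bytes processed so far, and the number of bytes to process in this
    /// piece.
    ///
    /// Returns an error if the range is outside the address space or any page
    /// in it is not mapped.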
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // Iterate over the 4K pages that cover `[start, start + size)`.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            // The first page may be processed at a non-zero offset if `start`
            // is not 4K-aligned.
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

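    /// Reads data from the address space into the given buffer.
    ///
    /// Returns an error if the range is outside the address space or not
    /// fully mapped.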
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

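    /// Writes data from the given buffer into the address space.
    ///
    /// Returns an error if the range is outside the address space or not
    /// fully mapped.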
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

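    /// Updates the mapping flags of the specified virtual address range in the
    /// page table. Note that only the page table entries are changed; the
    /// flags recorded in the corresponding memory areas are left untouched.
    ///
    /// Returns an error if the range is outside the address space or not
    /// 4K-aligned.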
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        self.pt
            .protect_region(start, size, flags, true)
            .map_err(|_| AxError::BadState)?
            .ignore();
        Ok(())
    }

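    /// Removes all memory areas and their mappings from the address space.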
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

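    /// Checks whether an access to the specified memory range is valid, i.e.,
    /// the whole range is covered by memory areas whose flags contain
    /// `access_flags`.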
    pub fn can_access_range(
        &self,
        start: VirtAddr,
        size: usize,
        access_flags: MappingFlags,
    ) -> bool {
        let mut range = VirtAddrRange::from_start_size(start, size);
        for area in self.areas.iter() {
            if area.end() <= range.start {
                // The area lies entirely before the remaining range; skip it.
                continue;
            }
            if area.start() > range.start {
                // There is a gap before this area, so the range is not fully covered.
                return false;
            }

            if !area.flags().contains(access_flags) {
                // The area does not permit the requested access.
                return false;
            }

            // This area covers the range up to its end; continue with the rest.
            range.start = area.end();
            if range.is_empty() {
                return true;
            }
        }

        false
    }

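    /// Handles a page fault at the given address with the given access flags.
    ///
    /// Returns `true` if the fault was handled by the backend of the memory
    /// area containing the address, or `false` if the address is outside the
    /// address space, not covered by any area, or the access violates the
    /// area's flags.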
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: PageFaultFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            if orig_flags.contains(access_flags) {
                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}