use core::fmt;

use axerrno::{ax_err, AxError, AxResult};
use axhal::{
    mem::phys_to_virt,
    paging::{MappingFlags, PageTable},
};
use memory_addr::{
    is_aligned_4k, pa, MemoryAddr, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, PAGE_SIZE_4K,
};

use crate::paging_err_to_ax_err;

/// The virtual memory address space.
pub struct AddrSpace {
    /// The virtual address range covered by this address space.
    va_range: VirtAddrRange,
    /// The underlying page table.
    pt: PageTable,
}

impl AddrSpace {
    /// Returns the address space base.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the address space end.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the address space size.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns a reference to the inner page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the root physical address of the inner page table.
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Checks whether the address space contains the given address range.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

    /// Creates a new empty address space covering `[base, base + size)`.
    pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }
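
    /// Adds a new linear mapping: `[start_vaddr, start_vaddr + size)` is
    /// mapped to `[start_paddr, start_paddr + size)` at a fixed offset, so
    /// `start_vaddr + off` always translates to `start_paddr + off`.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the range is out of the address space, or if any
    /// of the addresses or the size is not 4K-aligned.
    ///
    /// # Example
    ///
    /// A minimal sketch of how this could be called from inside the crate
    /// (`new_empty` is crate-private); the addresses, size, and flags are
    /// illustrative assumptions, not values mandated by this module:
    ///
    /// ```ignore
    /// use memory_addr::{pa, va, PAGE_SIZE_4K};
    ///
    /// let mut aspace = AddrSpace::new_empty(va!(0xffff_8000_0000_0000), 0x1_0000_0000)?;
    /// // Map 16 KiB of physical memory at 0x1000_0000, readable and writable.
    /// aspace.map_linear(
    ///     va!(0xffff_8000_1000_0000),
    ///     pa!(0x1000_0000),
    ///     4 * PAGE_SIZE_4K,
    ///     MappingFlags::READ | MappingFlags::WRITE,
    /// )?;
    /// ```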
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
    ) -> AxResult {
        if !self.contains_range(start_vaddr, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }
        // The mapping is linear, so the virtual-to-physical offset is the same
        // for every page in the region.
        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        self.pt
            .map_region(
                start_vaddr,
                |va| pa!(va.as_usize() - offset),
                size,
                flags,
                false, // allow_huge
                false, // flush_tlb_by_page
            )
            .map_err(paging_err_to_ax_err)?
            .flush_all();
        Ok(())
    }
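
    /// Removes the mappings in the virtual address range `[start, start + size)`.
    ///
    /// Returns an error if the range is out of the address space, or if the
    /// start address or the size is not 4K-aligned.
    ///
    /// # Example
    ///
    /// A sketch continuing the `map_linear` example above (same illustrative
    /// address and size):
    ///
    /// ```ignore
    /// // Remove the 16 KiB mapping created by `map_linear`.
    /// aspace.unmap(va!(0xffff_8000_1000_0000), 4 * PAGE_SIZE_4K)?;
    /// ```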
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }
        self.pt
            .unmap_region(start, size, true)
            .map_err(paging_err_to_ax_err)?
            .ignore(); // the TLB is already flushed page by page above
        Ok(())
    }
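
    /// Walks the mapped pages covering `[start, start + size)` and calls
    /// `f(kvaddr, offset, len)` once per contiguous chunk, where `kvaddr` is
    /// the kernel-virtual alias of the backing physical memory (obtained via
    /// `phys_to_virt`), `offset` is the chunk's position within the range,
    /// and `len` is the chunk length in bytes.
    ///
    /// For example (illustrative numbers): processing 6000 bytes starting
    /// 0x123 bytes into a page yields two chunks of 3805 and 2195 bytes,
    /// because the first chunk is clipped at its page boundary.
    ///
    /// Returns an error if the range is out of the address space, or
    /// `BadAddress` if some page in the range is unmapped.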
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // Round the end up so the iterator also covers a trailing partial page.
        let end_align_up = (start + size).align_up_4k();
        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);
            if copy_size == 0 {
                break;
            }
            // The first page may be partially covered if `start` is unaligned:
            // clip the chunk to the end of the page and skip to the data start.
            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
                let align_offset = start.align_offset_4k();
                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }
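
    /// Reads `buf.len()` bytes from the address space into `buf`, starting at
    /// virtual address `start`.
    ///
    /// # Example
    ///
    /// A round-trip sketch (illustrative address; the range must already be
    /// mapped):
    ///
    /// ```ignore
    /// let data = [0xa5u8; 128];
    /// aspace.write(va!(0xffff_8000_1000_0000), &data)?;
    /// let mut readback = [0u8; 128];
    /// aspace.read(va!(0xffff_8000_1000_0000), &mut readback)?;
    /// assert_eq!(data, readback);
    /// ```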
    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
            // SAFETY: `src` is the kernel alias of `read_size` mapped bytes,
            // and `offset + read_size <= buf.len()` by construction.
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }
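
    /// Writes the bytes in `buf` to the address space, starting at virtual
    /// address `start` (see the round-trip example on [`Self::read`]).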
    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
            // SAFETY: `dst` is the kernel alias of `write_size` mapped bytes,
            // and `offset + write_size <= buf.len()` by construction.
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }
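
    /// Updates the mapping flags (permissions and attributes) of the virtual
    /// address range `[start, start + size)` without changing the
    /// translations themselves.
    ///
    /// Returns an error if the range is out of the address space, or if the
    /// start address or the size is not 4K-aligned.
    ///
    /// # Example
    ///
    /// A sketch that makes a previously writable region read-only
    /// (illustrative values):
    ///
    /// ```ignore
    /// aspace.protect(va!(0xffff_8000_1000_0000), 4 * PAGE_SIZE_4K, MappingFlags::READ)?;
    /// ```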
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned_4k() || !is_aligned_4k(size) {
            return ax_err!(InvalidInput, "address not aligned");
        }
        self.pt
            .protect_region(start, size, flags, true)
            .map_err(paging_err_to_ax_err)?
            .ignore(); // the TLB is already flushed page by page above
        Ok(())
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .finish()
    }
}