use core::fmt;

use memory_addr::{AddrRange, MemoryAddr};

use crate::{MappingBackend, MappingError, MappingResult};

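/// A memory area represents a continuous range of virtual memory with the
/// same flags.
///
/// The target physical memory frames are determined by the [`MappingBackend`]
/// and may not be contiguous.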
pub struct MemoryArea<B: MappingBackend> {
    va_range: AddrRange<B::Addr>,
    flags: B::Flags,
    backend: B,
}

impl<B: MappingBackend> MemoryArea<B> {
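    /// Creates a new memory area.
    ///
    /// This only records the range, flags, and backend; it does not touch
    /// the page table. The mapping is established later via `map_area`.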
    pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
        Self {
            va_range: AddrRange::from_start_size(start, size),
            flags,
            backend,
        }
    }

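    /// Returns the virtual address range of the memory area.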
    pub const fn va_range(&self) -> AddrRange<B::Addr> {
        self.va_range
    }

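    /// Returns the memory flags, e.g., the permission bits.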
    pub const fn flags(&self) -> B::Flags {
        self.flags
    }

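    /// Returns the start address of the memory area.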
    pub const fn start(&self) -> B::Addr {
        self.va_range.start
    }

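    /// Returns the end address of the memory area.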
    pub const fn end(&self) -> B::Addr {
        self.va_range.end
    }

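    /// Returns the size of the memory area in bytes.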
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

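    /// Returns the mapping backend of the memory area.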
    pub const fn backend(&self) -> &B {
        &self.backend
    }
}

impl<B: MappingBackend> MemoryArea<B> {
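    /// Changes the flags stored in this area without touching the page
    /// table (see `protect_area` for that).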
    pub(crate) fn set_flags(&mut self, new_flags: B::Flags) {
        self.flags = new_flags;
    }

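    /// Changes the end address of the memory area without unmapping or
    /// remapping anything.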
    pub(crate) fn set_end(&mut self, new_end: B::Addr) {
        self.va_range.end = new_end;
    }

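    /// Maps the whole memory area in the page table via the backend.
    ///
    /// Returns [`MappingError::BadState`] if the backend reports failure.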
    pub(crate) fn map_area(&self, page_table: &mut B::PageTable) -> MappingResult {
        self.backend
            .map(self.start(), self.size(), self.flags, page_table)
            .then_some(())
            .ok_or(MappingError::BadState)
    }

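    /// Unmaps the whole memory area in the page table via the backend.
    ///
    /// Returns [`MappingError::BadState`] if the backend reports failure.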
    pub(crate) fn unmap_area(&self, page_table: &mut B::PageTable) -> MappingResult {
        self.backend
            .unmap(self.start(), self.size(), page_table)
            .then_some(())
            .ok_or(MappingError::BadState)
    }

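    /// Changes the flags of the mapped region in the page table.
    ///
    /// Returns [`MappingError::BadState`] if the backend reports failure.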
    pub(crate) fn protect_area(
        &mut self,
        new_flags: B::Flags,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        // Propagate the backend's result, as `map_area` and `unmap_area` do,
        // instead of silently discarding a failed protect.
        self.backend
            .protect(self.start(), self.size(), new_flags, page_table)
            .then_some(())
            .ok_or(MappingError::BadState)
    }

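    /// Shrinks the memory area at the left (start) side.
    ///
    /// The first `size - new_size` bytes are unmapped and the start address
    /// is advanced accordingly, leaving an area of `new_size` bytes.
    ///
    /// # Panics
    ///
    /// Panics if `new_size` is zero or not smaller than the current size.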
    pub(crate) fn shrink_left(
        &mut self,
        new_size: usize,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        assert!(new_size > 0 && new_size < self.size());

        let old_size = self.size();
        let unmap_size = old_size - new_size;

        if !self.backend.unmap(self.start(), unmap_size, page_table) {
            return Err(MappingError::BadState);
        }
        // `unmap_size` is strictly less than the current size, so this
        // wrapping addition can never actually overflow.
        self.va_range.start = self.va_range.start.wrapping_add(unmap_size);
        Ok(())
    }

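    /// Shrinks the memory area at the right (end) side.
    ///
    /// The last `size - new_size` bytes are unmapped and the end address is
    /// moved back accordingly, leaving an area of `new_size` bytes.
    ///
    /// # Panics
    ///
    /// Panics if `new_size` is zero or not smaller than the current size.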
    pub(crate) fn shrink_right(
        &mut self,
        new_size: usize,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        assert!(new_size > 0 && new_size < self.size());
        let old_size = self.size();
        let unmap_size = old_size - new_size;

        // The unmapped part starts `new_size` bytes after the area start;
        // `new_size < old_size`, so it stays within the original range.
        let unmap_start = self.start().wrapping_add(new_size);

        if !self.backend.unmap(unmap_start, unmap_size, page_table) {
            return Err(MappingError::BadState);
        }

        // `unmap_size` is strictly less than the current size, so this
        // wrapping subtraction can never actually underflow.
        self.va_range.end = self.va_range.end.wrapping_sub(unmap_size);
        Ok(())
    }

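    /// Splits the memory area at the given position.
    ///
    /// The original area is shrunk to `[start, pos)` and a new area covering
    /// `[pos, end)` with the same flags and a cloned backend is returned.
    ///
    /// Returns `None` (and changes nothing) if `pos` is not strictly inside
    /// the area, since that would leave one of the parts empty.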
    pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
        if self.start() < pos && pos < self.end() {
            let new_area = Self::new(
                pos,
                // `pos` lies strictly inside the range, so the wrapping
                // subtraction can never actually underflow.
                self.end().wrapping_sub_addr(pos),
                self.flags,
                self.backend.clone(),
            );
            self.va_range.end = pos;
            Some(new_area)
        } else {
            None
        }
    }
}

impl<B: MappingBackend> fmt::Debug for MemoryArea<B>
where
    B::Addr: fmt::Debug,
    B::Flags: fmt::Debug + Copy,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MemoryArea")
            .field("va_range", &self.va_range)
            .field("flags", &self.flags)
            .finish()
    }
}