axplat/mem.rs
//! Physical memory information.

use core::ops::{Deref, DerefMut};
use core::{fmt, ops::Range};

pub use memory_addr::{PAGE_SIZE_4K, PhysAddr, VirtAddr, pa, va};

bitflags::bitflags! {
    /// The flags of a physical memory region.
    #[derive(Clone, Copy)]
    pub struct MemRegionFlags: usize {
        /// Readable.
        const READ = 1 << 0;
        /// Writable.
        const WRITE = 1 << 1;
        /// Executable.
        const EXECUTE = 1 << 2;
        /// Device memory. (e.g., MMIO regions)
        const DEVICE = 1 << 4;
        /// Uncachable memory. (e.g., framebuffer)
        const UNCACHED = 1 << 5;
        /// Reserved memory, do not use for allocation.
        const RESERVED = 1 << 6;
        /// Free memory for allocation.
        const FREE = 1 << 7;
    }
}

impl fmt::Debug for MemRegionFlags {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}

/// The default flags for a normal memory region (readable, writable and allocatable).
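///
/// # Example
///
/// A small sketch of querying the composed flags (nothing here is
/// platform-specific):
///
/// ```rust
/// # use axplat::mem::{DEFAULT_RAM_FLAGS, MemRegionFlags};
/// // RAM regions are allocatable by default...
/// assert!(DEFAULT_RAM_FLAGS.contains(MemRegionFlags::FREE));
/// // ...and are not marked as reserved or device memory.
/// assert!(!DEFAULT_RAM_FLAGS.intersects(MemRegionFlags::RESERVED | MemRegionFlags::DEVICE));
/// ```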
pub const DEFAULT_RAM_FLAGS: MemRegionFlags = MemRegionFlags::READ
    .union(MemRegionFlags::WRITE)
    .union(MemRegionFlags::FREE);

/// The default flags for a reserved memory region (readable, writable, and reserved).
pub const DEFAULT_RESERVED_FLAGS: MemRegionFlags = MemRegionFlags::READ
    .union(MemRegionFlags::WRITE)
    .union(MemRegionFlags::RESERVED);

/// The default flags for an MMIO region (readable, writable, device, and reserved).
pub const DEFAULT_MMIO_FLAGS: MemRegionFlags = MemRegionFlags::READ
    .union(MemRegionFlags::WRITE)
    .union(MemRegionFlags::DEVICE)
    .union(MemRegionFlags::RESERVED);

/// A raw memory range represented as a `(start, size)` pair.
pub type RawRange = (usize, usize);

/// A wrapper type that aligns the inner value to a 4K-byte boundary.
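///
/// # Example
///
/// A minimal sketch showing that the wrapped value is 4K-aligned and can be
/// accessed transparently through `Deref`:
///
/// ```rust
/// # use axplat::mem::Aligned4K;
/// let buf = Aligned4K::new([0u8; 32]);
/// assert_eq!(core::mem::align_of_val(&buf), 4096);
/// assert_eq!(buf.len(), 32); // `Deref` forwards to the inner array.
/// ```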
#[repr(align(4096))]
pub struct Aligned4K<T: Sized>(T);

impl<T: Sized> Aligned4K<T> {
    /// Creates a new [`Aligned4K`] instance with the given value.
    pub const fn new(value: T) -> Self {
        Self(value)
    }
}

impl<T> Deref for Aligned4K<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for Aligned4K<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// A physical memory region.
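///
/// # Example
///
/// A brief sketch of describing regions with the convenience constructors
/// (the addresses and sizes below are made up for illustration):
///
/// ```rust
/// # use axplat::mem::{MemRegionFlags, PhysMemRegion};
/// let ram = PhysMemRegion::new_ram(0x8000_0000, 0x1000_0000, "free memory");
/// let uart = PhysMemRegion::new_mmio(0x0900_0000, 0x1000, "uart");
/// assert!(ram.flags.contains(MemRegionFlags::FREE));
/// assert!(uart.flags.contains(MemRegionFlags::DEVICE));
/// ```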
#[derive(Debug, Clone, Copy)]
pub struct PhysMemRegion {
    /// The start physical address of the region.
    pub paddr: PhysAddr,
    /// The size in bytes of the region.
    pub size: usize,
    /// The region flags, see [`MemRegionFlags`].
    pub flags: MemRegionFlags,
    /// The region name, used for identification.
    pub name: &'static str,
}

impl PhysMemRegion {
    /// Creates a RAM region with default flags (readable, writable, and allocatable).
    pub const fn new_ram(start: usize, size: usize, name: &'static str) -> Self {
        Self {
            paddr: PhysAddr::from_usize(start),
            size,
            flags: DEFAULT_RAM_FLAGS,
            name,
        }
    }

    /// Creates an MMIO region with default flags (readable, writable, device, and reserved).
    pub const fn new_mmio(start: usize, size: usize, name: &'static str) -> Self {
        Self {
            paddr: PhysAddr::from_usize(start),
            size,
            flags: DEFAULT_MMIO_FLAGS,
            name,
        }
    }

    /// Creates a reserved memory region with default flags (readable, writable, and reserved).
    pub const fn new_reserved(start: usize, size: usize, name: &'static str) -> Self {
        Self {
            paddr: PhysAddr::from_usize(start),
            size,
            flags: DEFAULT_RESERVED_FLAGS,
            name,
        }
    }
}

/// Physical memory interface.
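///
/// # Example
///
/// A rough sketch of a platform-side implementation with a fixed linear
/// offset between physical and virtual addresses. The constants, the MMIO
/// range, and the `#[axplat::impl_plat_interface]` registration attribute
/// are assumptions for illustration, so the example is not compiled here:
///
/// ```rust,ignore
/// use axplat::mem::{MemIf, PhysAddr, RawRange, VirtAddr, pa, va};
///
/// /// Hypothetical linear mapping offset of this platform.
/// const PHYS_VIRT_OFFSET: usize = 0xffff_0000_0000_0000;
///
/// struct MemIfImpl;
///
/// #[axplat::impl_plat_interface]
/// impl MemIf for MemIfImpl {
///     fn phys_ram_ranges() -> &'static [RawRange] {
///         &[(0x8000_0000, 0x8000_0000)] // 2 GiB of RAM starting at 2 GiB.
///     }
///
///     fn reserved_phys_ram_ranges() -> &'static [RawRange] {
///         &[] // Nothing reserved besides the kernel image itself.
///     }
///
///     fn mmio_ranges() -> &'static [RawRange] {
///         &[(0x0900_0000, 0x1000)] // A single MMIO device.
///     }
///
///     fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
///         va!(paddr.as_usize() + PHYS_VIRT_OFFSET)
///     }
///
///     fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
///         pa!(vaddr.as_usize() - PHYS_VIRT_OFFSET)
///     }
/// }
/// ```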
#[def_plat_interface]
pub trait MemIf {
    /// Returns all physical memory (RAM) ranges on the platform.
    ///
    /// All memory ranges except reserved ranges (including the range where
    /// the kernel is loaded) are free for allocation.
    fn phys_ram_ranges() -> &'static [RawRange];

    /// Returns all reserved physical memory ranges on the platform.
    ///
    /// Reserved memory may be contained in [`phys_ram_ranges`]; it is not
    /// allocatable but should be mapped into the kernel's address space.
    ///
    /// Note that the returned ranges should not include the range where the
    /// kernel is loaded.
    fn reserved_phys_ram_ranges() -> &'static [RawRange];

    /// Returns all device memory (MMIO) ranges on the platform.
    fn mmio_ranges() -> &'static [RawRange];

    /// Translates a physical address to a virtual address.
    ///
    /// It is just an easy way to access physical memory when virtual memory
    /// is enabled. The mapping may not be unique; multiple `vaddr`s can be
    /// mapped to the same `paddr`.
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr;

    /// Translates a virtual address to a physical address.
    ///
    /// It is the reverse operation of [`phys_to_virt`]. It requires that
    /// `vaddr` be obtainable through the [`phys_to_virt`] translation; it
    /// **cannot** be used to translate arbitrary virtual addresses.
    fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr;
}

/// Returns the total size of physical memory (RAM) on the platform.
///
/// It should be equal to the sum of the sizes of all physical memory ranges
/// (returned by [`phys_ram_ranges`]).
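///
/// # Example
///
/// A hypothetical call site. It requires a platform implementation of
/// [`MemIf`] to be linked in, so it is not compiled as a doctest:
///
/// ```rust,ignore
/// let bytes = axplat::mem::total_ram_size();
/// assert!(bytes > 0);
/// ```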
pub fn total_ram_size() -> usize {
    phys_ram_ranges().iter().map(|range| range.1).sum()
}

/// The error type for the overlapping check.
///
/// It contains the pair of overlapping ranges.
pub type OverlapErr = (Range<usize>, Range<usize>);

/// Checks if the given ranges overlap.
///
/// Returns `Err` with one pair of overlapping ranges if any ranges overlap.
///
/// The given ranges should be sorted by their start; otherwise it always
/// returns `Err`.
///
/// # Example
///
/// ```rust
/// # use axplat::mem::check_sorted_ranges_overlap;
/// assert!(check_sorted_ranges_overlap([(0, 10), (10, 10)].into_iter()).is_ok());
/// assert_eq!(
///     check_sorted_ranges_overlap([(0, 10), (5, 10)].into_iter()),
///     Err((0..10, 5..15))
/// );
/// ```
pub fn check_sorted_ranges_overlap(
    ranges: impl Iterator<Item = RawRange>,
) -> Result<(), OverlapErr> {
    let mut prev = Range::default();
    for (start, size) in ranges {
        if prev.end > start {
            return Err((prev, start..start + size));
        }
        prev = start..start + size;
    }
    Ok(())
}

/// Removes a portion of ranges from the given ranges.
///
/// `from` is the list of ranges to be operated on, and `exclude` is the list
/// of ranges to be removed. `exclude` should be sorted by the start and
/// contain non-overlapping ranges; if not, an error is returned.
///
/// The result is also a list of ranges, with each range contained in `from`
/// but not in `exclude`. `result_op` is a closure that is called for each
/// range in the result.
///
/// # Example
///
/// ```rust
/// # use axplat::mem::ranges_difference;
/// let mut res = Vec::new();
/// // 0..10, 20..30 - 5..15, 15..25 = 0..5, 25..30
/// ranges_difference(&[(0, 10), (20, 10)], &[(5, 10), (15, 10)], |r| res.push(r)).unwrap();
/// assert_eq!(res, &[(0, 5), (25, 5)]);
/// ```
pub fn ranges_difference<F>(
    from: &[RawRange],
    exclude: &[RawRange],
    mut result_op: F,
) -> Result<(), OverlapErr>
where
    F: FnMut(RawRange),
{
    check_sorted_ranges_overlap(exclude.iter().cloned())?;

    for &(start, size) in from {
        let mut start = start;
        let end = start + size;

        for &(exclude_start, exclude_size) in exclude {
            let exclude_end = exclude_start + exclude_size;
            if exclude_end <= start {
                continue;
            } else if exclude_start >= end {
                break;
            } else if exclude_start > start {
                result_op((start, exclude_start - start));
            }
            start = exclude_end;
        }
        if start < end {
            result_op((start, end - start));
        }
    }
    Ok(())
}

#[cfg(test)]
mod tests {
    #[test]
    fn check_sorted_ranges_overlap() {
        use super::check_sorted_ranges_overlap as f;

        assert!(f([(0, 10), (10, 10), (20, 10)].into_iter()).is_ok());
        assert!(f([(0, 10), (20, 10), (40, 10)].into_iter()).is_ok());
        assert_eq!(f([(0, 1), (0, 2)].into_iter()), Err((0..1, 0..2)));
        assert_eq!(
            f([(0, 11), (10, 10), (20, 10)].into_iter()),
            Err((0..11, 10..20)),
        );
        assert_eq!(
            f([(0, 10), (20, 10), (10, 10)].into_iter()),
            Err((20..30, 10..20)), // not sorted
        );
    }

    #[test]
    fn ranges_difference() {
        let f = |from, exclude| {
            let mut res = Vec::new();
            super::ranges_difference(from, exclude, |r| res.push(r)).unwrap();
            res
        };

        // 0..10, 20..30
        assert_eq!(
            f(&[(0, 10), (20, 10)], &[(5, 5), (25, 5)]), // - 5..10, 25..30
            &[(0, 5), (20, 5)]                           // = 0..5, 20..25
        );
        assert_eq!(
            f(&[(0, 10), (20, 10)], &[(5, 10), (15, 5)]), // - 5..15, 15..20
            &[(0, 5), (20, 10)]                           // = 0..5, 20..30
        );
        assert_eq!(
            f(&[(0, 10), (20, 10)], &[(5, 1), (25, 1), (30, 1)]), // - 5..6, 25..26, 30..31
            &[(0, 5), (6, 4), (20, 5), (26, 4)]                   // = 0..5, 6..10, 20..25, 26..30
        );

        // 0..10, 20..30
        assert_eq!(f(&[(0, 10), (20, 10)], &[(5, 20)]), &[(0, 5), (25, 5)]); // - 5..25 = 0..5, 25..30
        assert_eq!(f(&[(0, 10), (20, 10)], &[(0, 30)]), &[]); // - 0..30 = []

        // 0..30
        assert_eq!(
            f(&[(0, 30)], &[(0, 5), (10, 5), (20, 5)]), // - 0..5, 10..15, 20..25
            &[(5, 5), (15, 5), (25, 5)]                 // = 5..10, 15..20, 25..30
        );
        assert_eq!(
            f(
                &[(0, 30)],
                &[(0, 5), (5, 5), (10, 5), (15, 5), (20, 5), (25, 5)] // - 0..5, 5..10, 10..15, 15..20, 20..25, 25..30
            ),
            &[] // = []
        );

        // 10..20
        assert_eq!(f(&[(10, 10)], &[(0, 30)]), &[]); // - 0..30 = []
    }
}