memory_set/area.rs

use core::fmt;

use memory_addr::{AddrRange, MemoryAddr};

use crate::{MappingBackend, MappingError, MappingResult};

/// A memory area represents a contiguous range of virtual memory with the same
/// flags.
///
/// The target physical memory frames are determined by [`MappingBackend`] and
/// may not be contiguous.
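///
/// # Example
///
/// A minimal usage sketch (not compiled here). `DummyBackend` is a hypothetical
/// type implementing [`MappingBackend`] with `Addr = usize` and `Flags = u8`;
/// the flag value is likewise illustrative.
///
/// ```ignore
/// let area = MemoryArea::new(0x1000, 0x4000, 0b11, DummyBackend);
/// assert_eq!(area.start(), 0x1000);
/// assert_eq!(area.end(), 0x5000);
/// assert_eq!(area.size(), 0x4000);
/// ```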
pub struct MemoryArea<B: MappingBackend> {
    va_range: AddrRange<B::Addr>,
    flags: B::Flags,
    backend: B,
}

impl<B: MappingBackend> MemoryArea<B> {
    /// Creates a new memory area.
    ///
    /// # Panics
    ///
    /// Panics if `start + size` overflows.
    pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
        Self {
            va_range: AddrRange::from_start_size(start, size),
            flags,
            backend,
        }
    }

    /// Returns the virtual address range.
    pub const fn va_range(&self) -> AddrRange<B::Addr> {
        self.va_range
    }

    /// Returns the memory flags, e.g., the permission bits.
    pub const fn flags(&self) -> B::Flags {
        self.flags
    }

    /// Returns the start address of the memory area.
    pub const fn start(&self) -> B::Addr {
        self.va_range.start
    }

    /// Returns the end address of the memory area.
    pub const fn end(&self) -> B::Addr {
        self.va_range.end
    }

    /// Returns the size of the memory area.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns the mapping backend of the memory area.
    pub const fn backend(&self) -> &B {
        &self.backend
    }
}

impl<B: MappingBackend> MemoryArea<B> {
    /// Changes the flags.
    pub(crate) fn set_flags(&mut self, new_flags: B::Flags) {
        self.flags = new_flags;
    }

    /// Changes the end address of the memory area.
    pub(crate) fn set_end(&mut self, new_end: B::Addr) {
        self.va_range.end = new_end;
    }

    /// Maps the whole memory area in the page table.
    pub(crate) fn map_area(&self, page_table: &mut B::PageTable) -> MappingResult {
        self.backend
            .map(self.start(), self.size(), self.flags, page_table)
            .then_some(())
            .ok_or(MappingError::BadState)
    }

    /// Unmaps the whole memory area in the page table.
    pub(crate) fn unmap_area(&self, page_table: &mut B::PageTable) -> MappingResult {
        self.backend
            .unmap(self.start(), self.size(), page_table)
            .then_some(())
            .ok_or(MappingError::BadState)
    }

    /// Changes the flags in the page table.
    pub(crate) fn protect_area(
        &mut self,
        new_flags: B::Flags,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        self.backend
            .protect(self.start(), self.size(), new_flags, page_table);
        Ok(())
    }

    /// Shrinks the memory area at the left side.
    ///
    /// The start address of the memory area is increased so that exactly
    /// `new_size` bytes remain; the part cut off on the left is unmapped.
    ///
    /// `new_size` must be greater than 0 and less than the current size.
    pub(crate) fn shrink_left(
        &mut self,
        new_size: usize,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        assert!(new_size > 0 && new_size < self.size());

        let old_size = self.size();
        let unmap_size = old_size - new_size;

        if !self.backend.unmap(self.start(), unmap_size, page_table) {
            return Err(MappingError::BadState);
        }
        // Use wrapping_add to avoid overflow check.
        // Safety: `unmap_size` is less than the current size, so it will never
        // overflow.
        self.va_range.start = self.va_range.start.wrapping_add(unmap_size);
        Ok(())
    }

    /// Shrinks the memory area at the right side.
    ///
    /// The end address of the memory area is decreased so that exactly
    /// `new_size` bytes remain; the part cut off on the right is unmapped.
    ///
    /// `new_size` must be greater than 0 and less than the current size.
    pub(crate) fn shrink_right(
        &mut self,
        new_size: usize,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        assert!(new_size > 0 && new_size < self.size());
        let old_size = self.size();
        let unmap_size = old_size - new_size;

        // Use wrapping_add to avoid overflow check.
        // Safety: `new_size` is less than the current size, so it will never overflow.
        let unmap_start = self.start().wrapping_add(new_size);

        if !self.backend.unmap(unmap_start, unmap_size, page_table) {
            return Err(MappingError::BadState);
        }

        // Use wrapping_sub to avoid overflow check, same as above.
        self.va_range.end = self.va_range.end.wrapping_sub(unmap_size);
        Ok(())
    }

    /// Splits the memory area at the given position.
    ///
    /// The original memory area is shrunk to the left part, and the right part
    /// is returned.
    ///
    /// Returns `None` if `pos` is not strictly inside the memory area, i.e.,
    /// if either resulting part would be empty.
    pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
        if self.start() < pos && pos < self.end() {
            let new_area = Self::new(
                pos,
                // Use wrapping_sub_addr to avoid overflow check. It is safe because
                // `pos` is within the memory area.
                self.end().wrapping_sub_addr(pos),
                self.flags,
                self.backend.clone(),
            );
            self.va_range.end = pos;
            Some(new_area)
        } else {
            None
        }
    }
}

impl<B: MappingBackend> fmt::Debug for MemoryArea<B>
where
    B::Addr: fmt::Debug,
    B::Flags: fmt::Debug + Copy,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MemoryArea")
            .field("va_range", &self.va_range)
            .field("flags", &self.flags)
            .finish()
    }
}
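
// The module below is an illustrative sketch, not part of the original crate:
// `MockBackend`, `MockFlags`, `MockPageTable`, and `MAX_ADDR` are made-up names,
// and the `MappingBackend` implementation only assumes the trait shape used
// above (associated `Addr`/`Flags`/`PageTable` types and `map`/`unmap`/`protect`
// methods returning `bool`). It shows how a `MemoryArea` is constructed, mapped,
// split, and shrunk.
#[cfg(test)]
mod tests {
    use super::*;

    const MAX_ADDR: usize = 0x1000;

    /// Hypothetical flags: one byte per address, `0` meaning "unmapped".
    type MockFlags = u8;
    /// Hypothetical page table: a flat array of per-address flags.
    type MockPageTable = [MockFlags; MAX_ADDR];

    #[derive(Clone)]
    struct MockBackend;

    impl MappingBackend for MockBackend {
        type Addr = usize;
        type Flags = MockFlags;
        type PageTable = MockPageTable;

        fn map(&self, start: usize, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
            for entry in pt.iter_mut().skip(start).take(size) {
                if *entry != 0 {
                    return false; // already mapped
                }
                *entry = flags;
            }
            true
        }

        fn unmap(&self, start: usize, size: usize, pt: &mut MockPageTable) -> bool {
            for entry in pt.iter_mut().skip(start).take(size) {
                if *entry == 0 {
                    return false; // not mapped
                }
                *entry = 0;
            }
            true
        }

        fn protect(&self, start: usize, size: usize, new_flags: MockFlags, pt: &mut MockPageTable) -> bool {
            for entry in pt.iter_mut().skip(start).take(size) {
                if *entry == 0 {
                    return false; // not mapped
                }
                *entry = new_flags;
            }
            true
        }
    }

    #[test]
    fn area_split_and_shrink() {
        let mut pt: MockPageTable = [0; MAX_ADDR];
        let mut area = MemoryArea::new(0x100, 0x100, 1, MockBackend);
        assert!(area.map_area(&mut pt).is_ok());
        assert_eq!((area.start(), area.end(), area.size()), (0x100, 0x200, 0x100));

        // Splitting at 0x180 keeps [0x100, 0x180) and returns [0x180, 0x200).
        let right = area.split(0x180).unwrap();
        assert_eq!((area.start(), area.end()), (0x100, 0x180));
        assert_eq!((right.start(), right.end()), (0x180, 0x200));

        // Shrinking the left part to 0x40 bytes unmaps [0x100, 0x140).
        area.shrink_left(0x40, &mut pt).unwrap();
        assert_eq!((area.start(), area.end()), (0x140, 0x180));
        assert_eq!(pt[0x100], 0);
        assert_eq!(pt[0x140], 1);
    }
}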