// memory_set/set.rs

1use alloc::collections::BTreeMap;
2#[allow(unused_imports)] // this is a weird false alarm
3use alloc::vec::Vec;
4use core::fmt;
5
6use memory_addr::{AddrRange, MemoryAddr};
7
8use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
9
/// A container that maintains memory mappings ([`MemoryArea`]).
pub struct MemorySet<B: MappingBackend> {
    // Areas keyed by their start address. The map order is what `overlaps`,
    // `find` and `find_free_area` rely on; areas are kept non-overlapping.
    areas: BTreeMap<B::Addr, MemoryArea<B>>,
}
14
15impl<B: MappingBackend> MemorySet<B> {
16    /// Creates a new memory set.
17    pub const fn new() -> Self {
18        Self {
19            areas: BTreeMap::new(),
20        }
21    }
22
23    /// Returns the number of memory areas in the memory set.
24    pub fn len(&self) -> usize {
25        self.areas.len()
26    }
27
28    /// Returns `true` if the memory set contains no memory areas.
29    pub fn is_empty(&self) -> bool {
30        self.areas.is_empty()
31    }
32
33    /// Returns the iterator over all memory areas.
34    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<B>> {
35        self.areas.values()
36    }
37
38    /// Returns whether the given address range overlaps with any existing area.
39    pub fn overlaps(&self, range: AddrRange<B::Addr>) -> bool {
40        if let Some((_, before)) = self.areas.range(..range.start).last() {
41            if before.va_range().overlaps(range) {
42                return true;
43            }
44        }
45        if let Some((_, after)) = self.areas.range(range.start..).next() {
46            if after.va_range().overlaps(range) {
47                return true;
48            }
49        }
50        false
51    }
52
53    /// Finds the memory area that contains the given address.
54    pub fn find(&self, addr: B::Addr) -> Option<&MemoryArea<B>> {
55        let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
56        candidate.filter(|a| a.va_range().contains(addr))
57    }
58
59    /// Finds a free area that can accommodate the given size.
60    ///
61    /// The search starts from the given `hint` address, and the area should be
62    /// within the given `limit` range.
63    ///
64    /// # Notes
65    /// The `align` parameter specifies the alignment of the start address and
66    /// the size of the area. The start address of the resulting area will
67    /// be aligned to this value. Also, the size of the area must be a multiple
68    /// of this value.
69    ///
70    /// # Returns
71    /// Returns the start address of the free area. Returns `None` if no such
72    /// area is found.
73    pub fn find_free_area(
74        &self,
75        hint: B::Addr,
76        size: usize,
77        limit: AddrRange<B::Addr>,
78        align: usize,
79    ) -> Option<B::Addr> {
80        if size % align != 0 {
81            // size must be a multiple of align.
82            return None;
83        }
84        // brute force: try each area's end address as the start.
85        let mut last_end: <B as MappingBackend>::Addr = hint.max(limit.start).align_up(align);
86        if let Some((_, area)) = self.areas.range(..last_end).last() {
87            last_end = last_end.max(area.end()).align_up(align);
88        }
89        for (&addr, area) in self.areas.range(last_end..) {
90            if last_end.checked_add(size).is_some_and(|end| end <= addr) {
91                return Some(last_end);
92            }
93            last_end = area.end().align_up(align);
94        }
95        if last_end
96            .checked_add(size)
97            .is_some_and(|end| end <= limit.end)
98        {
99            Some(last_end)
100        } else {
101            None
102        }
103    }
104
105    /// Add a new memory mapping.
106    ///
107    /// The mapping is represented by a [`MemoryArea`].
108    ///
109    /// If the new area overlaps with any existing area, the behavior is
110    /// determined by the `unmap_overlap` parameter. If it is `true`, the
111    /// overlapped regions will be unmapped first. Otherwise, it returns an
112    /// error.
113    pub fn map(
114        &mut self,
115        area: MemoryArea<B>,
116        page_table: &mut B::PageTable,
117        unmap_overlap: bool,
118    ) -> MappingResult {
119        if area.va_range().is_empty() {
120            return Err(MappingError::InvalidParam);
121        }
122
123        if self.overlaps(area.va_range()) {
124            if unmap_overlap {
125                self.unmap(area.start(), area.size(), page_table)?;
126            } else {
127                return Err(MappingError::AlreadyExists);
128            }
129        }
130
131        area.map_area(page_table)?;
132        assert!(self.areas.insert(area.start(), area).is_none());
133        Ok(())
134    }
135
    /// Remove memory mappings within the given address range.
    ///
    /// All memory areas that are fully contained in the range will be removed
    /// directly. If the area intersects with the boundary, it will be shrinked.
    /// If the unmapped range is in the middle of an existing area, it will be
    /// split into two areas.
    pub fn unmap(
        &mut self,
        start: B::Addr,
        size: usize,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        // Reject ranges whose end would overflow the address type.
        let range =
            AddrRange::try_from_start_size(start, size).ok_or(MappingError::InvalidParam)?;
        if range.is_empty() {
            return Ok(());
        }

        let end = range.end;

        // Unmap entire areas that are contained by the range.
        // NOTE(review): the `unwrap` means a failing backend unmap panics
        // mid-removal — confirm the backend cannot fail for mapped areas.
        self.areas.retain(|_, area| {
            if area.va_range().contained_in(range) {
                area.unmap_area(page_table).unwrap();
                false
            } else {
                true
            }
        });

        // Shrink right if the area intersects with the left boundary.
        // (`before` is the last area starting strictly below `start`.)
        if let Some((&before_start, before)) = self.areas.range_mut(..start).last() {
            let before_end = before.end();
            if before_end > start {
                if before_end <= end {
                    // the unmapped area is at the end of `before`.
                    before.shrink_right(start.sub_addr(before_start), page_table)?;
                } else {
                    // the unmapped area is in the middle `before`, need to split.
                    // Split off the tail first, then shrink the head; the tail
                    // keeps its mapping and is re-inserted keyed by `end`.
                    let right_part = before.split(end).unwrap();
                    before.shrink_right(start.sub_addr(before_start), page_table)?;
                    assert_eq!(right_part.start().into(), Into::<usize>::into(end));
                    self.areas.insert(end, right_part);
                }
            }
        }

        // Shrink left if the area intersects with the right boundary.
        // (`after` is the first area starting at or above `start`; if it
        // begins before `end`, its head lies inside the unmapped range.)
        if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
            let after_end = after.end();
            if after_start < end {
                // the unmapped area is at the start of `after`.
                // Re-key the shrunken area at `end` since its start moved.
                let mut new_area = self.areas.remove(&after_start).unwrap();
                new_area.shrink_left(after_end.sub_addr(end), page_table)?;
                assert_eq!(new_area.start().into(), Into::<usize>::into(end));
                self.areas.insert(end, new_area);
            }
        }

        Ok(())
    }
197
198    /// Remove all memory areas and the underlying mappings.
199    pub fn clear(&mut self, page_table: &mut B::PageTable) -> MappingResult {
200        for (_, area) in self.areas.iter() {
201            area.unmap_area(page_table)?;
202        }
203        self.areas.clear();
204        Ok(())
205    }
206
    /// Change the flags of memory mappings within the given address range.
    ///
    /// `update_flags` is a function that receives old flags and processes
    /// new flags (e.g., some flags can not be changed through this interface).
    /// It returns [`None`] if there is no bit to change.
    ///
    /// Memory areas will be skipped according to `update_flags`. Memory areas
    /// that are fully contained in the range or contains the range or
    /// intersects with the boundary will be handled similarly to `munmap`.
    pub fn protect(
        &mut self,
        start: B::Addr,
        size: usize,
        update_flags: impl Fn(B::Flags) -> Option<B::Flags>,
        page_table: &mut B::PageTable,
    ) -> MappingResult {
        let end = start.checked_add(size).ok_or(MappingError::InvalidParam)?;
        // Areas created by splitting are collected here and inserted after
        // the loop, since the map cannot be mutated while iterating it.
        let mut to_insert = Vec::new();
        for (&area_start, area) in self.areas.iter_mut() {
            let area_end = area.end();

            if let Some(new_flags) = update_flags(area.flags()) {
                if area_start >= end {
                    // [ prot ]
                    //          [ area ]
                    // Areas are sorted by start address, so no later area
                    // can intersect the range either.
                    break;
                } else if area_end <= start {
                    //          [ prot ]
                    // [ area ]
                    // Do nothing
                } else if area_start >= start && area_end <= end {
                    // [   prot   ]
                    //   [ area ]
                    // Fully covered: change flags in place.
                    area.protect_area(new_flags, page_table)?;
                    area.set_flags(new_flags);
                } else if area_start < start && area_end > end {
                    //        [ prot ]
                    // [ left | area | right ]
                    // Three-way split: keep left in place, re-create the
                    // middle with new flags, defer inserting middle/right.
                    let right_part = area.split(end).unwrap();
                    area.set_end(start);

                    let mut middle_part =
                        MemoryArea::new(start, size, area.flags(), area.backend().clone());
                    middle_part.protect_area(new_flags, page_table)?;
                    middle_part.set_flags(new_flags);

                    to_insert.push((right_part.start(), right_part));
                    to_insert.push((middle_part.start(), middle_part));
                } else if area_end > end {
                    // [    prot ]
                    //   [  area | right ]
                    // The head of the area is covered: split the tail off,
                    // protect what remains in place.
                    let right_part = area.split(end).unwrap();
                    area.protect_area(new_flags, page_table)?;
                    area.set_flags(new_flags);

                    to_insert.push((right_part.start(), right_part));
                } else {
                    //        [ prot    ]
                    // [ left |  area ]
                    // The tail of the area is covered: split it off and
                    // protect only that part.
                    let mut right_part = area.split(start).unwrap();
                    right_part.protect_area(new_flags, page_table)?;
                    right_part.set_flags(new_flags);

                    to_insert.push((right_part.start(), right_part));
                }
            }
        }
        self.areas.extend(to_insert);
        Ok(())
    }
277}
278
279impl<B: MappingBackend> fmt::Debug for MemorySet<B>
280where
281    B::Addr: fmt::Debug,
282    B::Flags: fmt::Debug,
283{
284    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
285        f.debug_list().entries(self.areas.values()).finish()
286    }
287}