page_table_entry/arch/
aarch64.rs

1//! AArch64 VMSAv8-64 translation table format descriptors.
2
3use core::fmt;
4
5use aarch64_cpu::registers::MAIR_EL1;
6use memory_addr::PhysAddr;
7
8use crate::{GenericPTE, MappingFlags};
9
bitflags::bitflags! {
    /// Memory attribute fields in the VMSAv8-64 translation table format descriptors.
    ///
    /// The low/high attribute fields apply to Block and Page descriptors; the
    /// `*_TABLE` bits apply only to Table descriptors and limit subsequent
    /// lookup levels.
    #[derive(Debug)]
    pub struct DescriptorAttr: u64 {
        // Attribute fields in stage 1 VMSAv8-64 Block and Page descriptors:

        /// Whether the descriptor is valid.
        const VALID =       1 << 0;
        /// The descriptor gives the address of the next level of translation table or 4KB page.
        /// (not a 2M, 1G block)
        const NON_BLOCK =   1 << 1;
        /// Memory attributes index field (AttrIndx\[2:0\], indexes into MAIR_ELx).
        const ATTR_INDX =   0b111 << 2;
        /// Non-secure bit. For memory accesses from Secure state, specifies whether the output
        /// address is in Secure or Non-secure memory.
        const NS =          1 << 5;
        /// Access permission: accessible at EL0.
        const AP_EL0 =      1 << 6;
        /// Access permission: read-only.
        const AP_RO =       1 << 7;
        /// Shareability: Inner Shareable (otherwise Outer Shareable).
        const INNER =       1 << 8;
        /// Shareability: Inner or Outer Shareable (otherwise Non-shareable).
        const SHAREABLE =   1 << 9;
        /// The Access flag.
        const AF =          1 << 10;
        /// The not global bit.
        const NG =          1 << 11;
        /// Indicates that 16 adjacent translation table entries point to contiguous memory regions.
        const CONTIGUOUS =  1 <<  52;
        /// The Privileged execute-never field.
        const PXN =         1 <<  53;
        /// The Execute-never or Unprivileged execute-never field.
        const UXN =         1 <<  54;

        // Next-level attributes in stage 1 VMSAv8-64 Table descriptors:

        /// PXN limit for subsequent levels of lookup.
        const PXN_TABLE =           1 << 59;
        /// XN limit for subsequent levels of lookup.
        const XN_TABLE =            1 << 60;
        /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted.
        const AP_NO_EL0_TABLE =     1 << 61;
        /// Access permissions limit for subsequent levels of lookup: write access not permitted.
        const AP_NO_WRITE_TABLE =   1 << 62;
        /// For memory accesses from Secure state, specifies the Security state for subsequent
        /// levels of lookup.
        const NS_TABLE =            1 << 63;
    }
}
60
/// The memory attributes index field in the descriptor, which is used to index
/// into the MAIR (Memory Attribute Indirection Register).
///
/// Each variant's value is the attribute slot number in `MAIR_ELx`; the
/// register must be programmed with [`MemAttr::MAIR_VALUE`] so that these
/// indices resolve to the intended memory types.
#[repr(u64)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MemAttr {
    /// Device-nGnRE memory
    Device             = 0,
    /// Normal memory
    Normal             = 1,
    /// Normal non-cacheable memory
    NormalNonCacheable = 2,
}
73
74impl DescriptorAttr {
75    #[allow(clippy::unusual_byte_groupings)]
76    const ATTR_INDEX_MASK: u64 = 0b111_00;
77
78    /// Constructs a descriptor from the memory index, leaving the other fields
79    /// empty.
80    pub const fn from_mem_attr(idx: MemAttr) -> Self {
81        let mut bits = (idx as u64) << 2;
82        if matches!(idx, MemAttr::Normal | MemAttr::NormalNonCacheable) {
83            bits |= Self::INNER.bits() | Self::SHAREABLE.bits();
84        }
85        Self::from_bits_retain(bits)
86    }
87
88    /// Returns the memory attribute index field.
89    pub const fn mem_attr(&self) -> Option<MemAttr> {
90        let idx = (self.bits() & Self::ATTR_INDEX_MASK) >> 2;
91        Some(match idx {
92            0 => MemAttr::Device,
93            1 => MemAttr::Normal,
94            2 => MemAttr::NormalNonCacheable,
95            _ => return None,
96        })
97    }
98}
99
100impl MemAttr {
101    /// The MAIR_ELx register should be set to this value to match the memory
102    /// attributes in the descriptors.
103    pub const MAIR_VALUE: u64 = {
104        // Device-nGnRE memory
105        let attr0 = MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck.value;
106        // Normal memory
107        let attr1 = MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc.value
108            | MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc.value;
109        let attr2 = MAIR_EL1::Attr2_Normal_Inner::NonCacheable.value
110            + MAIR_EL1::Attr2_Normal_Outer::NonCacheable.value;
111        attr0 | attr1 | attr2 // 0x44_ff_04
112    };
113}
114
115impl From<DescriptorAttr> for MappingFlags {
116    fn from(attr: DescriptorAttr) -> Self {
117        if !attr.contains(DescriptorAttr::VALID) {
118            return Self::empty();
119        }
120        let mut flags = Self::READ;
121        if !attr.contains(DescriptorAttr::AP_RO) {
122            flags |= Self::WRITE;
123        }
124        #[cfg(not(feature = "arm-el2"))]
125        {
126            if attr.contains(DescriptorAttr::AP_EL0) {
127                flags |= Self::USER;
128                if !attr.contains(DescriptorAttr::UXN) {
129                    flags |= Self::EXECUTE;
130                }
131            } else if !attr.intersects(DescriptorAttr::PXN) {
132                flags |= Self::EXECUTE;
133            }
134        }
135        #[cfg(feature = "arm-el2")]
136        {
137            if !attr.intersects(DescriptorAttr::UXN) {
138                flags |= Self::EXECUTE;
139            }
140        }
141        match attr.mem_attr() {
142            Some(MemAttr::Device) => flags |= Self::DEVICE,
143            Some(MemAttr::NormalNonCacheable) => flags |= Self::UNCACHED,
144            _ => {}
145        }
146        flags
147    }
148}
149
150impl From<MappingFlags> for DescriptorAttr {
151    fn from(flags: MappingFlags) -> Self {
152        if flags.is_empty() {
153            return Self::empty();
154        }
155        let mut attr = if flags.contains(MappingFlags::DEVICE) {
156            Self::from_mem_attr(MemAttr::Device)
157        } else if flags.contains(MappingFlags::UNCACHED) {
158            Self::from_mem_attr(MemAttr::NormalNonCacheable)
159        } else {
160            Self::from_mem_attr(MemAttr::Normal)
161        };
162        if flags.contains(MappingFlags::READ) {
163            attr |= Self::VALID;
164        }
165        if !flags.contains(MappingFlags::WRITE) {
166            attr |= Self::AP_RO;
167        }
168        #[cfg(not(feature = "arm-el2"))]
169        {
170            if flags.contains(MappingFlags::USER) {
171                attr |= Self::AP_EL0 | Self::PXN;
172                if !flags.contains(MappingFlags::EXECUTE) {
173                    attr |= Self::UXN;
174                }
175            } else {
176                attr |= Self::UXN;
177                if !flags.contains(MappingFlags::EXECUTE) {
178                    attr |= Self::PXN;
179                }
180            }
181        }
182        #[cfg(feature = "arm-el2")]
183        {
184            if !flags.contains(MappingFlags::EXECUTE) {
185                attr |= Self::UXN;
186            }
187        }
188        attr
189    }
190}
191
/// A VMSAv8-64 translation table descriptor.
///
/// This is a transparent wrapper over the raw 64-bit descriptor value.
///
/// Note that the **AttrIndx\[2:0\]** (bit\[4:2\]) field is set to `0` for
/// device memory, and `1` for normal memory. The system must configure the
/// MAIR_ELx system register accordingly.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct A64PTE(u64);
200
impl A64PTE {
    /// Mask of the output address field: bits 12..48 (4KB-aligned physical
    /// addresses up to 48 bits).
    const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;

    /// Creates an empty descriptor with all bits set to zero.
    pub const fn empty() -> Self {
        Self(0)
    }
}
210
211impl GenericPTE for A64PTE {
212    fn new_page(paddr: PhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
213        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
214        if !is_huge {
215            attr |= DescriptorAttr::NON_BLOCK;
216        }
217        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
218    }
219
220    fn new_table(paddr: PhysAddr) -> Self {
221        let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID;
222        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
223    }
224
225    fn paddr(&self) -> PhysAddr {
226        PhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
227    }
228
229    fn flags(&self) -> MappingFlags {
230        DescriptorAttr::from_bits_truncate(self.0).into()
231    }
232
233    fn set_paddr(&mut self, paddr: PhysAddr) {
234        self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
235    }
236
237    fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
238        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
239        if !is_huge {
240            attr |= DescriptorAttr::NON_BLOCK;
241        }
242        self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits();
243    }
244
245    fn bits(self) -> usize {
246        self.0 as usize
247    }
248
249    fn is_unused(&self) -> bool {
250        self.0 == 0
251    }
252
253    fn is_present(&self) -> bool {
254        DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID)
255    }
256
257    fn is_huge(&self) -> bool {
258        !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK)
259    }
260
261    fn clear(&mut self) {
262        self.0 = 0
263    }
264}
265
266impl fmt::Debug for A64PTE {
267    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
268        let mut f = f.debug_struct("A64PTE");
269        f.field("raw", &self.0)
270            .field("paddr", &self.paddr())
271            .field("attr", &DescriptorAttr::from_bits_truncate(self.0))
272            .field("flags", &self.flags())
273            .finish()
274    }
275}