page_table_entry/arch/aarch64.rs

//! AArch64 VMSAv8-64 translation table format descriptors.

use aarch64_cpu::registers::MAIR_EL1;
use core::fmt;
use memory_addr::PhysAddr;

use crate::{GenericPTE, MappingFlags};

bitflags::bitflags! {
    /// Memory attribute fields in the VMSAv8-64 translation table format descriptors.
    #[derive(Debug)]
    pub struct DescriptorAttr: u64 {
        // Attribute fields in stage 1 VMSAv8-64 Block and Page descriptors:

        /// Whether the descriptor is valid.
        const VALID =       1 << 0;
        /// The descriptor gives the address of the next level of translation table or 4KB page
        /// (as opposed to a 2M or 1G block).
        const NON_BLOCK =   1 << 1;
        /// Memory attributes index field.
        const ATTR_INDX =   0b111 << 2;
        /// Non-secure bit. For memory accesses from Secure state, specifies whether the output
        /// address is in Secure or Non-secure memory.
        const NS =          1 << 5;
        /// Access permission: accessible at EL0.
        const AP_EL0 =      1 << 6;
        /// Access permission: read-only.
        const AP_RO =       1 << 7;
        /// Shareability: Inner Shareable (otherwise Outer Shareable).
        const INNER =       1 << 8;
        /// Shareability: Inner or Outer Shareable (otherwise Non-shareable).
        const SHAREABLE =   1 << 9;
        /// The Access flag.
        const AF =          1 << 10;
        /// The not global bit.
        const NG =          1 << 11;
        /// Indicates that 16 adjacent translation table entries point to contiguous memory regions.
        const CONTIGUOUS =  1 << 52;
        /// The Privileged execute-never field.
        const PXN =         1 << 53;
        /// The Execute-never or Unprivileged execute-never field.
        const UXN =         1 << 54;

        // Next-level attributes in stage 1 VMSAv8-64 Table descriptors:

        /// PXN limit for subsequent levels of lookup.
        const PXN_TABLE =           1 << 59;
        /// XN limit for subsequent levels of lookup.
        const XN_TABLE =            1 << 60;
        /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted.
        const AP_NO_EL0_TABLE =     1 << 61;
        /// Access permissions limit for subsequent levels of lookup: write access not permitted.
        const AP_NO_WRITE_TABLE =   1 << 62;
        /// For memory accesses from Secure state, specifies the Security state for subsequent
        /// levels of lookup.
        const NS_TABLE =            1 << 63;
    }
}
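
// Illustrative sketch (hypothetical test module, not part of the original
// file): shows how the low attribute bits compose. A valid page descriptor
// carries both VALID and NON_BLOCK, while a valid block descriptor differs
// only in leaving bit 1 clear.
#[cfg(test)]
mod descriptor_attr_examples {
    use super::*;

    #[test]
    fn page_vs_block_low_bits() {
        let page = DescriptorAttr::VALID | DescriptorAttr::NON_BLOCK | DescriptorAttr::AF;
        let block = DescriptorAttr::VALID | DescriptorAttr::AF;
        // VALID (bit 0) + NON_BLOCK (bit 1) + AF (bit 10) = 0x403.
        assert_eq!(page.bits(), 0x403);
        // The block descriptor clears only NON_BLOCK.
        assert_eq!(block.bits(), 0x401);
    }
}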

/// The memory attributes index field in the descriptor, which is used to index
/// into the MAIR (Memory Attribute Indirection Register).
#[repr(u64)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MemAttr {
    /// Device-nGnRE memory
    Device = 0,
    /// Normal memory
    Normal = 1,
    /// Normal non-cacheable memory
    NormalNonCacheable = 2,
}

impl DescriptorAttr {
    #[allow(clippy::unusual_byte_groupings)]
    const ATTR_INDEX_MASK: u64 = 0b111_00;

    /// Constructs a descriptor from the memory index, leaving the other fields
    /// empty.
    pub const fn from_mem_attr(idx: MemAttr) -> Self {
        let mut bits = (idx as u64) << 2;
        if matches!(idx, MemAttr::Normal | MemAttr::NormalNonCacheable) {
            bits |= Self::INNER.bits() | Self::SHAREABLE.bits();
        }
        Self::from_bits_retain(bits)
    }

    /// Returns the memory attribute index field.
    pub const fn mem_attr(&self) -> Option<MemAttr> {
        let idx = (self.bits() & Self::ATTR_INDEX_MASK) >> 2;
        Some(match idx {
            0 => MemAttr::Device,
            1 => MemAttr::Normal,
            2 => MemAttr::NormalNonCacheable,
            _ => return None,
        })
    }
}
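
// Illustrative sketch (hypothetical test module): `from_mem_attr` and
// `mem_attr` round-trip, and normal memory additionally gets the Inner
// Shareable bits while device memory does not.
#[cfg(test)]
mod mem_attr_examples {
    use super::*;

    #[test]
    fn mem_attr_round_trip() {
        let normal = DescriptorAttr::from_mem_attr(MemAttr::Normal);
        assert_eq!(normal.mem_attr(), Some(MemAttr::Normal));
        assert!(normal.contains(DescriptorAttr::INNER | DescriptorAttr::SHAREABLE));

        let device = DescriptorAttr::from_mem_attr(MemAttr::Device);
        assert_eq!(device.mem_attr(), Some(MemAttr::Device));
        assert!(!device.intersects(DescriptorAttr::INNER | DescriptorAttr::SHAREABLE));
    }
}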

impl MemAttr {
    /// The MAIR_ELx register should be set to this value to match the memory
    /// attributes in the descriptors.
    pub const MAIR_VALUE: u64 = {
        // Device-nGnRE memory
        let attr0 = MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck.value;
        // Normal memory
        let attr1 = MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc.value
            | MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc.value;
        // Normal non-cacheable memory
        let attr2 = MAIR_EL1::Attr2_Normal_Inner::NonCacheable.value
            | MAIR_EL1::Attr2_Normal_Outer::NonCacheable.value;
        attr0 | attr1 | attr2 // 0x44_ff_04
    };
}
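
// Illustrative sketch (hypothetical test module): the packed MAIR value
// matches the constant noted above (Attr0 = 0x04 Device-nGnRE, Attr1 = 0xff
// write-back Normal, Attr2 = 0x44 non-cacheable Normal). At boot, the kernel
// is expected to program it into the register, e.g. (requires EL1 and the
// `Writeable` interface from `aarch64_cpu`):
//   MAIR_EL1.set(MemAttr::MAIR_VALUE);
#[cfg(test)]
mod mair_examples {
    use super::*;

    #[test]
    fn mair_value_matches_documented_layout() {
        assert_eq!(MemAttr::MAIR_VALUE, 0x44_ff04);
    }
}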

impl From<DescriptorAttr> for MappingFlags {
    fn from(attr: DescriptorAttr) -> Self {
        if !attr.contains(DescriptorAttr::VALID) {
            return Self::empty();
        }
        let mut flags = Self::READ;
        if !attr.contains(DescriptorAttr::AP_RO) {
            flags |= Self::WRITE;
        }
        #[cfg(not(feature = "arm-el2"))]
        {
            if attr.contains(DescriptorAttr::AP_EL0) {
                flags |= Self::USER;
                if !attr.contains(DescriptorAttr::UXN) {
                    flags |= Self::EXECUTE;
                }
            } else if !attr.intersects(DescriptorAttr::PXN) {
                flags |= Self::EXECUTE;
            }
        }
        #[cfg(feature = "arm-el2")]
        {
            if !attr.intersects(DescriptorAttr::UXN) {
                flags |= Self::EXECUTE;
            }
        }
        match attr.mem_attr() {
            Some(MemAttr::Device) => flags |= Self::DEVICE,
            Some(MemAttr::NormalNonCacheable) => flags |= Self::UNCACHED,
            _ => {}
        }
        flags
    }
}

impl From<MappingFlags> for DescriptorAttr {
    fn from(flags: MappingFlags) -> Self {
        if flags.is_empty() {
            return Self::empty();
        }
        let mut attr = if flags.contains(MappingFlags::DEVICE) {
            Self::from_mem_attr(MemAttr::Device)
        } else if flags.contains(MappingFlags::UNCACHED) {
            Self::from_mem_attr(MemAttr::NormalNonCacheable)
        } else {
            Self::from_mem_attr(MemAttr::Normal)
        };
        if flags.contains(MappingFlags::READ) {
            attr |= Self::VALID;
        }
        if !flags.contains(MappingFlags::WRITE) {
            attr |= Self::AP_RO;
        }
        #[cfg(not(feature = "arm-el2"))]
        {
            if flags.contains(MappingFlags::USER) {
                attr |= Self::AP_EL0 | Self::PXN;
                if !flags.contains(MappingFlags::EXECUTE) {
                    attr |= Self::UXN;
                }
            } else {
                attr |= Self::UXN;
                if !flags.contains(MappingFlags::EXECUTE) {
                    attr |= Self::PXN;
                }
            }
        }
        #[cfg(feature = "arm-el2")]
        {
            if !flags.contains(MappingFlags::EXECUTE) {
                attr |= Self::UXN;
            }
        }
        attr
    }
}
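
// Illustrative sketch (hypothetical test module): converting generic mapping
// flags to hardware attributes and back preserves the permission bits. The
// assertions below hold with or without the `arm-el2` feature.
#[cfg(test)]
mod flag_conversion_examples {
    use super::*;

    #[test]
    fn flags_survive_round_trip() {
        let flags = MappingFlags::READ | MappingFlags::WRITE;
        let attr = DescriptorAttr::from(flags);
        assert!(attr.contains(DescriptorAttr::VALID));
        assert!(!attr.contains(DescriptorAttr::AP_RO));

        let back = MappingFlags::from(attr);
        assert!(back.contains(MappingFlags::READ | MappingFlags::WRITE));
        assert!(!back.contains(MappingFlags::EXECUTE));

        // Device mappings select AttrIndx 0 and come back with DEVICE set.
        let dev = DescriptorAttr::from(MappingFlags::READ | MappingFlags::DEVICE);
        assert_eq!(dev.mem_attr(), Some(MemAttr::Device));
        assert!(MappingFlags::from(dev).contains(MappingFlags::DEVICE));
    }
}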

/// A VMSAv8-64 translation table descriptor.
///
/// Note that the **AttrIndx\[2:0\]** (bit\[4:2\]) field is set to `0` for
/// device memory, `1` for normal memory, and `2` for normal non-cacheable
/// memory. The system must configure the MAIR_ELx system register accordingly.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct A64PTE(u64);

impl A64PTE {
    const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000; // bits 12..48

    /// Creates an empty descriptor with all bits set to zero.
    pub const fn empty() -> Self {
        Self(0)
    }
}

impl GenericPTE for A64PTE {
    fn new_page(paddr: PhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
        if !is_huge {
            attr |= DescriptorAttr::NON_BLOCK;
        }
        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
    }
    fn new_table(paddr: PhysAddr) -> Self {
        let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID;
        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
    }
    fn paddr(&self) -> PhysAddr {
        PhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
    }
    fn flags(&self) -> MappingFlags {
        DescriptorAttr::from_bits_truncate(self.0).into()
    }
    fn set_paddr(&mut self, paddr: PhysAddr) {
        self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
    }
    fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
        if !is_huge {
            attr |= DescriptorAttr::NON_BLOCK;
        }
        self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits();
    }

    fn bits(self) -> usize {
        self.0 as usize
    }
    fn is_unused(&self) -> bool {
        self.0 == 0
    }
    fn is_present(&self) -> bool {
        DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID)
    }
    fn is_huge(&self) -> bool {
        !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK)
    }
    fn clear(&mut self) {
        self.0 = 0
    }
}
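
// Illustrative sketch (hypothetical test module): building page-table entries
// through the `GenericPTE` interface. The physical address below is arbitrary
// but 4K-aligned, as required by `PHYS_ADDR_MASK`.
#[cfg(test)]
mod pte_examples {
    use super::*;

    #[test]
    fn build_and_inspect_entries() {
        assert!(A64PTE::empty().is_unused());

        let paddr = PhysAddr::from(0x4000_0000);
        let pte = A64PTE::new_page(paddr, MappingFlags::READ | MappingFlags::WRITE, false);
        assert!(pte.is_present());
        assert!(!pte.is_huge()); // NON_BLOCK is set for a 4K page
        assert_eq!(pte.paddr(), paddr);

        // A 2M/1G block mapping leaves NON_BLOCK clear.
        let huge = A64PTE::new_page(paddr, MappingFlags::READ, true);
        assert!(huge.is_huge());

        // Table descriptors are valid but carry no permission attributes.
        let table = A64PTE::new_table(paddr);
        assert!(table.is_present());
    }
}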

impl fmt::Debug for A64PTE {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut f = f.debug_struct("A64PTE");
        f.field("raw", &self.0)
            .field("paddr", &self.paddr())
            .field("attr", &DescriptorAttr::from_bits_truncate(self.0))
            .field("flags", &self.flags())
            .finish()
    }
}