// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]
#![doc = include_str!("../README.md")]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::marker::PhantomData;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1}: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    /// Memory access rights
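    ///
    /// Flags can be combined; for example (a sketch using the flags defined below):
    /// ```ignore
    /// // Global read-write normal memory
    /// let rights = MemoryAccessRights::RW | MemoryAccessRights::GLOBAL;
    /// // Non-secure read-write device memory
    /// let mmio = MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::NS;
    /// ```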
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        /// Read
        const R = 0b00000001;
        /// Write
        const W = 0b00000010;
        /// Execute
        const X = 0b00000100;
        /// Non-secure
        const NS = 0b00001000;

        /// Read-write
        const RW = Self::R.bits() | Self::W.bits();
        /// Read-execute
        const RX = Self::R.bits() | Self::X.bits();
        /// Read-write-execute
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        /// User accessible
        const USER = 0b00010000;
        /// Device region
        const DEVICE = 0b00100000;
        /// Global (not tied to ASID)
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

impl From<Attributes> for MemoryAccessRights {
    fn from(value: Attributes) -> Self {
        let mut result = Self::empty();

        result |= match value.data_access_permissions {
            DataAccessPermissions::ReadOnly_None => Self::R,
            DataAccessPermissions::ReadWrite_None => Self::RW,
            DataAccessPermissions::ReadOnly_ReadOnly => Self::R | Self::USER,
            DataAccessPermissions::ReadWrite_ReadWrite => Self::RW | Self::USER,
        };

        if value.mem_attr_index == MemoryAttributesIndex::Device_nGnRnE {
            result |= Self::DEVICE;
        }

        if !value.uxn {
            result |= Self::X;
        }

        if !value.not_global {
            result |= Self::GLOBAL;
        }

        if value.non_secure {
            result |= Self::NS;
        }

        result
    }
}

/// Virtual Address range, selects x in `TTBRx_EL*`
#[derive(Debug, Clone, Copy)]
pub enum RegimeVaRange {
    /// Lower virtual address range, selects `TTBR0_EL*`
    Lower,
    /// Upper virtual address range, selects `TTBR1_EL*`
    Upper,
}

/// Translation regime
#[derive(Debug, Clone, Copy)]
pub enum TranslationRegime {
    /// EL1 and EL0 stage 1, TTBRx_EL1
    EL1_0(RegimeVaRange, u8),
    #[cfg(target_feature = "vh")]
    /// EL2 and EL0 with VHE
    EL2_0(RegimeVaRange, u8),
    /// EL2
    EL2,
    /// EL3, TTBR0_EL3
    EL3,
}

impl TranslationRegime {
    /// Checks if the translation regime uses the upper virtual address range.
    fn is_upper_va_range(&self) -> bool {
        match self {
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true,
            _ => false,
        }
    }
}

/// Translation granule
pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

/// Trait for converting between the virtual address space of the running kernel environment and
/// the physical address space.
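///
/// # Example
/// A minimal sketch for an identity-mapped kernel environment, where kernel virtual addresses
/// equal physical addresses (an assumption made purely for illustration):
/// ```ignore
/// struct IdentityTranslator;
///
/// impl KernelAddressTranslator for IdentityTranslator {
///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
///         PhysicalAddress(va.0)
///     }
///
///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
///         VirtualAddress(pa.0)
///     }
/// }
/// ```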
pub trait KernelAddressTranslator {
    /// Convert a virtual address of the running kernel environment into a physical address.
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;
    /// Convert a physical address into a virtual address of the running kernel environment.
    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
}

pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
    _kernel_address_translator: PhantomData<K>,
}

/// Memory translation table handling
///
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains an address range
/// * find empty area for region
/// * set access rights for a region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
    /// Create a new Xlat instance
    /// # Arguments
    /// * page_pool: Page pool to allocate translation tables
    /// * address: Virtual address range
    /// * regime: Translation regime
    /// * granule: Translation granule
    /// # Return value
    /// * Xlat instance
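    /// # Example
    /// A sketch of creating an EL1&0 lower-range instance with a 4k granule; `pool` (a
    /// `PagePool`) and `Translator` (a `KernelAddressTranslator` implementation) are assumed to
    /// exist, and the address range is illustrative:
    /// ```ignore
    /// let xlat = Xlat::<Translator, 36>::new(
    ///     pool,
    ///     VirtualAddressRange::new(VirtualAddress(0x4000_0000), VirtualAddress(0x8000_0000)),
    ///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
    ///     TranslationGranule::Granule4k,
    /// );
    /// ```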
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        if !address.start.is_valid_in_regime::<VA_BITS>(regime)
            || !address.end.is_valid_in_regime::<VA_BITS>(regime)
        {
            panic!(
                "Invalid address range {:?} for regime {:?}",
                address, regime
            );
        }

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
            _kernel_address_translator: PhantomData,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with the
    /// initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
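    /// # Example
    /// A sketch that loads a small data blob at a library-chosen VA (`xlat` is an instance of
    /// this type):
    /// ```ignore
    /// let va = xlat.allocate_initalized_range(
    ///     None,
    ///     &[0xde, 0xad, 0xbe, 0xef],
    ///     MemoryAccessRights::R | MemoryAccessRights::GLOBAL,
    /// )?;
    /// ```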
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page::<K>(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init::<K>();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
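    /// # Example
    /// A sketch that maps a device MMIO page at a fixed VA; both addresses are illustrative
    /// assumptions:
    /// ```ignore
    /// let uart_va = xlat.map_physical_address_range(
    ///     Some(VirtualAddress(0x1000_0000)),
    ///     PhysicalAddress(0x0900_0000),
    ///     0x1000,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL,
    /// )?;
    /// ```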
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Query the memory access rights of a virtual address
    /// # Arguments
    /// * va : Virtual address, alignment to granule size is not required
    /// # Return value
    /// Memory access rights of the virtual address and the minimal length of a contiguous memory
    /// area after the virtual address that is guaranteed to have the same access rights. The
    /// area following the returned length might or might not have the same access rights.
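    ///
    /// # Example
    /// A sketch that checks whether a mapped address is writable:
    /// ```ignore
    /// let (rights, length) = xlat.get_access_rights(VirtualAddress(0x1000_0040))?;
    /// let writable = rights.contains(MemoryAccessRights::W);
    /// ```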
    pub fn get_access_rights(
        &self,
        va: VirtualAddress,
    ) -> Result<(MemoryAccessRights, usize), XlatError> {
        let containing_region = self
            .find_containing_region(va, 1)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let (descriptor, level) = self.get_descriptor(va.remove_upper_bits::<VA_BITS>());

        match descriptor.get_descriptor_type(level) {
            DescriptorType::Block => {
                let attributes = descriptor.get_block_attributes(level);
                let block_length = self.granule.block_size_at_level(level);
                let length = block_length - va.mask_bits(block_length - 1).0;

                Ok((MemoryAccessRights::from(attributes), length))
            }
            _ => Err(XlatError::VaNotFound(va)),
        }
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
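    ///
    /// # Example
    /// A sketch of switching to the new mapping; upholding the safety requirements above is the
    /// caller's responsibility:
    /// ```ignore
    /// // SAFETY: no references depend on the previous mapping.
    /// unsafe { xlat.activate() };
    /// ```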
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = self.base_table.get_pa().0 as u64;

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds.
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let table_pa = descriptor.get_next_level_table(level);
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix, va, table_pa.0
                    )?;

                    let next_level_table =
                        unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// * attributes: Memory attributes
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Remove memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole region
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the region
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Adds the block descriptor to the translation table along with all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_bits(!(granule.block_size_at_level(level) - 1));
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));

                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                )
            }
        }
    }

915 /// Remove block from memory mapping
916 /// # Arguments
917 /// * block: memory block that can be represented by a single translation entry
918 fn unmap_block(&mut self, block: Block) {
919 Self::remove_block_descriptor_recursively(
920 block.va,
Imre Kis631127d2024-11-21 13:09:01 +0100921 block.size,
922 self.granule.initial_lookup_level(),
Imre Kis21d7f722025-01-17 17:55:35 +0100923 unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
Imre Kis703482d2023-11-30 15:51:26 +0100924 &self.page_pool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100925 &self.regime,
Imre Kis631127d2024-11-21 13:09:01 +0100926 self.granule,
Imre Kisd20b5292024-12-04 16:05:30 +0100927 )
Imre Kis703482d2023-11-30 15:51:26 +0100928 }
929
    /// Removes the block descriptor from the translation table along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
Imre Kis703482d2023-11-30 15:51:26 +0100940 fn remove_block_descriptor_recursively(
Imre Kisd5b96fd2024-09-11 17:04:32 +0200941 va: VirtualAddress,
Imre Kis631127d2024-11-21 13:09:01 +0100942 block_size: usize,
943 level: isize,
Imre Kis703482d2023-11-30 15:51:26 +0100944 table: &mut [Descriptor],
945 page_pool: &PagePool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100946 regime: &TranslationRegime,
Imre Kis631127d2024-11-21 13:09:01 +0100947 granule: TranslationGranule<VA_BITS>,
Imre Kis703482d2023-11-30 15:51:26 +0100948 ) {
949 // Get descriptor of the current level
Imre Kis631127d2024-11-21 13:09:01 +0100950 let descriptor = &mut table[va.get_level_index(granule, level)];
Imre Kis703482d2023-11-30 15:51:26 +0100951
Imre Kis631127d2024-11-21 13:09:01 +0100952 // We reached the required level with the matching block size
953 if granule.block_size_at_level(level) == block_size {
Imre Kis703482d2023-11-30 15:51:26 +0100954 descriptor.set_block_descriptor_to_invalid(level);
Imre Kis9a9d0492024-10-31 15:19:46 +0100955 Self::invalidate(regime, Some(va));
Imre Kis703482d2023-11-30 15:51:26 +0100956 return;
957 }
958
959 // Need to iterate forward
960 match descriptor.get_descriptor_type(level) {
961 DescriptorType::Invalid => {
962 panic!("Cannot remove block from non-existing table");
963 }
964 DescriptorType::Block => {
Imre Kis631127d2024-11-21 13:09:01 +0100965 panic!("Cannot remove block with different block size");
Imre Kis703482d2023-11-30 15:51:26 +0100966 }
967 DescriptorType::Table => {
Imre Kisa7ef6842025-01-17 13:12:52 +0100968 let next_level_table = unsafe {
969 Self::get_table_from_pa_mut(
970 descriptor.get_next_level_table(level),
971 granule,
972 level + 1,
973 )
974 };
975
Imre Kis703482d2023-11-30 15:51:26 +0100976 Self::remove_block_descriptor_recursively(
Imre Kis631127d2024-11-21 13:09:01 +0100977 va.mask_for_level(granule, level),
978 block_size,
Imre Kis703482d2023-11-30 15:51:26 +0100979 level + 1,
980 next_level_table,
981 page_pool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100982 regime,
Imre Kis631127d2024-11-21 13:09:01 +0100983 granule,
Imre Kis703482d2023-11-30 15:51:26 +0100984 );
985
986 if next_level_table.iter().all(|d| !d.is_valid()) {
987 // Empty table
988 let mut page = unsafe {
Imre Kisa7ef6842025-01-17 13:12:52 +0100989 let table_pa = descriptor.set_table_descriptor_to_invalid(level);
990 let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
Imre Kis21d7f722025-01-17 17:55:35 +0100991 Pages::from_slice::<K, Descriptor>(next_table)
Imre Kis703482d2023-11-30 15:51:26 +0100992 };
Imre Kisa7ef6842025-01-17 13:12:52 +0100993
Imre Kis21d7f722025-01-17 17:55:35 +0100994 page.zero_init::<K>();
Imre Kis703482d2023-11-30 15:51:26 +0100995 page_pool.release_pages(page).unwrap();
996 }
997 }
998 }
999 }
1000
    /// Find a block or an invalid descriptor which describes the virtual address' mapping.
    /// # Arguments
    /// * va : Virtual address, alignment to granule size is not required
    /// # Return value
    /// Reference to the descriptor and level of the descriptor in the translation table
    fn get_descriptor(&self, va: VirtualAddress) -> (&Descriptor, isize) {
        Self::walk_descriptors(
            va,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
            self.granule,
        )
    }

    /// Walk translation table until finding a block or invalid descriptor that contains the VA.
    /// # Arguments
    /// * va : Virtual address, alignment to granule size is not required
    /// * table: Translation table on the given level
    /// * granule: Translation granule
    /// # Return value
    /// Reference to the descriptor and level of the descriptor in the translation table
    fn walk_descriptors(
        va: VirtualAddress,
        level: isize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> (&Descriptor, isize) {
        // Get descriptor of the current level
        let descriptor = &table[va.get_level_index(granule, level)];

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid | DescriptorType::Block => (descriptor, level),
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::walk_descriptors(
                    va.mask_for_level(granule, level),
                    level + 1,
                    next_level_table,
                    granule,
                )
            }
        }
    }

    /// Create a translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts(
                table_va.0 as *const Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    /// Create a mutable translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa_mut<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a mut [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts_mut(
                table_va.0 as *mut Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
            self.granule,
        )?;

        Ok(())
    }
}