// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]
#![doc = include_str!("../README.md")]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::marker::PhantomData;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::{
    Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability,
};
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1}: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    /// Memory access rights
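    ///
    /// # Example
    ///
    /// A minimal sketch of composing access rights with the standard
    /// `bitflags` operators (illustrative only):
    ///
    /// ```ignore
    /// let rights = MemoryAccessRights::RW | MemoryAccessRights::GLOBAL;
    /// assert!(rights.contains(MemoryAccessRights::R));
    /// assert!(!rights.contains(MemoryAccessRights::DEVICE));
    /// ```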
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        /// Read
        const R = 0b00000001;
        /// Write
        const W = 0b00000010;
        /// Execute
        const X = 0b00000100;
        /// Non-secure
        const NS = 0b00001000;

        /// Read-write
        const RW = Self::R.bits() | Self::W.bits();
        /// Read-execute
        const RX = Self::R.bits() | Self::X.bits();
        /// Read-write-execute
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        /// User accessible
        const USER = 0b00010000;
        /// Device region
        const DEVICE = 0b00100000;
        /// Global (not tied to ASID)
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

/// Virtual address range, selects `x` in `TTBRx_EL*`
#[derive(Debug, Clone, Copy)]
pub enum RegimeVaRange {
    /// Lower virtual address range, selects `TTBR0_EL*`
    Lower,
    /// Upper virtual address range, selects `TTBR1_EL*`
    Upper,
}

/// Translation regime
#[derive(Debug, Clone, Copy)]
pub enum TranslationRegime {
    /// EL1 and EL0 stage 1, TTBRx_EL1
    EL1_0(RegimeVaRange, u8),
    /// EL2 and EL0 with VHE, TTBRx_EL2
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8),
    /// EL2, TTBR0_EL2
    EL2,
    /// EL3, TTBR0_EL3
    EL3,
}

impl TranslationRegime {
    /// Checks if the translation regime uses the upper virtual address range.
    fn is_upper_va_range(&self) -> bool {
        match self {
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true,
            _ => false,
        }
    }
}

/// Translation granule
pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

/// Trait for converting between the virtual address space of the running kernel environment and
/// the physical address space.
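///
/// # Example
///
/// A minimal sketch of an implementation for an identity-mapped kernel
/// environment; `IdentityTranslator` is an illustrative assumption, not part
/// of this crate:
///
/// ```ignore
/// struct IdentityTranslator;
///
/// impl KernelAddressTranslator for IdentityTranslator {
///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
///         PhysicalAddress(va.0)
///     }
///
///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
///         VirtualAddress(pa.0)
///     }
/// }
/// ```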
pub trait KernelAddressTranslator {
    /// Converts a virtual address of the running kernel environment into a physical address.
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;

    /// Converts a physical address into a virtual address of the running kernel environment.
    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
}

pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
    _kernel_address_translator: PhantomData<K>,
}

/// Memory translation table handling
///
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given address range
/// * find empty area for region
/// * set access rights for a region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
    /// Creates a new Xlat instance
    /// # Arguments
    /// * page_pool: Page pool to allocate translation tables
    /// * address: Virtual address range
    /// * regime: Translation regime
    /// * granule: Translation granule
    /// # Return value
    /// * Xlat instance
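    ///
    /// # Example
    ///
    /// A minimal sketch; `KernelAddr` (a [`KernelAddressTranslator`]
    /// implementation) and the page pool setup are assumptions of the example,
    /// not APIs defined here:
    ///
    /// ```ignore
    /// let mut xlat = Xlat::<KernelAddr, 36>::new(
    ///     page_pool,
    ///     VirtualAddressRange::new(VirtualAddress(0x0000_0000), VirtualAddress(0x1000_0000)),
    ///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
    ///     TranslationGranule::Granule4k,
    /// );
    /// ```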
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        if !address.start.is_valid_in_regime::<VA_BITS>(regime)
            || !address.end.is_valid_in_regime::<VA_BITS>(regime)
        {
            panic!(
                "Invalid address range {:?} for regime {:?}",
                address, regime
            );
        }

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
            _kernel_address_translator: PhantomData,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
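    ///
    /// # Example
    ///
    /// A minimal sketch, reusing an `xlat` instance created by [`Xlat::new`]:
    ///
    /// ```ignore
    /// let image = [0u8; 4096];
    /// let va = xlat.allocate_initalized_range(None, &image, MemoryAccessRights::RX)?;
    /// ```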
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page::<K>(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
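    ///
    /// # Example
    ///
    /// A minimal sketch of allocating a zero-filled read-write buffer:
    ///
    /// ```ignore
    /// let buffer_va = xlat.allocate_zero_init_range(None, 0x1_0000, MemoryAccessRights::RW)?;
    /// ```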
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init::<K>();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
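    ///
    /// # Example
    ///
    /// A minimal sketch of mapping an MMIO page as device memory; the physical
    /// address is a made-up example value and the tuple constructor is assumed
    /// to be accessible:
    ///
    /// ```ignore
    /// let uart_va = xlat.map_physical_address_range(
    ///     None,
    ///     PhysicalAddress(0x0900_0000),
    ///     4096,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    /// )?;
    /// ```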
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
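    ///
    /// # Example
    ///
    /// A minimal sketch of unmapping a previously mapped page:
    ///
    /// ```ignore
    /// xlat.unmap_virtual_address_range(uart_va, 4096)?;
    /// ```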
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a continuous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
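    ///
    /// # Example
    ///
    /// A minimal sketch of resolving the physical address behind a mapping:
    ///
    /// ```ignore
    /// let pa = xlat.get_pa_by_va(uart_va, 4096)?;
    /// ```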
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
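    ///
    /// # Example
    ///
    /// A minimal sketch of downgrading a mapping to read-only:
    ///
    /// ```ignore
    /// xlat.set_access_rights(va, 4096, MemoryAccessRights::R)?;
    /// ```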
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
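    ///
    /// # Example
    ///
    /// A minimal sketch; the caller is responsible for upholding the safety
    /// contract above:
    ///
    /// ```ignore
    /// unsafe { xlat.activate() };
    /// ```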
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = self.base_table.get_pa().0 as u64;

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds; does nothing.
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the given formatter
    /// # Arguments
    /// * f: Formatter to write the table dump to
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let table_pa = descriptor.get_next_level_table(level);
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix, va, table_pa.0
                    )?;

                    let next_level_table =
                        unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// * attributes: Memory attributes
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole region
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the region
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level + 1),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));

                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                )
            }
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Removes the block descriptor from the translation table, also releasing all the
    /// intermediate tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
Imre Kis703482d2023-11-30 15:51:26 +0100876 fn remove_block_descriptor_recursively(
Imre Kisd5b96fd2024-09-11 17:04:32 +0200877 va: VirtualAddress,
Imre Kis631127d2024-11-21 13:09:01 +0100878 block_size: usize,
879 level: isize,
Imre Kis703482d2023-11-30 15:51:26 +0100880 table: &mut [Descriptor],
881 page_pool: &PagePool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100882 regime: &TranslationRegime,
Imre Kis631127d2024-11-21 13:09:01 +0100883 granule: TranslationGranule<VA_BITS>,
Imre Kis703482d2023-11-30 15:51:26 +0100884 ) {
885 // Get descriptor of the current level
Imre Kis631127d2024-11-21 13:09:01 +0100886 let descriptor = &mut table[va.get_level_index(granule, level)];
Imre Kis703482d2023-11-30 15:51:26 +0100887
Imre Kis631127d2024-11-21 13:09:01 +0100888 // We reached the required level with the matching block size
889 if granule.block_size_at_level(level) == block_size {
Imre Kis703482d2023-11-30 15:51:26 +0100890 descriptor.set_block_descriptor_to_invalid(level);
Imre Kis9a9d0492024-10-31 15:19:46 +0100891 Self::invalidate(regime, Some(va));
Imre Kis703482d2023-11-30 15:51:26 +0100892 return;
893 }
894
895 // Need to iterate forward
896 match descriptor.get_descriptor_type(level) {
897 DescriptorType::Invalid => {
898 panic!("Cannot remove block from non-existing table");
899 }
900 DescriptorType::Block => {
Imre Kis631127d2024-11-21 13:09:01 +0100901 panic!("Cannot remove block with different block size");
Imre Kis703482d2023-11-30 15:51:26 +0100902 }
903 DescriptorType::Table => {
Imre Kisa7ef6842025-01-17 13:12:52 +0100904 let next_level_table = unsafe {
905 Self::get_table_from_pa_mut(
906 descriptor.get_next_level_table(level),
907 granule,
908 level + 1,
909 )
910 };
911
Imre Kis703482d2023-11-30 15:51:26 +0100912 Self::remove_block_descriptor_recursively(
Imre Kis631127d2024-11-21 13:09:01 +0100913 va.mask_for_level(granule, level),
914 block_size,
Imre Kis703482d2023-11-30 15:51:26 +0100915 level + 1,
916 next_level_table,
917 page_pool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100918 regime,
Imre Kis631127d2024-11-21 13:09:01 +0100919 granule,
Imre Kis703482d2023-11-30 15:51:26 +0100920 );
921
922 if next_level_table.iter().all(|d| !d.is_valid()) {
923 // Empty table
924 let mut page = unsafe {
Imre Kisa7ef6842025-01-17 13:12:52 +0100925 let table_pa = descriptor.set_table_descriptor_to_invalid(level);
926 let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
Imre Kis21d7f722025-01-17 17:55:35 +0100927 Pages::from_slice::<K, Descriptor>(next_table)
Imre Kis703482d2023-11-30 15:51:26 +0100928 };
Imre Kisa7ef6842025-01-17 13:12:52 +0100929
Imre Kis21d7f722025-01-17 17:55:35 +0100930 page.zero_init::<K>();
Imre Kis703482d2023-11-30 15:51:26 +0100931 page_pool.release_pages(page).unwrap();
932 }
933 }
934 }
935 }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::walk_descriptors(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    granule,
                )
            }
        }
    }

    /// Create a translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts(
                table_va.0 as *const Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    /// Create a mutable translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa_mut<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a mut [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts_mut(
                table_va.0 as *mut Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
            self.granule,
        )?;

        Ok(())
    }
}