// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::marker::PhantomData;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::DescriptorType;

use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1}: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

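/// Conversion of [`MemoryAccessRights`] into descriptor [`Attributes`].
///
/// For example, `MemoryAccessRights::RX | MemoryAccessRights::USER` converts to
/// attributes with `uxn: false`, `pxn: true` (user-executable but not
/// privileged-executable) and `DataAccessPermissions::ReadOnly_ReadOnly`.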
impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum RegimeVaRange {
    Lower,
    Upper,
}

#[derive(Debug, Clone, Copy)]
pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

impl TranslationRegime {
    fn is_upper_va_range(&self) -> bool {
        match self {
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true,
            _ => false,
        }
    }
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

/// Trait for converting between the virtual address space of the running kernel environment
/// and the physical address space.
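///
/// # Example
/// A minimal sketch for an identity-mapped kernel environment (VA == PA). The
/// `IdentityTranslator` name and the tuple-struct constructors are illustrative
/// assumptions, not items provided by this crate:
/// ```ignore
/// struct IdentityTranslator;
///
/// impl KernelAddressTranslator for IdentityTranslator {
///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
///         PhysicalAddress(va.0)
///     }
///
///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
///         VirtualAddress(pa.0)
///     }
/// }
/// ```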
pub trait KernelAddressTranslator {
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress;
    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress;
}

pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
    _kernel_address_translator: PhantomData<K>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by PA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
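///
/// # Example
/// A high level usage sketch. The page pool setup, the `IdentityTranslator` type and the
/// `va_range` and `code_image` values are illustrative assumptions, not crate-provided items:
/// ```ignore
/// let mut xlat = Xlat::<IdentityTranslator, 36>::new(
///     page_pool,
///     va_range, // VirtualAddressRange managed by this instance
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///     TranslationGranule::Granule4k,
/// );
///
/// // Allocate and map a zero initialized RW area at an arbitrary VA
/// let data_va = xlat.allocate_zero_init_range(None, 0x4000, MemoryAccessRights::RW)?;
///
/// // Load code into a newly allocated RX area
/// let code_va = xlat.allocate_initalized_range(None, &code_image, MemoryAccessRights::RX)?;
///
/// // Program TTBRx and the granule bits of TCR for the selected regime
/// unsafe { xlat.activate() };
/// ```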
impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        if !address.start.is_valid_in_regime::<VA_BITS>(regime)
            || !address.end.is_valid_in_regime::<VA_BITS>(regime)
        {
            panic!(
                "Invalid address range {:?} for regime {:?}",
                address, regime
            );
        }

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
            _kernel_address_translator: PhantomData,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page::<K>(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init::<K>();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
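    /// # Example
    /// Mapping a device MMIO region; the peripheral address and the `PhysicalAddress`
    /// tuple-struct constructor are illustrative assumptions:
    /// ```ignore
    /// let uart_va = xlat.map_physical_address_range(
    ///     None,
    ///     PhysicalAddress(0x0900_0000),
    ///     0x1000,
    ///     MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    /// )?;
    /// ```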
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value if the memory
    /// area is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
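    /// # Example
    /// Making a previously writable mapping read-only (`data_va` and the length are
    /// illustrative):
    /// ```ignore
    /// xlat.set_access_rights(data_va, 0x4000, MemoryAccessRights::R)?;
    /// ```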
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule. TCR_ELx.TG0 and TCR_ELx.TG1 encode the same granule
        // sizes with different values, hence the two separate match blocks below.
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = self.base_table.get_pa().0 as u64;

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * f: Formatter to write to
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let table_pa = descriptor.get_next_level_table(level);
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix, va, table_pa.0
                    )?;

                    let next_level_table =
                        unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds a memory region to the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Remove memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole region
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate
    /// tables needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level + 1),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    let next_table_pa =
                        K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize));

                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                )
            }
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Removes the block descriptor from the translation table, releasing all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        let table_pa = descriptor.set_table_descriptor_to_invalid(level);
                        let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
                        Pages::from_slice::<K, Descriptor>(next_table)
                    };

                    page.zero_init::<K>();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::walk_descriptors(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    granule,
                )
            }
        }
    }

    /// Create a translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts(
                table_va.0 as *const Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    /// Create a mutable translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa_mut<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a mut [Descriptor] {
        let table_va = K::pa_to_kernel(pa);
        unsafe {
            core::slice::from_raw_parts_mut(
                table_va.0 as *mut Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice::<K, Descriptor>() },
            self.granule,
        )?;

        Ok(())
    }
}