// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::{
    Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability,
};
use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1}: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}
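
// A matching MAIR_EL1 value (illustrative; the actual value is programmed by
// the startup code, not by this module): attribute index 0 must be
// Device-nGnRnE (attribute byte 0x00) and index 1 Normal Inner/Outer
// Write-Back Write-Allocate (attribute byte 0xff), i.e.
//
//     MAIR_EL1 = 0x0000_0000_0000_ff00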
64
65bitflags! {
66 #[derive(Debug, Clone, Copy)]
67 pub struct MemoryAccessRights : u32 {
68 const R = 0b00000001;
69 const W = 0b00000010;
70 const X = 0b00000100;
71 const NS = 0b00001000;
72
73 const RW = Self::R.bits() | Self::W.bits();
74 const RX = Self::R.bits() | Self::X.bits();
75 const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
76
77 const USER = 0b00010000;
78 const DEVICE = 0b00100000;
Imre Kisc1dab892024-03-26 12:03:58 +010079 const GLOBAL = 0b01000000;
Imre Kis703482d2023-11-30 15:51:26 +010080 }
81}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum RegimeVaRange {
    Lower,
    Upper,
}

#[derive(Debug, Clone, Copy)]
pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

impl TranslationRegime {
    fn is_upper_va_range(&self) -> bool {
        match self {
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true,
            _ => false,
        }
    }
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat<const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given area
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
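///
/// # Example
/// A minimal sketch of the high level interface. The page pool setup and the
/// address values are illustrative assumptions, not part of this crate's
/// contract:
/// ```ignore
/// let mut xlat = Xlat::<36>::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x0000_0000), VirtualAddress(0x1000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///     TranslationGranule::Granule4k,
/// );
///
/// // Map a device region at a fixed VA, then activate the translation tables.
/// let va = xlat
///     .map_physical_address_range(
///         Some(VirtualAddress(0x0900_0000)),
///         PhysicalAddress(0x0900_0000),
///         0x1000,
///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE | MemoryAccessRights::GLOBAL,
///     )
///     .unwrap();
/// unsafe { xlat.activate() };
/// ```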
impl<const VA_BITS: usize> Xlat<VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        if !address.start.is_valid_in_regime::<VA_BITS>(regime)
            || !address.end.is_valid_in_regime::<VA_BITS>(regime)
        {
            panic!(
                "Invalid address range {:?} for regime {:?}",
                address, regime
            );
        }

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and
    /// fills them with the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and
    /// fills them with zeros
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value if the
    /// memory area is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };
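
        // TG0 lives at TCR_ELx[15:14] and TG1 at TCR_EL1[31:30]; the two fields
        // use different encodings for the same granule sizes, which is why the
        // match arms below differ.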
        if is_tg0 {
            self.modify_tcr(|tcr| {
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds; it has no effect.
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the given formatter
    /// # Arguments
    /// * f: Destination formatter
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
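    ///
    /// A sketch of the produced output with illustrative addresses (each
    /// `Table` line is followed by the dump of the next level table):
    /// ```text
    /// |- 0x40000000 Table -> 0x80001000
    /// | |- 0x40000000 Block -> 0x80200000
    /// | |- 0x40200000 Block -> 0x80400000
    /// ```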
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let table_pa = descriptor.get_next_level_table(level);
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix, va, table_pa.0
                    )?;

                    let next_level_table =
                        unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) };
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds a memory region to the translation table. The function splits the region
    /// into blocks and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the
    /// region into blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base().remove_upper_bits::<VA_BITS>(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }
607
Imre Kis703482d2023-11-30 15:51:26 +0100608 /// Add block to memory mapping
609 /// # Arguments
610 /// * block: Memory block that can be represented by a single translation table entry
611 /// * attributes: Memory block's permissions, flags
Imre Kisd20b5292024-12-04 16:05:30 +0100612 fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
Imre Kis703482d2023-11-30 15:51:26 +0100613 Self::set_block_descriptor_recursively(
614 attributes,
615 block.pa,
616 block.va,
Imre Kis631127d2024-11-21 13:09:01 +0100617 block.size,
618 self.granule.initial_lookup_level(),
619 unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
Imre Kis703482d2023-11-30 15:51:26 +0100620 &self.page_pool,
Imre Kis9a9d0492024-10-31 15:19:46 +0100621 &self.regime,
Imre Kis631127d2024-11-21 13:09:01 +0100622 self.granule,
Imre Kisd20b5292024-12-04 16:05:30 +0100623 )
Imre Kis703482d2023-11-30 15:51:26 +0100624 }

    /// Adds the block descriptor to the translation table, along with all the
    /// intermediate tables needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
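    ///
    /// # Worked example
    /// An illustrative trace, assuming a 4K granule and 48 bit VAs: mapping a
    /// 2 MiB block starts at the initial lookup level 0 and recurses through
    /// levels 0 -> 1 -> 2, following or allocating table descriptors on the
    /// way down, until `granule.block_size_at_level(2)` matches the requested
    /// 2 MiB block size and the block descriptor is written at level 2.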
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
                        next_table.as_ptr() as u64,
                    ) as usize);
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level + 1),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    let next_table_pa = PhysicalAddress(KernelSpace::kernel_to_pa(
                        next_table.as_ptr() as u64,
                    ) as usize);

                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    descriptor.set_table_descriptor(level, next_table_pa, None);
                } else {
                    // Release next level table on error and keep invalid descriptor on current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                )
            }
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Removes the block descriptor from the translation table and releases all the
    /// intermediate tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        let table_pa = descriptor.set_table_descriptor_to_invalid(level);
                        let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1);
                        Pages::from_slice(next_table)
                    };

                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe {
                    Self::get_table_from_pa_mut(
                        descriptor.get_next_level_table(level),
                        granule,
                        level + 1,
                    )
                };

                Self::walk_descriptors(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    granule,
                )
            }
        }
    }

    /// Create a translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a [Descriptor] {
        let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
        unsafe {
            core::slice::from_raw_parts(
                table_va as *const Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    /// Create a mutable translation table descriptor slice from a physical address.
    ///
    /// # Safety
    /// The caller must ensure that the physical address points to a valid translation table and
    /// it is mapped into the virtual address space of the running kernel context.
    unsafe fn get_table_from_pa_mut<'a>(
        pa: PhysicalAddress,
        granule: TranslationGranule<VA_BITS>,
        level: isize,
    ) -> &'a mut [Descriptor] {
        let table_va = KernelSpace::pa_to_kernel(pa.0 as u64);
        unsafe {
            core::slice::from_raw_parts_mut(
                table_va as *mut Descriptor,
                granule.entry_count_at_level(level),
            )
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
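        //
        // The VA forms (`tlbi vaae1is`, `vae2is`, `vae3is`) invalidate a single
        // address and broadcast to the Inner Shareable domain, while the
        // `aside1` and `alle2`/`alle3` forms flush the whole regime. The
        // trailing `dsb nsh; isb` orders the invalidation before any later
        // translation table walk.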
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<const VA_BITS: usize> fmt::Debug for Xlat<VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice() },
            self.granule,
        )?;

        Ok(())
    }
}