// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::fmt;
use core::iter::zip;
use core::panic;

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use block::{Block, BlockIterator};

use bitflags::bitflags;
use packed_struct::prelude::*;
use thiserror::Error;

use self::descriptor::{
    Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability,
};
use self::kernel_space::KernelSpace;
use self::page_pool::{PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod block;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// Translation table error type
#[derive(Debug, Error)]
pub enum XlatError {
    #[error("Invalid parameter: {0}")]
    InvalidParameterError(&'static str),
    #[error("Cannot allocate {1} bytes: {0:?}")]
    PageAllocationError(RegionPoolError, usize),
    #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")]
    AlignmentError(PhysicalAddress, VirtualAddress, usize, usize),
    #[error("Entry not found for {0:?}")]
    VaNotFound(VirtualAddress),
    #[error("Cannot allocate virtual address {0:?}")]
    VaAllocationError(RegionPoolError),
    #[error("Cannot release virtual address {1:?}: {0:?}")]
    VaReleaseError(RegionPoolError, VirtualAddress),
}
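
// All fallible operations below surface `XlatError`, which implements
// `Display` via `thiserror`. A minimal handling sketch; `xlat` and `va` stand
// for values from the examples further down, and the `log` macros are an
// assumption about the integration, not part of this crate:
//
//     if let Err(e) = xlat.set_access_rights(va, 0x1000, MemoryAccessRights::R) {
//         log::error!("failed to update access rights: {}", e);
//     }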

/// Memory attributes
///
/// MAIR_EL1 should be configured in the same way in startup.s
#[allow(non_camel_case_types)]
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}
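
// `MemoryAccessRights` values compose with the usual bitflags operators; a
// minimal sketch:
//
//     let rights = MemoryAccessRights::RW | MemoryAccessRights::GLOBAL;
//     assert!(rights.contains(MemoryAccessRights::R));
//     assert!(!rights.contains(MemoryAccessRights::X));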

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

        Attributes {
            // Execute-never is set unless X is requested, and execution is only
            // enabled at the privilege level that owns the mapping: USER mappings
            // are never privileged-executable and vice versa.
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(Debug)]
pub enum RegimeVaRange {
    Lower,
    Upper,
}

#[derive(Debug)]
pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2,                      // EL2
    EL3,                      // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat<const VA_BITS: usize> {
    base_table: Pages,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
    granule: TranslationGranule<VA_BITS>,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map region
/// * unmap region
/// * find a mapped region which contains an address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
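///
/// # Example
/// A minimal usage sketch. The `page_pool` setup is elided because it depends
/// on the integration, and the VA range, VA_BITS value and ASID are
/// illustrative assumptions, not requirements of the API:
/// ```ignore
/// let mut xlat = Xlat::<36>::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x4000_0000), VirtualAddress(0x8000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
///     TranslationGranule::Granule4k,
/// );
///
/// let stack_va = xlat
///     .allocate_zero_init_range(None, 0x4000, MemoryAccessRights::RW)
///     .unwrap();
///
/// // SAFETY: no live references depend on the previous mapping.
/// unsafe { xlat.activate() };
/// ```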
impl<const VA_BITS: usize> Xlat<VA_BITS> {
    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Self {
        let initial_lookup_level = granule.initial_lookup_level();

        let base_table = page_pool
            .allocate_pages(
                granule.table_size::<Descriptor>(initial_lookup_level),
                Some(granule.table_alignment::<Descriptor>(initial_lookup_level)),
            )
            .unwrap();

        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table,
            page_pool,
            regions,
            regime,
            granule,
        }
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with the
    /// initial data
    /// # Arguments
    /// * va: Virtual address of the memory area; if None, a free virtual address is allocated
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
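    /// # Example
    /// A minimal sketch of loading a data blob at a pool-chosen virtual address:
    /// ```ignore
    /// let image: &[u8] = &[0u8; 4096]; // illustrative payload
    /// let va = xlat
    ///     .allocate_initalized_range(None, image, MemoryAccessRights::RX)
    ///     .unwrap();
    /// ```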
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(data.len(), Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, data.len()))?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocate memory pages from the page pool, map them to the given VA and fill them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area; if None, a free virtual address is allocated
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
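    /// # Example
    /// A minimal sketch of mapping a zeroed buffer at a fixed, illustrative VA:
    /// ```ignore
    /// let va = xlat
    ///     .allocate_zero_init_range(
    ///         Some(VirtualAddress(0x4010_0000)),
    ///         0x2000,
    ///         MemoryAccessRights::RW,
    ///     )
    ///     .unwrap();
    /// ```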
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self
            .page_pool
            .allocate_pages(length, Some(self.granule as usize))
            .map_err(|e| XlatError::PageAllocationError(e, length))?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Map memory area by physical address
    /// # Arguments
    /// * va: Virtual address of the memory area; if None, a free virtual address is allocated
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
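    /// # Example
    /// A minimal sketch of mapping device memory; the physical address stands in
    /// for a platform UART and is purely illustrative:
    /// ```ignore
    /// let uart_va = xlat
    ///     .map_physical_address_range(
    ///         None,
    ///         PhysicalAddress(0x0900_0000),
    ///         0x1000,
    ///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    ///     )
    ///     .unwrap();
    /// ```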
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::VaAllocationError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmap memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
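    /// # Example
    /// A minimal sketch; the VA and length must match a region that was mapped
    /// earlier:
    /// ```ignore
    /// xlat.unmap_virtual_address_range(va, 0x1000).unwrap();
    /// ```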
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(|e| XlatError::VaReleaseError(e, va))
    }

    /// Query physical address by virtual address range. Only returns a value if the memory area
    /// is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
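    /// # Example
    /// A minimal sketch of a VA to PA lookup:
    /// ```ignore
    /// let pa = xlat.get_pa_by_va(va, 0x1000).unwrap();
    /// ```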
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
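    /// # Example
    /// A minimal sketch of dropping write access once initialization is done:
    /// ```ignore
    /// xlat.set_access_rights(va, 0x1000, MemoryAccessRights::R).unwrap();
    /// ```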
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::VaNotFound(va))?;

        if !containing_region.used() {
            return Err(XlatError::VaNotFound(va));
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        // Select translation granule: lower VA ranges and single-range regimes
        // are configured by TCR_ELx.TG0, the upper VA range by TCR_ELx.TG1.
        let is_tg0 = match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, _)
            | TranslationRegime::EL2
            | TranslationRegime::EL3 => true,
            TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true,
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false,
        };

        if is_tg0 {
            self.modify_tcr(|tcr| {
                // TCR_ELx.TG0 granule size encoding: 0b00 = 4KB, 0b10 = 16KB, 0b01 = 64KB
                let tg0 = match self.granule {
                    TranslationGranule::Granule4k => 0b00,
                    TranslationGranule::Granule16k => 0b10,
                    TranslationGranule::Granule64k => 0b01,
                };

                // TG0 occupies TCR_ELx[15:14]
                (tcr & !(3 << 14)) | (tg0 << 14)
            });
        } else {
            self.modify_tcr(|tcr| {
                // TCR_ELx.TG1 granule size encoding: 0b10 = 4KB, 0b01 = 16KB, 0b11 = 64KB
                let tg1 = match self.granule {
                    TranslationGranule::Granule4k => 0b10,
                    TranslationGranule::Granule16k => 0b01,
                    TranslationGranule::Granule64k => 0b11,
                };

                // TG1 occupies TCR_ELx[31:30]
                (tcr & !(3 << 30)) | (tg1 << 30)
            });
        }

        // Set translation table base address, placing the ASID in TTBRx[63:48]
        // where the regime uses ASIDs
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.get_pa().0 as u64);

        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// # Safety
    /// Dummy function for test builds; it does nothing, so the safety
    /// requirements of the aarch64 version do not apply.
    #[cfg(not(target_arch = "aarch64"))]
    pub unsafe fn activate(&self) {}

    /// Modifies the TCR register of the selected regime of the instance.
    #[cfg(target_arch = "aarch64")]
    unsafe fn modify_tcr<F>(&self, f: F)
    where
        F: Fn(u64) -> u64,
    {
        let mut tcr: u64;

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el1
                isb",
                out(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "mrs {0}, tcr_el2
                isb",
                out(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "mrs {0}, tcr_el3
                isb",
                out(reg) tcr),
        }

        tcr = f(tcr);

        match &self.regime {
            TranslationRegime::EL1_0(_, _) => core::arch::asm!(
                "msr tcr_el1, {0}
                isb",
                in(reg) tcr),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(_, _) => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr tcr_el2, {0}
                isb",
                in(reg) tcr),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr tcr_el3, {0}
                isb",
                in(reg) tcr),
        }
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * f: Formatter to write the output to
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    /// * granule: Translation granule
    fn dump_table(
        f: &mut fmt::Formatter<'_>,
        level: isize,
        va: usize,
        table: &[Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> fmt::Result {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => {
                    writeln!(
                        f,
                        "{} {:#010x} Block -> {:#010x}",
                        level_prefix,
                        va,
                        descriptor.get_block_output_address(granule, level).0
                    )?;
                }
                DescriptorType::Table => {
                    let next_level_table =
                        unsafe { descriptor.get_next_level_table(granule, level) };
                    writeln!(
                        f,
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    )?;
                    Self::dump_table(f, level + 1, va, next_level_table, granule)?;
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// * attributes: Memory attributes of the region
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.map_block(block, attributes.clone())?;
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = BlockIterator::new(
            region.get_pa(),
            region.base(),
            region.length(),
            self.granule,
        )?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Find the mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to the virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * block_size: The block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) -> Result<(), XlatError> {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required granule level
        if granule.block_size_at_level(level) == block_size {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(granule, level, pa, attributes);
            return Ok(());
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice() };

                // Fill next level table
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Set table descriptor if the table is configured properly
                    unsafe { descriptor.set_table_descriptor(level, next_table, None) };
                } else {
                    // Release next level table on error and keep invalid descriptor on
                    // current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(granule, level);
                let current_pa = descriptor.get_block_output_address(granule, level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor

                // Allocate page for next level table
                let mut page = page_pool
                    .allocate_pages(
                        granule.table_size::<Descriptor>(level + 1),
                        Some(granule.table_alignment::<Descriptor>(level + 1)),
                    )
                    .map_err(|e| {
                        XlatError::PageAllocationError(
                            e,
                            granule.table_size::<Descriptor>(level + 1),
                        )
                    })?;

                let next_table = unsafe { page.get_as_mut_slice() };

                // Explode existing block descriptor into table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va
                        .add_offset(granule.block_size_at_level(level))
                        .unwrap(),
                )
                .step_by(granule.block_size_at_level(level + 1))
                {
                    let offset = exploded_va.diff(current_va).unwrap();

                    // This call sets a single block descriptor and it should not fail
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(granule, level),
                        granule.block_size_at_level(level + 1),
                        level + 1,
                        next_table,
                        page_pool,
                        regime,
                        granule,
                    )
                    .unwrap();
                }

                // Invoke self to continue recursion on the newly created level
                let result = Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(granule, level + 1),
                    block_size,
                    level + 1,
                    next_table,
                    page_pool,
                    regime,
                    granule,
                );

                if result.is_ok() {
                    // Follow break-before-make sequence
                    descriptor.set_block_or_invalid_descriptor_to_invalid(level);
                    Self::invalidate(regime, Some(current_va));

                    // Set table descriptor if the table is configured properly
                    unsafe { descriptor.set_table_descriptor(level, next_table, None) };
                } else {
                    // Release next level table on error and keep invalid descriptor on
                    // current level
                    page_pool.release_pages(page).unwrap();
                }

                result
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                page_pool,
                regime,
                granule,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            &self.page_pool,
            &self.regime,
            self.granule,
        )
    }

    /// Removes the block descriptor from the translation table, along with all the intermediate
    /// tables which become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * block_size: Translation block size in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime
    /// * granule: Translation granule
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
        granule: TranslationGranule<VA_BITS>,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        // We reached the required level with the matching block size
        if granule.block_size_at_level(level) == block_size {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different block size");
            }
            DescriptorType::Table => {
                let next_level_table =
                    unsafe { descriptor.get_next_level_table_mut(granule, level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(granule, level),
                    block_size,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                    granule,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(
                            descriptor.set_table_descriptor_to_invalid(granule, level),
                        )
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor {
        Self::walk_descriptors(
            va,
            block_size,
            self.granule.initial_lookup_level(),
            unsafe { self.base_table.get_as_mut_slice::<Descriptor>() },
            self.granule,
        )
    }

    fn walk_descriptors(
        va: VirtualAddress,
        block_size: usize,
        level: isize,
        table: &mut [Descriptor],
        granule: TranslationGranule<VA_BITS>,
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(granule, level)];

        if granule.block_size_at_level(level) == block_size {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => Self::walk_descriptors(
                va.mask_for_level(granule, level),
                block_size,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(granule, level) },
                granule,
            ),
        }
    }

    #[cfg(target_arch = "aarch64")]
    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
}

impl<const VA_BITS: usize> fmt::Debug for Xlat<VA_BITS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Xlat")
            .field("regime", &self.regime)
            .field("granule", &self.granule)
            .field("VA_BITS", &VA_BITS)
            .field("base_table", &self.base_table.get_pa())
            .finish()?;

        Self::dump_table(
            f,
            self.granule.initial_lookup_level(),
            0,
            unsafe { self.base_table.get_as_slice() },
            self.granule,
        )?;

        Ok(())
    }
}
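
// `Xlat` implements `fmt::Debug` by dumping the whole table hierarchy, which
// backs the "print translation table details" debug feature listed above. A
// minimal sketch, assuming some logging facility such as the `log` crate is
// configured:
//
//     log::debug!("{:?}", xlat);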