| // SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com> |
| // SPDX-License-Identifier: MIT OR Apache-2.0 |
| |
| #![allow(dead_code)] |
| #![cfg_attr(not(test), no_std)] |
| #![doc = include_str!("../README.md")] |
| |
| extern crate alloc; |
| |
| use core::fmt; |
| use core::iter::zip; |
| use core::marker::PhantomData; |
| use core::panic; |
| |
| use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange}; |
| use block::{Block, BlockIterator}; |
| |
| use bitflags::bitflags; |
| use packed_struct::prelude::*; |
| use thiserror::Error; |
| |
| use self::descriptor::{ |
|     Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability, |
| }; |
| use self::page_pool::{PagePool, Pages}; |
| use self::region::{PhysicalRegion, VirtualRegion}; |
| use self::region_pool::{Region, RegionPool, RegionPoolError}; |
| |
| pub mod address; |
| mod block; |
| mod descriptor; |
| mod granule; |
| pub mod page_pool; |
| mod region; |
| mod region_pool; |
| |
| /// Translation table error type |
| #[derive(Debug, Error)] |
| pub enum XlatError { |
| #[error("Invalid parameter: {0}")] |
| InvalidParameterError(&'static str), |
| #[error("Cannot allocate {1}: {0:?}")] |
| PageAllocationError(RegionPoolError, usize), |
| #[error("Alignment error: {0:?} {1:?} length={2:#x} granule={3:#x}")] |
| AlignmentError(PhysicalAddress, VirtualAddress, usize, usize), |
| #[error("Entry not found for {0:?}")] |
| VaNotFound(VirtualAddress), |
| #[error("Cannot allocate virtual address {0:?}")] |
| VaAllocationError(RegionPoolError), |
| #[error("Cannot release virtual address {1:?}: {0:?}")] |
| VaReleaseError(RegionPoolError, VirtualAddress), |
| } |
| |
| /// Memory attributes |
| /// |
| /// MAIR_EL1 must be configured to match these indices (e.g. in startup.s) |
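| /// |
| /// A matching MAIR_EL1 value could be composed as follows (illustrative sketch, |
| /// not part of this crate; each attribute field is 8 bits wide): |
| /// ```ignore |
| /// let mair_el1: u64 = 0x00 | (0xff << 8); // idx 0: Device-nGnRnE, idx 1: Normal WBWA |
| /// ``` |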
| #[allow(non_camel_case_types)] |
| #[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)] |
| pub enum MemoryAttributesIndex { |
| #[default] |
| Device_nGnRnE = 0x00, |
| Normal_IWBWA_OWBWA = 0x01, |
| } |
| |
| bitflags! { |
| /// Memory access rights |
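|     /// |
|     /// Flags can be combined with the usual bitwise operators, e.g. a read-write, |
|     /// non-secure mapping (illustrative): |
|     /// ```ignore |
|     /// let rights = MemoryAccessRights::RW | MemoryAccessRights::NS; |
|     /// ``` |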
| #[derive(Debug, Clone, Copy)] |
| pub struct MemoryAccessRights : u32 { |
| /// Read |
| const R = 0b00000001; |
| /// Write |
| const W = 0b00000010; |
| /// Execute |
| const X = 0b00000100; |
| /// Non-secure |
| const NS = 0b00001000; |
| |
| /// Read-write |
| const RW = Self::R.bits() | Self::W.bits(); |
| /// Read-execute |
| const RX = Self::R.bits() | Self::X.bits(); |
| /// Read-write-execute |
| const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits(); |
| |
| /// User accessible |
| const USER = 0b00010000; |
| /// Device region |
| const DEVICE = 0b00100000; |
| /// Global (not tied to ASID) |
| const GLOBAL = 0b01000000; |
| } |
| } |
| |
| impl From<MemoryAccessRights> for Attributes { |
| fn from(access_rights: MemoryAccessRights) -> Self { |
| let data_access_permissions = match ( |
| access_rights.contains(MemoryAccessRights::USER), |
| access_rights.contains(MemoryAccessRights::W), |
| ) { |
| (false, false) => DataAccessPermissions::ReadOnly_None, |
| (false, true) => DataAccessPermissions::ReadWrite_None, |
| (true, false) => DataAccessPermissions::ReadOnly_ReadOnly, |
| (true, true) => DataAccessPermissions::ReadWrite_ReadWrite, |
| }; |
| |
| let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) { |
| MemoryAttributesIndex::Device_nGnRnE |
| } else { |
| MemoryAttributesIndex::Normal_IWBWA_OWBWA |
| }; |
| |
| Attributes { |
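|             // Execute permission is granted only to the privilege level selected by |
|             // the USER flag: UXN/PXN stay set unless X is requested for that level. |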
| uxn: !access_rights.contains(MemoryAccessRights::X) |
| || !access_rights.contains(MemoryAccessRights::USER), |
| pxn: !access_rights.contains(MemoryAccessRights::X) |
| || access_rights.contains(MemoryAccessRights::USER), |
| contiguous: false, |
| not_global: !access_rights.contains(MemoryAccessRights::GLOBAL), |
| access_flag: true, |
| shareability: Shareability::NonShareable, |
| data_access_permissions, |
| non_secure: access_rights.contains(MemoryAccessRights::NS), |
| mem_attr_index, |
| } |
| } |
| } |
| |
| /// Virtual Address range, selects x in `TTBRx_EL*` |
| #[derive(Debug, Clone, Copy)] |
| pub enum RegimeVaRange { |
|     /// Lower virtual address range, selects `TTBR0_EL*` |
| Lower, |
|     /// Upper virtual address range, selects `TTBR1_EL*` |
| Upper, |
| } |
| |
| /// Translation regime |
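| /// |
| /// For example, an EL1&0 stage 1 regime using the lower VA range with ASID 1 |
| /// (illustrative): |
| /// ```ignore |
| /// let regime = TranslationRegime::EL1_0(RegimeVaRange::Lower, 1); |
| /// ``` |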
| #[derive(Debug, Clone, Copy)] |
| pub enum TranslationRegime { |
| /// EL1 and EL0 stage 1, TTBRx_EL1 |
| EL1_0(RegimeVaRange, u8), |
|     /// EL2 and EL0 with VHE |
|     #[cfg(target_feature = "vh")] |
|     EL2_0(RegimeVaRange, u8), |
| /// EL2 |
| EL2, |
| /// EL3, TTBR0_EL3 |
| EL3, |
| } |
| |
| impl TranslationRegime { |
| /// Checks if the translation regime uses the upper virtual address range. |
| fn is_upper_va_range(&self) -> bool { |
| match self { |
| TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => true, |
| #[cfg(target_feature = "vh")] |
|             TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => true, |
| _ => false, |
| } |
| } |
| } |
| |
| /// Translation granule |
| pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>; |
| |
| /// Trait for converting between virtual address space of the running kernel environment and |
| /// the physical address space. |
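| /// |
| /// A minimal identity-mapping sketch, assuming the kernel runs with VA == PA: |
| /// ```ignore |
| /// struct IdentityTranslator; |
| /// |
| /// impl KernelAddressTranslator for IdentityTranslator { |
| ///     fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress { |
| ///         PhysicalAddress(va.0) |
| ///     } |
| /// |
| ///     fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress { |
| ///         VirtualAddress(pa.0) |
| ///     } |
| /// } |
| /// ``` |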
| pub trait KernelAddressTranslator { |
| /// Convert virtual address of the running kernel environment into a physical address. |
| fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress; |
| /// Convert physical address into a virtual address of the running kernel environment. |
| fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress; |
| } |
| |
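| /// Memory translation table instance for a single translation regime. |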
| pub struct Xlat<K: KernelAddressTranslator, const VA_BITS: usize> { |
| base_table: Pages, |
| page_pool: PagePool, |
| regions: RegionPool<VirtualRegion>, |
| regime: TranslationRegime, |
| granule: TranslationGranule<VA_BITS>, |
| _kernel_address_translator: PhantomData<K>, |
| } |
| |
| /// Memory translation table handling |
| /// |
| /// # High level interface |
| /// * allocate and map zero initialized region (with or without VA) |
| /// * allocate and map memory region and load contents (with or without VA) |
| /// * map memory region by PA (with or without VA) |
| /// * unmap memory region by PA |
| /// * query PA by VA |
| /// * set access rights of mapped memory areas |
| /// * activate mapping |
| /// |
| /// # Debug features |
| /// * print translation table details |
| /// |
| /// # Region level interface |
| /// * map regions |
| /// * unmap region |
| /// * find a mapped region which contains a given address range |
| /// * find empty area for region |
| /// * set access rights for a region |
| /// |
| /// # Block level interface |
| /// * map block |
| /// * unmap block |
| /// * set access rights of block |
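| /// |
| /// # Example |
| /// |
| /// A usage sketch; `page_pool` construction and the `IdentityTranslator` from the |
| /// [`KernelAddressTranslator`] docs are assumptions, not a verified setup: |
| /// ```ignore |
| /// let mut xlat = Xlat::<IdentityTranslator, 36>::new( |
| ///     page_pool, |
| ///     VirtualAddressRange::new(VirtualAddress(0), VirtualAddress(0x1_0000_0000)), |
| ///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0), |
| ///     TranslationGranule::Granule4k, |
| /// ); |
| /// |
| /// // Map a 2 MiB device region at a VA chosen by the region pool |
| /// let va = xlat |
| ///     .map_physical_address_range( |
| ///         None, |
| ///         PhysicalAddress(0x8000_0000), |
| ///         0x20_0000, |
| ///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE, |
| ///     ) |
| ///     .unwrap(); |
| /// |
| /// // SAFETY: no live references depend on the previous mapping |
| /// unsafe { xlat.activate() }; |
| /// ``` |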
| impl<K: KernelAddressTranslator, const VA_BITS: usize> Xlat<K, VA_BITS> { |
| /// Create new Xlat instance |
| /// # Arguments |
| /// * page_pool: Page pool to allocate translation tables |
| /// * address: Virtual address range |
| /// * regime: Translation regime |
| /// * granule: Translation granule |
| /// # Return value |
| /// * Xlat instance |
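|     /// # Panics |
|     /// Panics if the address range is not valid in the given regime or if the initial |
|     /// translation table cannot be allocated from the page pool. |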
| pub fn new( |
| page_pool: PagePool, |
| address: VirtualAddressRange, |
| regime: TranslationRegime, |
| granule: TranslationGranule<VA_BITS>, |
| ) -> Self { |
| let initial_lookup_level = granule.initial_lookup_level(); |
| |
| if !address.start.is_valid_in_regime::<VA_BITS>(regime) |
| || !address.end.is_valid_in_regime::<VA_BITS>(regime) |
| { |
| panic!( |
| "Invalid address range {:?} for regime {:?}", |
| address, regime |
| ); |
| } |
| |
| let base_table = page_pool |
| .allocate_pages( |
| granule.table_size::<Descriptor>(initial_lookup_level), |
| Some(granule.table_alignment::<Descriptor>(initial_lookup_level)), |
| ) |
| .unwrap(); |
| |
| let mut regions = RegionPool::new(); |
| regions |
| .add(VirtualRegion::new(address.start, address.len().unwrap())) |
| .unwrap(); |
| Self { |
| base_table, |
| page_pool, |
| regions, |
| regime, |
| granule, |
| _kernel_address_translator: PhantomData, |
| } |
| } |
| |
|     /// Allocates memory pages from the page pool, maps them to the given VA and fills them with |
|     /// the initial data |
| /// # Arguments |
| /// * va: Virtual address of the memory area |
| /// * data: Data to be loaded to the memory area |
| /// * access_rights: Memory access rights of the area |
| /// # Return value |
| /// * Virtual address of the mapped memory |
|     pub fn allocate_initialized_range( |
| &mut self, |
| va: Option<VirtualAddress>, |
| data: &[u8], |
| access_rights: MemoryAccessRights, |
| ) -> Result<VirtualAddress, XlatError> { |
| let mut pages = self |
| .page_pool |
| .allocate_pages(data.len(), Some(self.granule as usize)) |
| .map_err(|e| XlatError::PageAllocationError(e, data.len()))?; |
| |
| pages.copy_data_to_page::<K>(data); |
| |
| let pages_length = pages.length(); |
| let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages); |
| let region = if let Some(required_va) = va { |
| self.regions |
| .acquire(required_va, pages_length, physical_region) |
| } else { |
| self.regions.allocate(pages_length, physical_region, None) |
| } |
| .map_err(XlatError::VaAllocationError)?; |
| |
| self.map_region(region, access_rights.into()) |
| } |
| |
|     /// Allocates memory pages from the page pool, maps them to the given VA and fills them |
|     /// with zeros |
| /// # Arguments |
| /// * va: Virtual address of the memory area |
| /// * length: Length of the memory area in bytes |
| /// * access_rights: Memory access rights of the area |
| /// # Return value |
| /// * Virtual address of the mapped memory |
| pub fn allocate_zero_init_range( |
| &mut self, |
| va: Option<VirtualAddress>, |
| length: usize, |
| access_rights: MemoryAccessRights, |
| ) -> Result<VirtualAddress, XlatError> { |
| let mut pages = self |
| .page_pool |
| .allocate_pages(length, Some(self.granule as usize)) |
| .map_err(|e| XlatError::PageAllocationError(e, length))?; |
| |
| pages.zero_init::<K>(); |
| |
| let pages_length = pages.length(); |
| let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages); |
| let region = if let Some(required_va) = va { |
| self.regions |
| .acquire(required_va, pages_length, physical_region) |
| } else { |
| self.regions.allocate(pages_length, physical_region, None) |
| } |
| .map_err(XlatError::VaAllocationError)?; |
| |
| self.map_region(region, access_rights.into()) |
| } |
| |
| /// Map memory area by physical address |
| /// # Arguments |
| /// * va: Virtual address of the memory area |
| /// * pa: Physical address of the memory area |
| /// * length: Length of the memory area in bytes |
| /// * access_rights: Memory access rights of the area |
| /// # Return value |
| /// * Virtual address of the mapped memory |
| pub fn map_physical_address_range( |
| &mut self, |
| va: Option<VirtualAddress>, |
| pa: PhysicalAddress, |
| length: usize, |
| access_rights: MemoryAccessRights, |
| ) -> Result<VirtualAddress, XlatError> { |
| let resource = PhysicalRegion::PhysicalAddress(pa); |
| let region = if let Some(required_va) = va { |
| self.regions.acquire(required_va, length, resource) |
| } else { |
| self.regions.allocate(length, resource, None) |
| } |
| .map_err(XlatError::VaAllocationError)?; |
| |
| self.map_region(region, access_rights.into()) |
| } |
| |
| /// Unmap memory area by virtual address |
| /// # Arguments |
| /// * va: Virtual address |
| /// * length: Length of the memory area in bytes |
| pub fn unmap_virtual_address_range( |
| &mut self, |
| va: VirtualAddress, |
| length: usize, |
| ) -> Result<(), XlatError> { |
| let pa = self.get_pa_by_va(va, length)?; |
| |
| let region_to_release = VirtualRegion::new_with_pa(pa, va, length); |
| |
| self.unmap_region(®ion_to_release)?; |
| |
| self.regions |
| .release(region_to_release) |
| .map_err(|e| XlatError::VaReleaseError(e, va)) |
| } |
| |
|     /// Query physical address by virtual address range. Only returns a value if the memory area |
|     /// is mapped as a contiguous area. |
| /// # Arguments |
| /// * va: Virtual address of the memory area |
| /// * length: Length of the memory area in bytes |
| /// # Return value |
| /// * Physical address of the mapped memory |
| pub fn get_pa_by_va( |
| &self, |
| va: VirtualAddress, |
| length: usize, |
| ) -> Result<PhysicalAddress, XlatError> { |
| let containing_region = self |
| .find_containing_region(va, length) |
| .ok_or(XlatError::VaNotFound(va))?; |
| |
| if !containing_region.used() { |
| return Err(XlatError::VaNotFound(va)); |
| } |
| |
| Ok(containing_region.get_pa_for_va(va)) |
| } |
| |
|     /// Sets the memory access rights of a memory area |
| /// # Arguments |
| /// * va: Virtual address of the memory area |
| /// * length: Length of the memory area in bytes |
| /// * access_rights: New memory access rights of the area |
| pub fn set_access_rights( |
| &mut self, |
| va: VirtualAddress, |
| length: usize, |
| access_rights: MemoryAccessRights, |
| ) -> Result<(), XlatError> { |
| let containing_region = self |
| .find_containing_region(va, length) |
| .ok_or(XlatError::VaNotFound(va))?; |
| |
| if !containing_region.used() { |
| return Err(XlatError::VaNotFound(va)); |
| } |
| |
| let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length); |
| self.map_region(region, access_rights.into())?; |
| |
| Ok(()) |
| } |
| |
| /// Activate memory mapping represented by the object |
| /// |
| /// # Safety |
| /// When activating memory mapping for the running exception level, the |
| /// caller must ensure that the new mapping will not break any existing |
| /// references. After activation the caller must ensure that there are no |
| /// active references when unmapping memory. |
| #[cfg(target_arch = "aarch64")] |
| pub unsafe fn activate(&self) { |
| // Select translation granule |
| let is_tg0 = match &self.regime { |
| TranslationRegime::EL1_0(RegimeVaRange::Lower, _) |
| | TranslationRegime::EL2 |
| | TranslationRegime::EL3 => true, |
| TranslationRegime::EL1_0(RegimeVaRange::Upper, _) => false, |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(RegimeVaRange::Lower, _) => true, |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(RegimeVaRange::Upper, _) => false, |
| }; |
| |
| if is_tg0 { |
| self.modify_tcr(|tcr| { |
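|                 // TCR_ELx.TG0, bits [15:14]: 0b00 = 4KB, 0b10 = 16KB, 0b01 = 64KB |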
| let tg0 = match self.granule { |
| TranslationGranule::Granule4k => 0b00, |
| TranslationGranule::Granule16k => 0b10, |
| TranslationGranule::Granule64k => 0b01, |
| }; |
| |
| (tcr & !(3 << 14)) | (tg0 << 14) |
| }); |
| } else { |
| self.modify_tcr(|tcr| { |
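|                 // TCR_EL1.TG1, bits [31:30]: 0b10 = 4KB, 0b01 = 16KB, 0b11 = 64KB |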
| let tg1 = match self.granule { |
| TranslationGranule::Granule4k => 0b10, |
| TranslationGranule::Granule16k => 0b01, |
| TranslationGranule::Granule64k => 0b11, |
| }; |
| |
| (tcr & !(3 << 30)) | (tg1 << 30) |
| }); |
| } |
| |
| // Set translation table |
| let base_table_pa = self.base_table.get_pa().0 as u64; |
| |
| match &self.regime { |
| TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!( |
| "msr ttbr0_el1, {0} |
| isb", |
| in(reg) ((*asid as u64) << 48) | base_table_pa), |
| TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!( |
| "msr ttbr1_el1, {0} |
| isb", |
| in(reg) ((*asid as u64) << 48) | base_table_pa), |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!( |
| "msr ttbr0_el2, {0} |
| isb", |
| in(reg) ((*asid as u64) << 48) | base_table_pa), |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!( |
| "msr ttbr1_el2, {0} |
| isb", |
| in(reg) ((*asid as u64) << 48) | base_table_pa), |
| TranslationRegime::EL2 => core::arch::asm!( |
| "msr ttbr0_el2, {0} |
| isb", |
| in(reg) base_table_pa), |
| TranslationRegime::EL3 => core::arch::asm!( |
| "msr ttbr0_el3, {0} |
| isb", |
| in(reg) base_table_pa), |
| } |
| } |
| |
|     /// Dummy implementation for non-aarch64 builds (e.g. unit tests). |
|     /// |
|     /// # Safety |
|     /// This no-op implementation has no safety requirements. |
| #[cfg(not(target_arch = "aarch64"))] |
| pub unsafe fn activate(&self) {} |
| |
|     /// Modifies the TCR_ELx register of the instance's translation regime. |
| #[cfg(target_arch = "aarch64")] |
| unsafe fn modify_tcr<F>(&self, f: F) |
| where |
| F: Fn(u64) -> u64, |
| { |
| let mut tcr: u64; |
| |
| match &self.regime { |
| TranslationRegime::EL1_0(_, _) => core::arch::asm!( |
| "mrs {0}, tcr_el1 |
| isb", |
| out(reg) tcr), |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(_, _) => core::arch::asm!( |
| "mrs {0}, tcr_el2 |
| isb", |
| out(reg) tcr), |
| TranslationRegime::EL2 => core::arch::asm!( |
| "mrs {0}, tcr_el2 |
| isb", |
| out(reg) tcr), |
| TranslationRegime::EL3 => core::arch::asm!( |
| "mrs {0}, tcr_el3 |
| isb", |
| out(reg) tcr), |
| } |
| |
| tcr = f(tcr); |
| |
| match &self.regime { |
| TranslationRegime::EL1_0(_, _) => core::arch::asm!( |
| "msr tcr_el1, {0} |
| isb", |
| in(reg) tcr), |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(_, _) => core::arch::asm!( |
| "msr tcr_el2, {0} |
| isb", |
| in(reg) tcr), |
| TranslationRegime::EL2 => core::arch::asm!( |
| "msr tcr_el2, {0} |
| isb", |
| in(reg) tcr), |
| TranslationRegime::EL3 => core::arch::asm!( |
| "msr tcr_el3, {0} |
| isb", |
| in(reg) tcr), |
| } |
| } |
| |
|     /// Prints a single translation table and its child tables to the given formatter |
|     /// # Arguments |
|     /// * f: Destination formatter |
|     /// * level: Level of the translation table |
|     /// * va: Base virtual address of the table |
|     /// * table: Table entries |
|     /// * granule: Translation granule |
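|     /// |
|     /// Example output (illustrative): |
|     /// ```text |
|     /// |- 0x40000000 Table -> 0x40001000 |
|     /// | |- 0x40200000 Block -> 0x80000000 |
|     /// ``` |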
| fn dump_table( |
| f: &mut fmt::Formatter<'_>, |
| level: isize, |
| va: usize, |
| table: &[Descriptor], |
| granule: TranslationGranule<VA_BITS>, |
| ) -> fmt::Result { |
| let level_prefix = match level { |
| 0 | 1 => "|-", |
| 2 => "| |-", |
| _ => "| | |-", |
| }; |
| |
| for (descriptor, va) in zip(table, (va..).step_by(granule.block_size_at_level(level))) { |
| match descriptor.get_descriptor_type(level) { |
| DescriptorType::Block => { |
| writeln!( |
| f, |
| "{} {:#010x} Block -> {:#010x}", |
| level_prefix, |
| va, |
| descriptor.get_block_output_address(granule, level).0 |
| )?; |
| } |
| DescriptorType::Table => { |
| let table_pa = descriptor.get_next_level_table(level); |
| writeln!( |
| f, |
| "{} {:#010x} Table -> {:#010x}", |
| level_prefix, va, table_pa.0 |
| )?; |
| |
| let next_level_table = |
| unsafe { Self::get_table_from_pa(table_pa, granule, level + 1) }; |
| Self::dump_table(f, level + 1, va, next_level_table, granule)?; |
| } |
| _ => {} |
| } |
| } |
| |
| Ok(()) |
| } |
| |
|     /// Adds a memory region to the translation table. The function splits the region into blocks |
|     /// and uses the block level functions to do the mapping. |
| /// # Arguments |
| /// * region: Memory region object |
| /// * attributes: Memory attributes |
| /// # Return value |
| /// * Virtual address of the mapped memory |
| fn map_region( |
| &mut self, |
| region: VirtualRegion, |
| attributes: Attributes, |
| ) -> Result<VirtualAddress, XlatError> { |
| let blocks = BlockIterator::new( |
| region.get_pa(), |
| region.base().remove_upper_bits::<VA_BITS>(), |
| region.length(), |
| self.granule, |
| )?; |
| for block in blocks { |
| self.map_block(block, attributes.clone())?; |
| } |
| |
| Ok(region.base()) |
| } |
| |
|     /// Removes a memory region from the translation table. The function splits the region into |
|     /// blocks and uses the block level functions to do the unmapping. |
| /// # Arguments |
| /// * region: Memory region object |
| fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> { |
| let blocks = BlockIterator::new( |
| region.get_pa(), |
| region.base().remove_upper_bits::<VA_BITS>(), |
| region.length(), |
| self.granule, |
| )?; |
| for block in blocks { |
| self.unmap_block(block); |
| } |
| |
| Ok(()) |
| } |
| |
|     /// Find the mapped region that contains the whole given range |
| /// # Arguments |
| /// * va: Virtual address to look for |
| /// * length: Length of the region |
| /// # Return value |
| /// * Reference to virtual region if found |
| fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> { |
| self.regions.find_containing_region(va, length).ok() |
| } |
| |
| /// Add block to memory mapping |
| /// # Arguments |
| /// * block: Memory block that can be represented by a single translation table entry |
| /// * attributes: Memory block's permissions, flags |
| fn map_block(&mut self, block: Block, attributes: Attributes) -> Result<(), XlatError> { |
| Self::set_block_descriptor_recursively( |
| attributes, |
| block.pa, |
| block.va, |
| block.size, |
| self.granule.initial_lookup_level(), |
| unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() }, |
| &self.page_pool, |
| &self.regime, |
| self.granule, |
| ) |
| } |
| |
|     /// Adds the block descriptor to the translation table, creating all the intermediate tables |
|     /// needed to reach the required granule. |
|     /// # Arguments |
|     /// * attributes: Memory block's permissions, flags |
| /// * pa: Physical address |
| /// * va: Virtual address |
| /// * block_size: The block size in bytes |
| /// * level: Translation table level |
| /// * table: Translation table on the given level |
| /// * page_pool: Page pool where the function can allocate pages for the translation tables |
| /// * regime: Translation regime |
| /// * granule: Translation granule |
| #[allow(clippy::too_many_arguments)] |
| fn set_block_descriptor_recursively( |
| attributes: Attributes, |
| pa: PhysicalAddress, |
| va: VirtualAddress, |
| block_size: usize, |
| level: isize, |
| table: &mut [Descriptor], |
| page_pool: &PagePool, |
| regime: &TranslationRegime, |
| granule: TranslationGranule<VA_BITS>, |
| ) -> Result<(), XlatError> { |
| // Get descriptor of the current level |
| let descriptor = &mut table[va.get_level_index(granule, level)]; |
| |
| // We reached the required granule level |
| if granule.block_size_at_level(level) == block_size { |
|             // Follow the break-before-make sequence: invalidate the old descriptor |
|             // and its TLB entries before writing the new block descriptor |
| descriptor.set_block_or_invalid_descriptor_to_invalid(level); |
| Self::invalidate(regime, Some(va)); |
| descriptor.set_block_descriptor(granule, level, pa, attributes); |
| return Ok(()); |
| } |
| |
| // Need to iterate forward |
| match descriptor.get_descriptor_type(level) { |
| DescriptorType::Invalid => { |
| // Allocate page for next level table |
| let mut page = page_pool |
| .allocate_pages( |
| granule.table_size::<Descriptor>(level + 1), |
| Some(granule.table_alignment::<Descriptor>(level + 1)), |
| ) |
| .map_err(|e| { |
| XlatError::PageAllocationError( |
| e, |
| granule.table_size::<Descriptor>(level + 1), |
| ) |
| })?; |
| |
| let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() }; |
| |
| // Fill next level table |
| let result = Self::set_block_descriptor_recursively( |
| attributes, |
| pa, |
| va.mask_for_level(granule, level), |
| block_size, |
| level + 1, |
| next_table, |
| page_pool, |
| regime, |
| granule, |
| ); |
| |
| if result.is_ok() { |
| // Set table descriptor if the table is configured properly |
| let next_table_pa = |
| K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize)); |
| descriptor.set_table_descriptor(level, next_table_pa, None); |
| } else { |
| // Release next level table on error and keep invalid descriptor on current level |
| page_pool.release_pages(page).unwrap(); |
| } |
| |
| result |
| } |
| DescriptorType::Block => { |
| // Saving current descriptor details |
| let current_va = va.mask_bits(!(granule.block_size_at_level(level) - 1)); |
| let current_pa = descriptor.get_block_output_address(granule, level); |
| let current_attributes = descriptor.get_block_attributes(level); |
| |
| // Replace block descriptor by table descriptor |
| |
| // Allocate page for next level table |
| let mut page = page_pool |
| .allocate_pages( |
| granule.table_size::<Descriptor>(level + 1), |
| Some(granule.table_alignment::<Descriptor>(level + 1)), |
| ) |
| .map_err(|e| { |
| XlatError::PageAllocationError( |
| e, |
| granule.table_size::<Descriptor>(level + 1), |
| ) |
| })?; |
| |
| let next_table = unsafe { page.get_as_mut_slice::<K, Descriptor>() }; |
| |
| // Explode existing block descriptor into table entries |
| for exploded_va in VirtualAddressRange::new( |
| current_va, |
| current_va |
| .add_offset(granule.block_size_at_level(level)) |
| .unwrap(), |
| ) |
| .step_by(granule.block_size_at_level(level + 1)) |
| { |
| let offset = exploded_va.diff(current_va).unwrap(); |
| |
| // This call sets a single block descriptor and it should not fail |
| Self::set_block_descriptor_recursively( |
| current_attributes.clone(), |
| current_pa.add_offset(offset).unwrap(), |
| exploded_va.mask_for_level(granule, level), |
| granule.block_size_at_level(level + 1), |
| level + 1, |
| next_table, |
| page_pool, |
| regime, |
| granule, |
| ) |
| .unwrap(); |
| } |
| |
| // Invoke self to continue recursion on the newly created level |
| let result = Self::set_block_descriptor_recursively( |
| attributes, |
| pa, |
| va.mask_for_level(granule, level), |
| block_size, |
| level + 1, |
| next_table, |
| page_pool, |
| regime, |
| granule, |
| ); |
| |
| if result.is_ok() { |
| let next_table_pa = |
| K::kernel_to_pa(VirtualAddress(next_table.as_ptr() as usize)); |
| |
| // Follow break-before-make sequence |
| descriptor.set_block_or_invalid_descriptor_to_invalid(level); |
| Self::invalidate(regime, Some(current_va)); |
| |
| // Set table descriptor if the table is configured properly |
| descriptor.set_table_descriptor(level, next_table_pa, None); |
| } else { |
| // Release next level table on error and keep invalid descriptor on current level |
| page_pool.release_pages(page).unwrap(); |
| } |
| |
| result |
| } |
| DescriptorType::Table => { |
| let next_level_table = unsafe { |
| Self::get_table_from_pa_mut( |
| descriptor.get_next_level_table(level), |
| granule, |
| level + 1, |
| ) |
| }; |
| |
| Self::set_block_descriptor_recursively( |
| attributes, |
| pa, |
| va.mask_for_level(granule, level), |
| block_size, |
| level + 1, |
| next_level_table, |
| page_pool, |
| regime, |
| granule, |
| ) |
| } |
| } |
| } |
| |
| /// Remove block from memory mapping |
| /// # Arguments |
| /// * block: memory block that can be represented by a single translation entry |
| fn unmap_block(&mut self, block: Block) { |
| Self::remove_block_descriptor_recursively( |
| block.va, |
| block.size, |
| self.granule.initial_lookup_level(), |
| unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() }, |
| &self.page_pool, |
| &self.regime, |
| self.granule, |
| ) |
| } |
| |
|     /// Removes the block descriptor from the translation table, releasing all the intermediate |
|     /// tables which become empty during the removal process. |
| /// # Arguments |
| /// * va: Virtual address |
| /// * block_size: Translation block size in bytes |
| /// * level: Translation table level |
| /// * table: Translation table on the given level |
| /// * page_pool: Page pool where the function can release the pages of empty tables |
| /// * regime: Translation regime |
| /// * granule: Translation granule |
| fn remove_block_descriptor_recursively( |
| va: VirtualAddress, |
| block_size: usize, |
| level: isize, |
| table: &mut [Descriptor], |
| page_pool: &PagePool, |
| regime: &TranslationRegime, |
| granule: TranslationGranule<VA_BITS>, |
| ) { |
| // Get descriptor of the current level |
| let descriptor = &mut table[va.get_level_index(granule, level)]; |
| |
| // We reached the required level with the matching block size |
| if granule.block_size_at_level(level) == block_size { |
| descriptor.set_block_descriptor_to_invalid(level); |
| Self::invalidate(regime, Some(va)); |
| return; |
| } |
| |
| // Need to iterate forward |
| match descriptor.get_descriptor_type(level) { |
| DescriptorType::Invalid => { |
| panic!("Cannot remove block from non-existing table"); |
| } |
| DescriptorType::Block => { |
| panic!("Cannot remove block with different block size"); |
| } |
| DescriptorType::Table => { |
| let next_level_table = unsafe { |
| Self::get_table_from_pa_mut( |
| descriptor.get_next_level_table(level), |
| granule, |
| level + 1, |
| ) |
| }; |
| |
| Self::remove_block_descriptor_recursively( |
| va.mask_for_level(granule, level), |
| block_size, |
| level + 1, |
| next_level_table, |
| page_pool, |
| regime, |
| granule, |
| ); |
| |
| if next_level_table.iter().all(|d| !d.is_valid()) { |
| // Empty table |
| let mut page = unsafe { |
| let table_pa = descriptor.set_table_descriptor_to_invalid(level); |
| let next_table = Self::get_table_from_pa_mut(table_pa, granule, level + 1); |
| Pages::from_slice::<K, Descriptor>(next_table) |
| }; |
| |
| page.zero_init::<K>(); |
| page_pool.release_pages(page).unwrap(); |
| } |
| } |
| } |
| } |
| |
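|     /// Returns the mutable descriptor that maps the given virtual address with the |
|     /// given block size. |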
| fn get_descriptor(&mut self, va: VirtualAddress, block_size: usize) -> &mut Descriptor { |
| Self::walk_descriptors( |
| va, |
| block_size, |
| self.granule.initial_lookup_level(), |
| unsafe { self.base_table.get_as_mut_slice::<K, Descriptor>() }, |
| self.granule, |
| ) |
| } |
| |
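|     /// Walks the translation tables from the given level and returns the mutable |
|     /// descriptor that maps `va` with the given block size. Panics if an invalid or |
|     /// block descriptor is found before reaching the matching level. |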
| fn walk_descriptors( |
| va: VirtualAddress, |
| block_size: usize, |
| level: isize, |
| table: &mut [Descriptor], |
| granule: TranslationGranule<VA_BITS>, |
| ) -> &mut Descriptor { |
| // Get descriptor of the current level |
| let descriptor = &mut table[va.get_level_index(granule, level)]; |
| |
| if granule.block_size_at_level(level) == block_size { |
| return descriptor; |
| } |
| |
| // Need to iterate forward |
| match descriptor.get_descriptor_type(level) { |
| DescriptorType::Invalid => { |
| panic!("Invalid descriptor"); |
| } |
| DescriptorType::Block => { |
| panic!("Cannot split existing block descriptor to table"); |
| } |
| DescriptorType::Table => { |
| let next_level_table = unsafe { |
| Self::get_table_from_pa_mut( |
| descriptor.get_next_level_table(level), |
| granule, |
| level + 1, |
| ) |
| }; |
| |
| Self::walk_descriptors( |
| va.mask_for_level(granule, level), |
| block_size, |
| level + 1, |
| next_level_table, |
| granule, |
| ) |
| } |
| } |
| } |
| |
| /// Create a translation table descriptor slice from a physical address. |
| /// |
| /// # Safety |
| /// The caller must ensure that the physical address points to a valid translation table and |
|     /// it is mapped into the virtual address space of the running kernel context. |
| unsafe fn get_table_from_pa<'a>( |
| pa: PhysicalAddress, |
| granule: TranslationGranule<VA_BITS>, |
| level: isize, |
| ) -> &'a [Descriptor] { |
| let table_va = K::pa_to_kernel(pa); |
| unsafe { |
| core::slice::from_raw_parts( |
| table_va.0 as *const Descriptor, |
| granule.entry_count_at_level(level), |
| ) |
| } |
| } |
| |
| /// Create a mutable translation table descriptor slice from a physical address. |
| /// |
| /// # Safety |
| /// The caller must ensure that the physical address points to a valid translation table and |
|     /// it is mapped into the virtual address space of the running kernel context. |
| unsafe fn get_table_from_pa_mut<'a>( |
| pa: PhysicalAddress, |
| granule: TranslationGranule<VA_BITS>, |
| level: isize, |
| ) -> &'a mut [Descriptor] { |
| let table_va = K::pa_to_kernel(pa); |
| unsafe { |
| core::slice::from_raw_parts_mut( |
| table_va.0 as *mut Descriptor, |
| granule.entry_count_at_level(level), |
| ) |
| } |
| } |
| |
| #[cfg(target_arch = "aarch64")] |
| fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) { |
| // SAFETY: The assembly code invalidates the translation table entry of |
| // the VA or all entries of the translation regime. |
| unsafe { |
| if let Some(VirtualAddress(va)) = va { |
| match regime { |
| TranslationRegime::EL1_0(_, _) => { |
| core::arch::asm!( |
| "tlbi vaae1is, {0} |
| dsb nsh |
| isb", |
| in(reg) va) |
| } |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(_, _) => { |
| core::arch::asm!( |
| "tlbi vaae1is, {0} |
| dsb nsh |
| isb", |
| in(reg) va) |
| } |
| TranslationRegime::EL2 => core::arch::asm!( |
| "tlbi vae2is, {0} |
| dsb nsh |
| isb", |
| in(reg) va), |
| TranslationRegime::EL3 => core::arch::asm!( |
| "tlbi vae3is, {0} |
| dsb nsh |
| isb", |
| in(reg) va), |
| } |
| } else { |
| match regime { |
| TranslationRegime::EL1_0(_, asid) => core::arch::asm!( |
| "tlbi aside1, {0} |
| dsb nsh |
| isb", |
| in(reg) (*asid as u64) << 48 |
| ), |
| #[cfg(target_feature = "vh")] |
| TranslationRegime::EL2_0(_, asid) => core::arch::asm!( |
| "tlbi aside1, {0} |
| dsb nsh |
| isb", |
| in(reg) (*asid as u64) << 48 |
| ), |
| TranslationRegime::EL2 => core::arch::asm!( |
| "tlbi alle2 |
| dsb nsh |
| isb" |
| ), |
| TranslationRegime::EL3 => core::arch::asm!( |
| "tlbi alle3 |
| dsb nsh |
| isb" |
| ), |
| } |
| } |
| } |
| } |
| |
| #[cfg(not(target_arch = "aarch64"))] |
| fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {} |
| } |
| |
| impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result { |
| f.debug_struct("Xlat") |
| .field("regime", &self.regime) |
| .field("granule", &self.granule) |
| .field("VA_BITS", &VA_BITS) |
| .field("base_table", &self.base_table.get_pa()) |
| .finish()?; |
| |
| Self::dump_table( |
| f, |
| self.granule.initial_lookup_level(), |
| 0, |
| unsafe { self.base_table.get_as_slice::<K, Descriptor>() }, |
| self.granule, |
| )?; |
| |
| Ok(()) |
| } |
| } |