// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(not(test), no_std)]

extern crate alloc;

use core::iter::zip;
use core::{fmt, panic};

use address::{PhysicalAddress, VirtualAddress, VirtualAddressRange};
use alloc::boxed::Box;
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use log::debug;

use bitflags::bitflags;
use packed_struct::prelude::*;

use self::descriptor::{
    Attributes, DataAccessPermissions, Descriptor, DescriptorType, Shareability,
};
use self::kernel_space::KernelSpace;
use self::page_pool::{Page, PagePool, Pages};
use self::region::{PhysicalRegion, VirtualRegion};
use self::region_pool::{Region, RegionPool, RegionPoolError};

pub mod address;
mod descriptor;
mod granule;
pub mod kernel_space;
pub mod page_pool;
mod region;
mod region_pool;

/// The first level of the translation table hierarchy. Its 64 level 1 descriptors each cover a
/// 1 GiB block of the virtual address space or point to a level 2 table.
#[repr(C, align(512))]
pub struct BaseTable {
    pub descriptors: [Descriptor; 64],
}

impl BaseTable {
    pub fn new() -> Self {
        BaseTable {
            descriptors: core::array::from_fn(|_| Descriptor::default()),
        }
    }
}

/// Translation table error type
#[derive(Debug)]
pub enum XlatError {
    InvalidParameterError(String),
    AllocationError(String),
    AlignmentError(String),
    Overflow,
    InvalidOperation(String),
    Overlap,
    NotFound,
    RegionPoolError(RegionPoolError),
}

/// Memory attributes
///
/// MAIR_EL1 has to be configured in startup.s to match these attribute indices.
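///
/// A minimal sketch of a matching MAIR_EL1 setup; the exact register value used by startup.s is
/// an assumption here, derived from these two indices:
/// ```ignore
/// // Attr0 = 0x00 (Device-nGnRnE), Attr1 = 0xff (Normal, Inner/Outer Write-Back Write-Allocate)
/// unsafe { core::arch::asm!("msr mair_el1, {0}", in(reg) 0x0000_0000_0000_ff00u64) };
/// ```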
#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum MemoryAttributesIndex {
    #[default]
    Device_nGnRnE = 0x00,
    Normal_IWBWA_OWBWA = 0x01,
}

bitflags! {
    #[derive(Debug, Clone, Copy)]
    pub struct MemoryAccessRights : u32 {
        const R = 0b00000001;
        const W = 0b00000010;
        const X = 0b00000100;
        const NS = 0b00001000;

        const RW = Self::R.bits() | Self::W.bits();
        const RX = Self::R.bits() | Self::X.bits();
        const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();

        const USER = 0b00010000;
        const DEVICE = 0b00100000;
        const GLOBAL = 0b01000000;
    }
}

impl From<MemoryAccessRights> for Attributes {
    fn from(access_rights: MemoryAccessRights) -> Self {
        let data_access_permissions = match (
            access_rights.contains(MemoryAccessRights::USER),
            access_rights.contains(MemoryAccessRights::W),
        ) {
            (false, false) => DataAccessPermissions::ReadOnly_None,
            (false, true) => DataAccessPermissions::ReadWrite_None,
            (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
            (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
        };

        let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
            MemoryAttributesIndex::Device_nGnRnE
        } else {
            MemoryAttributesIndex::Normal_IWBWA_OWBWA
        };

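        // Execute-never policy derived from the requested rights: the region is executable at
        // EL0 only if it is both X and USER, and executable at the privileged level only if it
        // is X and not USER, so user-accessible memory is never executable by privileged code.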
        Attributes {
            uxn: !access_rights.contains(MemoryAccessRights::X)
                || !access_rights.contains(MemoryAccessRights::USER),
            pxn: !access_rights.contains(MemoryAccessRights::X)
                || access_rights.contains(MemoryAccessRights::USER),
            contiguous: false,
            not_global: !access_rights.contains(MemoryAccessRights::GLOBAL),
            access_flag: true,
            shareability: Shareability::NonShareable,
            data_access_permissions,
            non_secure: access_rights.contains(MemoryAccessRights::NS),
            mem_attr_index,
        }
    }
}

#[derive(PartialEq)]
struct Block {
    pa: PhysicalAddress,
    va: VirtualAddress,
    granule: usize,
}

impl Block {
    fn new(pa: PhysicalAddress, va: VirtualAddress, granule: usize) -> Self {
        assert!(Xlat::GRANULE_SIZES.contains(&granule));
        Self { pa, va, granule }
    }
}

impl fmt::Debug for Block {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Block")
            .field("pa", &format_args!("{:#010x}", self.pa.0))
            .field("va", &format_args!("{:#010x}", self.va.0))
            .field("granule", &format_args!("{:#010x}", self.granule))
            .finish()
    }
}

pub enum RegimeVaRange {
    Lower,
    Upper,
}

pub enum TranslationRegime {
    EL1_0(RegimeVaRange, u8), // EL1 and EL0 stage 1, TTBRx_EL1
    #[cfg(target_feature = "vh")]
    EL2_0(RegimeVaRange, u8), // EL2 and EL0 with VHE
    EL2, // EL2
    EL3, // EL3, TTBR0_EL3
}

pub type TranslationGranule<const VA_BITS: usize> = granule::TranslationGranule<VA_BITS>;

pub struct Xlat {
    base_table: Box<BaseTable>,
    page_pool: PagePool,
    regions: RegionPool<VirtualRegion>,
    regime: TranslationRegime,
}

/// Memory translation table handling
/// # High level interface
/// * allocate and map zero initialized region (with or without VA)
/// * allocate and map memory region and load contents (with or without VA)
/// * map memory region by PA (with or without VA)
/// * unmap memory region by VA
/// * query PA by VA
/// * set access rights of mapped memory areas
/// * activate the mapping
///
/// # Debug features
/// * print translation table details
///
/// # Region level interface
/// * map regions
/// * unmap region
/// * find a mapped region which contains a given virtual address range
/// * find empty area for region
/// * set access rights for a region
/// * create blocks by region
///
/// # Block level interface
/// * map block
/// * unmap block
/// * set access rights of block
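///
/// # Example
///
/// A minimal sketch of the high level interface. The page pool construction and the covered VA
/// range below are assumptions; adjust them to the actual platform setup.
/// ```ignore
/// let mut xlat = Xlat::new(
///     page_pool,
///     VirtualAddressRange::new(VirtualAddress(0x0000_0000), VirtualAddress(0x1000_0000)),
///     TranslationRegime::EL1_0(RegimeVaRange::Lower, 0),
/// );
///
/// // Allocate and map a zero initialized 16 KiB region at an arbitrary free VA.
/// let va = xlat
///     .allocate_zero_init_range(None, 0x4000, MemoryAccessRights::RW)
///     .unwrap();
///
/// // Activate the mapping on the current core.
/// unsafe { xlat.activate() };
/// ```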
impl Xlat {
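    /// Block sizes of each translation table level in bytes, indexed by level. Level 0 is not
    /// used by this implementation; level 1 maps 1 GiB blocks, level 2 maps 2 MiB blocks and
    /// level 3 maps 4 KiB pages.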
    pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];

    pub fn new(
        page_pool: PagePool,
        address: VirtualAddressRange,
        regime: TranslationRegime,
    ) -> Self {
        let mut regions = RegionPool::new();
        regions
            .add(VirtualRegion::new(address.start, address.len().unwrap()))
            .unwrap();
        Self {
            base_table: Box::new(BaseTable::new()),
            page_pool,
            regions,
            regime,
        }
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// the initial data
    /// # Arguments
    /// * va: Virtual address of the memory area, or None to let the allocator pick a free area
    /// * data: Data to be loaded to the memory area
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
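    ///
    /// A hypothetical use, loading a page aligned image at a fixed VA (the address and the
    /// `image` buffer are illustrative):
    /// ```ignore
    /// let image = [0u8; 0x2000];
    /// let va = xlat
    ///     .allocate_initalized_range(
    ///         Some(VirtualAddress(0x4000_0000)),
    ///         &image,
    ///         MemoryAccessRights::RX,
    ///     )
    ///     .unwrap();
    /// ```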
    pub fn allocate_initalized_range(
        &mut self,
        va: Option<VirtualAddress>,
        data: &[u8],
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
            XlatError::AllocationError(format!(
                "Cannot allocate pages for {} bytes ({:?})",
                data.len(),
                e
            ))
        })?;

        pages.copy_data_to_page(data);

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
    /// zeros
    /// # Arguments
    /// * va: Virtual address of the memory area, or None to let the allocator pick a free area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
    pub fn allocate_zero_init_range(
        &mut self,
        va: Option<VirtualAddress>,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
            XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
        })?;

        pages.zero_init();

        let pages_length = pages.length();
        let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
        let region = if let Some(required_va) = va {
            self.regions
                .acquire(required_va, pages_length, physical_region)
        } else {
            self.regions.allocate(pages_length, physical_region, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Maps a memory area by its physical address
    /// # Arguments
    /// * va: Virtual address of the memory area, or None to let the allocator pick a free area
    /// * pa: Physical address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: Memory access rights of the area
    /// # Return value
    /// * Virtual address of the mapped memory
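    ///
    /// A hypothetical use, mapping a 4 KiB device MMIO page as device memory (the physical
    /// address is illustrative):
    /// ```ignore
    /// let uart_va = xlat
    ///     .map_physical_address_range(
    ///         None,
    ///         PhysicalAddress(0x0900_0000),
    ///         0x1000,
    ///         MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
    ///     )
    ///     .unwrap();
    /// ```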
    pub fn map_physical_address_range(
        &mut self,
        va: Option<VirtualAddress>,
        pa: PhysicalAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<VirtualAddress, XlatError> {
        let resource = PhysicalRegion::PhysicalAddress(pa);
        let region = if let Some(required_va) = va {
            self.regions.acquire(required_va, length, resource)
        } else {
            self.regions.allocate(length, resource, None)
        }
        .map_err(XlatError::RegionPoolError)?;

        self.map_region(region, access_rights.into())
    }

    /// Unmaps a memory area by virtual address
    /// # Arguments
    /// * va: Virtual address
    /// * length: Length of the memory area in bytes
    pub fn unmap_virtual_address_range(
        &mut self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<(), XlatError> {
        let pa = self.get_pa_by_va(va, length)?;

        let region_to_release = VirtualRegion::new_with_pa(pa, va, length);

        self.unmap_region(&region_to_release)?;

        self.regions
            .release(region_to_release)
            .map_err(XlatError::RegionPoolError)
    }

    /// Queries the physical address of a virtual address range. Only returns a value if the
    /// memory area is mapped as a contiguous area.
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// # Return value
    /// * Physical address of the mapped memory
    pub fn get_pa_by_va(
        &self,
        va: VirtualAddress,
        length: usize,
    ) -> Result<PhysicalAddress, XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        Ok(containing_region.get_pa_for_va(va))
    }

    /// Sets the memory access rights of a memory area
    /// # Arguments
    /// * va: Virtual address of the memory area
    /// * length: Length of the memory area in bytes
    /// * access_rights: New memory access rights of the area
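    ///
    /// A hypothetical use, revoking write access from a previously mapped region (the VA and
    /// length are illustrative):
    /// ```ignore
    /// xlat.set_access_rights(VirtualAddress(0x4000_0000), 0x2000, MemoryAccessRights::R)
    ///     .unwrap();
    /// ```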
    pub fn set_access_rights(
        &mut self,
        va: VirtualAddress,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<(), XlatError> {
        let containing_region = self
            .find_containing_region(va, length)
            .ok_or(XlatError::NotFound)?;

        if !containing_region.used() {
            return Err(XlatError::NotFound);
        }

        let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
        self.map_region(region, access_rights.into())?;

        Ok(())
    }

    /// Activate memory mapping represented by the object
    ///
    /// # Safety
    /// When activating memory mapping for the running exception level, the
    /// caller must ensure that the new mapping will not break any existing
    /// references. After activation the caller must ensure that there are no
    /// active references when unmapping memory.
    pub unsafe fn activate(&self) {
        let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);

        #[cfg(target_arch = "aarch64")]
        match &self.regime {
            TranslationRegime::EL1_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL1_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el1, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Lower, asid) => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            #[cfg(target_feature = "vh")]
            TranslationRegime::EL2_0(RegimeVaRange::Upper, asid) => core::arch::asm!(
                "msr ttbr1_el2, {0}
                isb",
                in(reg) ((*asid as u64) << 48) | base_table_pa),
            TranslationRegime::EL2 => core::arch::asm!(
                "msr ttbr0_el2, {0}
                isb",
                in(reg) base_table_pa),
            TranslationRegime::EL3 => core::arch::asm!(
                "msr ttbr0_el3, {0}
                isb",
                in(reg) base_table_pa),
        }
    }

    /// Prints the translation tables to the debug console recursively
    pub fn print(&self) {
        debug!(
            "Xlat table -> {:#010x}",
            self.base_table.descriptors.as_ptr() as u64
        );
        Self::print_table(1, 0, &self.base_table.descriptors);
    }

    /// Prints a single translation table to the debug console
    /// # Arguments
    /// * level: Level of the translation table
    /// * va: Base virtual address of the table
    /// * table: Table entries
    pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
        let level_prefix = match level {
            0 | 1 => "|-",
            2 => "| |-",
            _ => "| | |-",
        };

        for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
            match descriptor.get_descriptor_type(level) {
                DescriptorType::Block => debug!(
                    "{} {:#010x} Block -> {:#010x}",
                    level_prefix,
                    va,
                    descriptor.get_block_output_address(level).0
                ),
                DescriptorType::Table => {
                    let next_level_table = unsafe { descriptor.get_next_level_table(level) };
                    debug!(
                        "{} {:#010x} Table -> {:#010x}",
                        level_prefix,
                        va,
                        next_level_table.as_ptr() as usize
                    );
                    Self::print_table(level + 1, va, next_level_table);
                }
                _ => {}
            }
        }
    }

    /// Adds a memory region to the translation table. The function splits the region into blocks
    /// and uses the block level functions to do the mapping.
    /// # Arguments
    /// * region: Memory region object
    /// # Return value
    /// * Virtual address of the mapped memory
    fn map_region(
        &mut self,
        region: VirtualRegion,
        attributes: Attributes,
    ) -> Result<VirtualAddress, XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.map_block(block, attributes.clone());
        }

        Ok(region.base())
    }

    /// Removes a memory region from the translation table. The function splits the region into
    /// blocks and uses the block level functions to do the unmapping.
    /// # Arguments
    /// * region: Memory region object
    fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
        let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
        for block in blocks {
            self.unmap_block(block);
        }

        Ok(())
    }

    /// Finds the mapped region that contains the whole given area
    /// # Arguments
    /// * va: Virtual address of the area to look for
    /// * length: Length of the area in bytes
    /// # Return value
    /// * Reference to virtual region if found
    fn find_containing_region(&self, va: VirtualAddress, length: usize) -> Option<&VirtualRegion> {
        self.regions.find_containing_region(va, length).ok()
    }

    /// Splits a memory region into blocks that match the granule sizes of the translation table.
    /// For example, a 2 MiB aligned region of 2 MiB + 4 KiB is split into one 2 MiB block
    /// followed by one 4 KiB page.
    /// # Arguments
    /// * pa: Physical address
    /// * va: Virtual address
    /// * length: Region size in bytes
    /// # Return value
    /// * Vector of granule sized blocks
    fn split_region_to_blocks(
        mut pa: PhysicalAddress,
        mut va: VirtualAddress,
        mut length: usize,
    ) -> Result<Vec<Block>, XlatError> {
        let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;

        if length == 0 {
            return Err(XlatError::InvalidParameterError(
                "Length cannot be 0".to_string(),
            ));
        }

        if (pa.0 | va.0 | length) & min_granule_mask != 0 {
            return Err(XlatError::InvalidParameterError(format!(
                "Addresses and length must be aligned {:#08x} {:#08x} {:#x}",
                pa.0, va.0, length
            )));
        }

        let mut pages = Vec::new();

        while length > 0 {
            for granule in &Self::GRANULE_SIZES {
                // Skip the unused level 0 entry, otherwise the mask below would underflow.
                if *granule != 0 && (pa.0 | va.0) & (*granule - 1) == 0 && length >= *granule {
                    pages.push(Block::new(pa, va, *granule));
                    pa = pa.add_offset(*granule).ok_or(XlatError::Overflow)?;
                    va = va.add_offset(*granule).ok_or(XlatError::Overflow)?;

                    length -= *granule;
                    break;
                }
            }
        }

        Ok(pages)
    }

    /// Add block to memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    /// * attributes: Memory block's permissions, flags
    fn map_block(&mut self, block: Block, attributes: Attributes) {
        Self::set_block_descriptor_recursively(
            attributes,
            block.pa,
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
            &self.regime,
        );
    }

    /// Adds the block descriptor to the translation table, creating all the intermediate tables
    /// needed to reach the required granule.
    /// # Arguments
    /// * attributes: Memory block's permissions, flags
    /// * pa: Physical address
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can allocate pages for the translation tables
    /// * regime: Translation regime, used for TLB invalidation
    #[allow(clippy::too_many_arguments)]
    fn set_block_descriptor_recursively(
        attributes: Attributes,
        pa: PhysicalAddress,
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            // Follow break-before-make sequence
            descriptor.set_block_or_invalid_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            descriptor.set_block_descriptor(level, pa, attributes);
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }
                Self::set_block_descriptor_recursively(
                    attributes,
                    pa,
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    unsafe { descriptor.get_next_level_table_mut(level) },
                    page_pool,
                    regime,
                )
            }
            DescriptorType::Block => {
                // Saving current descriptor details
                let current_va = va.mask_for_level(level);
                let current_pa = descriptor.get_block_output_address(level);
                let current_attributes = descriptor.get_block_attributes(level);

                // Replace block descriptor by table descriptor
                let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
                unsafe {
                    let next_table = page.get_as_slice();
                    descriptor.set_table_descriptor(level, next_table, None);
                }

                // Explode block descriptor to table entries
                for exploded_va in VirtualAddressRange::new(
                    current_va,
                    current_va.add_offset(Self::GRANULE_SIZES[level]).unwrap(),
                )
                .step_by(Self::GRANULE_SIZES[level + 1])
                {
                    let offset = exploded_va.diff(current_va).unwrap();
                    Self::set_block_descriptor_recursively(
                        current_attributes.clone(),
                        current_pa.add_offset(offset).unwrap(),
                        exploded_va.mask_for_level(level),
                        Self::GRANULE_SIZES[level + 1],
                        level + 1,
                        unsafe { descriptor.get_next_level_table_mut(level) },
                        page_pool,
                        regime,
                    )
                }

                // Invoke self to continue recursion on the newly created level
                Self::set_block_descriptor_recursively(
                    attributes, pa, va, granule, level, table, page_pool, regime,
                );
            }
            DescriptorType::Table => Self::set_block_descriptor_recursively(
                attributes,
                pa,
                va.mask_for_level(level),
                granule,
                level + 1,
                unsafe { descriptor.get_next_level_table_mut(level) },
                page_pool,
                regime,
            ),
        }
    }

    /// Remove block from memory mapping
    /// # Arguments
    /// * block: Memory block that can be represented by a single translation table entry
    fn unmap_block(&mut self, block: Block) {
        Self::remove_block_descriptor_recursively(
            block.va,
            block.granule,
            1,
            self.base_table.descriptors.as_mut_slice(),
            &self.page_pool,
            &self.regime,
        );
    }

    /// Removes block descriptor from the translation table along all the intermediate tables which
    /// become empty during the removal process.
    /// # Arguments
    /// * va: Virtual address
    /// * granule: Translation granule in bytes
    /// * level: Translation table level
    /// * table: Translation table on the given level
    /// * page_pool: Page pool where the function can release the pages of empty tables
    /// * regime: Translation regime, used for TLB invalidation
    fn remove_block_descriptor_recursively(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
        page_pool: &PagePool,
        regime: &TranslationRegime,
    ) {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        // We reached the required granule level
        if Self::GRANULE_SIZES[level] == granule {
            descriptor.set_block_descriptor_to_invalid(level);
            Self::invalidate(regime, Some(va));
            return;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Cannot remove block from non-existing table");
            }
            DescriptorType::Block => {
                panic!("Cannot remove block with different granule");
            }
            DescriptorType::Table => {
                let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
                Self::remove_block_descriptor_recursively(
                    va.mask_for_level(level),
                    granule,
                    level + 1,
                    next_level_table,
                    page_pool,
                    regime,
                );

                if next_level_table.iter().all(|d| !d.is_valid()) {
                    // Empty table
                    let mut page = unsafe {
                        Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
                    };
                    page.zero_init();
                    page_pool.release_pages(page).unwrap();
                }
            }
        }
    }

    fn get_descriptor(&mut self, va: VirtualAddress, granule: usize) -> &mut Descriptor {
        Self::walk_descriptors(va, granule, 1, &mut self.base_table.descriptors)
    }

    fn walk_descriptors(
        va: VirtualAddress,
        granule: usize,
        level: usize,
        table: &mut [Descriptor],
    ) -> &mut Descriptor {
        // Get descriptor of the current level
        let descriptor = &mut table[va.get_level_index(level)];

        if Self::GRANULE_SIZES[level] == granule {
            return descriptor;
        }

        // Need to iterate forward
        match descriptor.get_descriptor_type(level) {
            DescriptorType::Invalid => {
                panic!("Invalid descriptor");
            }
            DescriptorType::Block => {
                panic!("Cannot split existing block descriptor to table");
            }
            DescriptorType::Table => {
                Self::walk_descriptors(va.mask_for_level(level), granule, level + 1, unsafe {
                    descriptor.get_next_level_table_mut(level)
                })
            }
        }
    }

    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
        // SAFETY: The assembly code invalidates the translation table entry of
        // the VA or all entries of the translation regime.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            if let Some(VirtualAddress(va)) = va {
                match regime {
                    TranslationRegime::EL1_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, _) => {
                        core::arch::asm!(
                            "tlbi vaae1is, {0}
                            dsb nsh
                            isb",
                            in(reg) va)
                    }
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi vae2is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi vae3is, {0}
                        dsb nsh
                        isb",
                        in(reg) va),
                }
            } else {
                match regime {
                    TranslationRegime::EL1_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    #[cfg(target_feature = "vh")]
                    TranslationRegime::EL2_0(_, asid) => core::arch::asm!(
                        "tlbi aside1, {0}
                        dsb nsh
                        isb",
                        in(reg) (*asid as u64) << 48
                    ),
                    TranslationRegime::EL2 => core::arch::asm!(
                        "tlbi alle2
                        dsb nsh
                        isb"
                    ),
                    TranslationRegime::EL3 => core::arch::asm!(
                        "tlbi alle3
                        dsb nsh
                        isb"
                    ),
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn make_block(pa: usize, va: usize, granule: usize) -> Block {
        Block::new(PhysicalAddress(pa), VirtualAddress(va), granule)
    }

    #[test]
    fn test_split_to_pages() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3fff_c000),
            0x4020_5000,
        )
        .unwrap();
        assert_eq!(make_block(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
        assert_eq!(make_block(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
        assert_eq!(make_block(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
        assert_eq!(make_block(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
        assert_eq!(make_block(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
        assert_eq!(make_block(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
        assert_eq!(make_block(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
    }

    #[test]
    fn test_split_to_pages_unaligned() {
        let pages = Xlat::split_region_to_blocks(
            PhysicalAddress(0x3fff_c000),
            VirtualAddress(0x3f20_0000),
            0x200000,
        )
        .unwrap();
        for (i, block) in pages.iter().enumerate().take(512) {
            assert_eq!(
                make_block(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
                *block
            );
        }
    }
}