| // SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com> |
| // SPDX-License-Identifier: MIT OR Apache-2.0 |
| |
//! Module for converting addresses between the kernel virtual address space and the physical address space
| |
| use core::ops::Range; |
| |
| use alloc::string::String; |
| use spin::Mutex; |
| |
| use super::{ |
| page_pool::{Page, PagePool}, |
| MemoryAccessRights, Xlat, XlatError, |
| }; |
| |
| static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None; |
| |
/// Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and to map only the ranges which are necessary for
/// operation.
/// The current implementation uses a linear mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
pub struct KernelSpace {
    xlat: Mutex<Xlat>,
}

impl KernelSpace {
| pub const PAGE_SIZE: usize = Page::SIZE; |
| |
| /// Creates the kernel memory mapping instance. This should be called from the main core's init |
| /// code. |
| /// # Arguments |
    /// * page_pool: Page pool for allocating kernel translation tables
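    ///
    /// # Example
    /// A minimal sketch of early boot setup; `DRAM_POOL_PAGES` and the `PagePool` constructor
    /// shown here are assumptions, the real names depend on the page_pool module:
    /// ```ignore
    /// let page_pool = PagePool::new(DRAM_POOL_PAGES);
    /// KernelSpace::create_instance(page_pool);
    /// ```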
| pub fn create_instance(page_pool: PagePool) { |
| unsafe { |
| assert!(KERNEL_SPACE_INSTANCE.is_none()); |
| |
| KERNEL_SPACE_INSTANCE = Some(Self { |
| xlat: Mutex::new(Xlat::new(page_pool, 0x0000_0000..0x10_0000_0000)), |
| }); |
| } |
| } |
| |
| /// Maps the code (RX) and data (RW) segments of the SPMC itself. |
| /// # Arguments |
    /// * code_range: address range of the code segment
    /// * data_range: address range of the data segment
| /// # Return value |
| /// * The result of the operation |
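    ///
    /// # Example
    /// A sketch with made-up segment addresses; real boot code would derive these from
    /// linker-provided symbols:
    /// ```ignore
    /// KernelSpace::init(0x4000_0000..0x4010_0000, 0x4010_0000..0x4020_0000)?;
    /// ```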
| pub fn init(code_range: Range<usize>, data_range: Range<usize>) -> Result<(), XlatError> { |
| if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } { |
| let mut xlat = kernel_space.xlat.lock(); |
| |
| xlat.map_physical_address_range( |
| Some(code_range.start), |
| code_range.start, |
| code_range.len(), |
| MemoryAccessRights::RX | MemoryAccessRights::GLOBAL, |
| )?; |
| |
| xlat.map_physical_address_range( |
| Some(data_range.start), |
| data_range.start, |
| data_range.len(), |
| MemoryAccessRights::RW | MemoryAccessRights::GLOBAL, |
| )?; |
| |
| Ok(()) |
| } else { |
| Err(XlatError::InvalidOperation(String::from( |
| "KernelSpace is not initialized", |
| ))) |
| } |
| } |
| |
| /// Map memory range into the kernel address space |
| /// # Arguments |
| /// * pa: Physical address of the memory |
| /// * length: Length of the range in bytes |
    /// * access_rights: Memory access rights
| /// # Return value |
| /// * Virtual address of the mapped memory or error |
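    ///
    /// # Example
    /// A sketch mapping a single page of a peripheral; the physical address is made up:
    /// ```ignore
    /// let va = KernelSpace::map_memory(
    ///     0x0900_0000,
    ///     KernelSpace::PAGE_SIZE,
    ///     MemoryAccessRights::RW,
    /// )?;
    /// ```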
| pub fn map_memory( |
| pa: usize, |
| length: usize, |
| access_rights: MemoryAccessRights, |
| ) -> Result<usize, XlatError> { |
| if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } { |
| let lower_va = kernel_space.xlat.lock().map_physical_address_range( |
| Some(pa), |
| pa, |
| length, |
| access_rights | MemoryAccessRights::GLOBAL, |
| )?; |
| |
| Ok(Self::pa_to_kernel(lower_va as u64) as usize) |
| } else { |
| Err(XlatError::InvalidOperation(String::from( |
| "KernelSpace is not initialized", |
| ))) |
| } |
| } |
| |
| /// Unmap memory range from the kernel address space |
| /// # Arguments |
| /// * va: Virtual address of the memory |
| /// * length: Length of the range in bytes |
| /// # Return value |
    /// * The result of the operation
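    ///
    /// # Example
    /// Releasing the mapping created in the [`Self::map_memory`] sketch above:
    /// ```ignore
    /// KernelSpace::unmap_memory(va, KernelSpace::PAGE_SIZE)?;
    /// ```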
| pub fn unmap_memory(va: usize, length: usize) -> Result<(), XlatError> { |
| if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } { |
| kernel_space |
| .xlat |
| .lock() |
| .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length) |
| } else { |
| Err(XlatError::InvalidOperation(String::from( |
| "KernelSpace is not initialized", |
| ))) |
| } |
| } |
| |
    /// Activates the kernel address space mapping by loading the translation tables into TTBR1_EL1
| pub fn activate() { |
| if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } { |
| kernel_space.xlat.lock().activate(0, super::TTBR::TTBR1_EL1); |
| } |
| } |
| |
| /// Rounds a value down to a kernel space page boundary |
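    ///
    /// # Example
    /// A sketch, assuming `Page::SIZE` is 4 KiB:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_down_to_page_size(0x1234), 0x1000);
    /// ```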
| pub const fn round_down_to_page_size(size: usize) -> usize { |
| size & !(Self::PAGE_SIZE - 1) |
| } |
| |
| /// Rounds a value up to a kernel space page boundary |
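    ///
    /// # Example
    /// A sketch, assuming `Page::SIZE` is 4 KiB:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1001), 0x2000);
    /// ```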
| pub const fn round_up_to_page_size(size: usize) -> usize { |
| (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1) |
| } |
| |
    /// Returns the offset from the preceding page aligned address
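    ///
    /// # Example
    /// A sketch, assuming `Page::SIZE` is 4 KiB:
    /// ```ignore
    /// assert_eq!(KernelSpace::offset_in_page(0x1234), 0x234);
    /// ```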
| pub const fn offset_in_page(address: usize) -> usize { |
| address & (Self::PAGE_SIZE - 1) |
| } |
| |
| /// Kernel virtual address to physical address |
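    ///
    /// # Example
    /// The round trip of the linear mapping scheme (the mask keeps the low 36 bits):
    /// ```ignore
    /// let va = KernelSpace::pa_to_kernel(0x0000_0001_2345_0000);
    /// assert_eq!(va, 0xffff_fff1_2345_0000);
    /// assert_eq!(KernelSpace::kernel_to_pa(va), 0x0000_0001_2345_0000);
    /// ```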
| #[cfg(not(test))] |
| pub const fn kernel_to_pa(kernel_address: u64) -> u64 { |
| kernel_address & 0x0000_000f_ffff_ffff |
| } |

    /// Physical address to kernel virtual address
| #[cfg(not(test))] |
| pub const fn pa_to_kernel(pa: u64) -> u64 { |
        // TODO: make this a const assertion: assert!(pa & 0xffff_fff0_0000_0000 == 0);
| pa | 0xffff_fff0_0000_0000 |
| } |
| |
    // Do not use any mapping in test builds
| #[cfg(test)] |
| pub const fn kernel_to_pa(kernel_address: u64) -> u64 { |
| kernel_address |
| } |
| |
| #[cfg(test)] |
| pub const fn pa_to_kernel(pa: u64) -> u64 { |
| pa |
| } |
| } |