// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0
//! Module for converting addresses between the kernel virtual address space and the physical address space
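//!
//! As a quick illustration of the identity rule used throughout this module (the constants
//! mirror `KernelAddressTranslatorIdentity` below; the values are illustrative only):
//!
//! ```ignore
//! let pa: u64 = 0x0000_0001_2345_0000;
//! let va = pa | 0xffff_fff0_0000_0000;        // kernel VA in the upper range
//! assert_eq!(va, 0xffff_fff1_2345_0000);
//! assert_eq!(va & 0x0000_000f_ffff_ffff, pa); // masking the upper bits recovers the PA
//! ```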
use core::ops::{Deref, DerefMut, Range};
use alloc::sync::Arc;
use spin::Mutex;
use crate::KernelAddressTranslator;
use super::{
address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
page_pool::{Page, PagePool},
MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
};
struct KernelAddressTranslatorIdentity;
impl KernelAddressTranslator for KernelAddressTranslatorIdentity {
fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
PhysicalAddress(va.0 & 0x0000_000f_ffff_ffff)
}
fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
VirtualAddress(pa.0 | 0xffff_fff0_0000_0000)
}
}
#[derive(Clone)]
pub struct KernelSpace {
xlat: Arc<Mutex<Xlat<KernelAddressTranslatorIdentity, 36>>>,
}
/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and to map only the ranges that are necessary for
/// operation.
///
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
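///
/// A minimal bring-up sketch (illustrative only; assumes a `PagePool` named `page_pool` and
/// code/data ranges provided by the linker):
///
/// ```ignore
/// let kernel_space = KernelSpace::new(page_pool);
/// kernel_space.init(code_start..code_end, data_start..data_end)?;
/// // Safety: existing references must map to the same addresses after activation.
/// unsafe { kernel_space.activate() };
/// ```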
impl KernelSpace {
pub const PAGE_SIZE: usize = Page::SIZE;
/// Creates the kernel memory mapping instance. This should be called from the main core's init
/// code.
/// # Arguments
/// * page_pool: Page pool for allocating kernel translation tables
pub fn new(page_pool: PagePool) -> Self {
Self {
xlat: Arc::new(Mutex::new(Xlat::new(
page_pool,
unsafe {
VirtualAddressRange::from_range(0xffff_fff0_0000_0000..0xffff_ffff_ffff_ffff)
},
TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
TranslationGranule::Granule4k,
))),
}
}
/// Maps the code (RX) and data (RW) segments of the SPMC itself.
/// # Arguments
/// * code_range: (start, end) addresses of the code segment
/// * data_range: (start, end) addresses of the data segment
/// # Return value
/// * The result of the operation
pub fn init(
&self,
code_range: Range<usize>,
data_range: Range<usize>,
) -> Result<(), XlatError> {
let mut xlat = self.xlat.lock();
let code_pa = PhysicalAddress(code_range.start & 0x0000_000f_ffff_ffff);
let data_pa = PhysicalAddress(data_range.start & 0x0000_000f_ffff_ffff);
xlat.map_physical_address_range(
Some(
code_pa
.identity_va()
.set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
),
code_pa,
code_range.len(),
MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
)?;
xlat.map_physical_address_range(
Some(
data_pa
.identity_va()
.set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
),
data_pa,
data_range.len(),
MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
)?;
Ok(())
}
/// Map memory range into the kernel address space
/// # Arguments
/// * pa: Physical address of the memory
/// * length: Length of the range in bytes
/// * access_rights: Memory access rights
/// # Return value
/// * Virtual address of the mapped memory or error
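///
/// A usage sketch (illustrative; the physical address and length are placeholders):
///
/// ```ignore
/// let va = kernel_space.map_memory(0x8000_0000, KernelSpace::PAGE_SIZE, MemoryAccessRights::RW)?;
/// // ... access the memory through `va` ...
/// kernel_space.unmap_memory(va, KernelSpace::PAGE_SIZE)?;
/// ```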
pub fn map_memory(
&self,
pa: usize,
length: usize,
access_rights: MemoryAccessRights,
) -> Result<usize, XlatError> {
let pa = PhysicalAddress(pa);
let va = self.xlat.lock().map_physical_address_range(
Some(
pa.identity_va()
.set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
),
pa,
length,
access_rights | MemoryAccessRights::GLOBAL,
)?;
Ok(va.0)
}
/// Unmap memory range from the kernel address space
/// # Arguments
/// * va: Virtual address of the memory
/// * length: Length of the range in bytes
/// # Return value
/// * The result of the operation
pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
self.xlat
.lock()
.unmap_virtual_address_range(VirtualAddress(va), length)
}
/// Activate kernel address space mapping
///
/// # Safety
/// This changes the mapping of the running execution context. The caller
/// must ensure that existing references will be mapped to the same address
/// after activation.
#[cfg(target_arch = "aarch64")]
pub unsafe fn activate(&self) {
self.xlat.lock().activate();
}
/// Rounds a value down to a kernel space page boundary
pub const fn round_down_to_page_size(size: usize) -> usize {
size & !(Self::PAGE_SIZE - 1)
}
/// Rounds a value up to a kernel space page boundary
pub const fn round_up_to_page_size(size: usize) -> usize {
(size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
}
/// Returns the offset to the preceding page-aligned address
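///
/// Together with the rounding helpers above, an illustrative sketch (assumes the 4 KiB
/// granule configured in `new`, i.e. `PAGE_SIZE == 0x1000`):
///
/// ```ignore
/// assert_eq!(KernelSpace::round_up_to_page_size(0x1001), 0x2000);
/// assert_eq!(KernelSpace::round_down_to_page_size(0x1fff), 0x1000);
/// assert_eq!(KernelSpace::offset_in_page(0x1234), 0x234);
/// ```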
pub const fn offset_in_page(address: usize) -> usize {
address & (Self::PAGE_SIZE - 1)
}
/// Kernel virtual address to physical address
// Do not use any mapping in test build
#[cfg(test)]
pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
kernel_address
}
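/// Physical address to kernel virtual address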
#[cfg(test)]
pub const fn pa_to_kernel(pa: u64) -> u64 {
pa
}
}
/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the kernel virtual address space and provides access to it via the Deref trait.
/// When the mapper is dropped, it unmaps the object from the kernel virtual address space.
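///
/// A usage sketch (illustrative; `handle` stands for any `Deref` handle whose target lives at
/// its physical address, and `some_field` is a placeholder field of the target type):
///
/// ```ignore
/// let mapped = KernelMapper::new(handle, kernel_space.clone(), MemoryAccessRights::R);
/// let value = mapped.some_field; // read access through Deref
/// // dropping `mapped` unmaps the object from the kernel address space
/// ```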
pub struct KernelMapper<T>
where
T: Deref,
T::Target: Sized,
{
physical_instance: T,
va: *const T::Target,
kernel_space: KernelSpace,
}
impl<T> KernelMapper<T>
where
T: Deref,
T::Target: Sized,
{
/// Creates a new mapped object.
///
/// The access_rights parameter must contain read access.
pub fn new(
physical_instance: T,
kernel_space: KernelSpace,
access_rights: MemoryAccessRights,
) -> Self {
assert!(access_rights.contains(MemoryAccessRights::R));
let pa = physical_instance.deref() as *const _ as usize;
let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
let va = kernel_space
.map_memory(pa, length, access_rights)
.expect("Failed to map area");
Self {
physical_instance,
va: va as *const T::Target,
kernel_space,
}
}
}
impl<T> Deref for KernelMapper<T>
where
T: Deref,
T::Target: Sized,
{
type Target = T::Target;
#[inline(always)]
fn deref(&self) -> &Self::Target {
unsafe { &*self.va }
}
}
impl<T> Drop for KernelMapper<T>
where
T: Deref,
T::Target: Sized,
{
fn drop(&mut self) {
let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
self.kernel_space
.unmap_memory(self.va as usize, length)
.expect("Failed to unmap area");
}
}
unsafe impl<T> Send for KernelMapper<T>
where
T: Deref + Send,
T::Target: Sized,
{
}
/// # Mutable version of kernel mapping wrapper
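///
/// Works like [`KernelMapper`], but requires `DerefMut` and read-write access rights, and also
/// provides mutable access (sketch; `handle` and `some_field` are placeholders):
///
/// ```ignore
/// let mut mapped = KernelMapperMut::new(handle, kernel_space.clone(), MemoryAccessRights::RW);
/// mapped.some_field = 42; // write access through DerefMut
/// ```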
pub struct KernelMapperMut<T>
where
T: DerefMut,
T::Target: Sized,
{
physical_instance: T,
va: *mut T::Target,
kernel_space: KernelSpace,
}
impl<T> KernelMapperMut<T>
where
T: DerefMut,
T::Target: Sized,
{
/// Creates a new mapped object.
///
/// The access_rights parameter must contain read and write access.
pub fn new(
physical_instance: T,
kernel_space: KernelSpace,
access_rights: MemoryAccessRights,
) -> Self {
assert!(access_rights.contains(MemoryAccessRights::RW));
let pa = physical_instance.deref() as *const _ as usize;
let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
let va = kernel_space
.map_memory(pa, length, access_rights)
.expect("Failed to map area");
Self {
physical_instance,
va: va as *mut T::Target,
kernel_space,
}
}
}
impl<T> Deref for KernelMapperMut<T>
where
T: DerefMut,
T::Target: Sized,
{
type Target = T::Target;
#[inline(always)]
fn deref(&self) -> &Self::Target {
unsafe { &*self.va }
}
}
impl<T> DerefMut for KernelMapperMut<T>
where
T: DerefMut,
T::Target: Sized,
{
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.va }
}
}
impl<T> Drop for KernelMapperMut<T>
where
T: DerefMut,
T::Target: Sized,
{
fn drop(&mut self) {
let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
self.kernel_space
.unmap_memory(self.va as usize, length)
.expect("Failed to unmap area");
}
}
unsafe impl<T> Send for KernelMapperMut<T>
where
T: DerefMut + Send,
T::Target: Sized,
{
}