// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0
//! Objects for representing physical and virtual addresses
use core::{fmt, ops::Range};
use crate::TranslationRegime;
use super::TranslationGranule;
/// Physical address object
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
pub struct PhysicalAddress(pub(super) usize);
impl PhysicalAddress {
/// Create a new PhysicalAddress from the raw address value
///
/// # Safety
/// The address has to be a valid physical address
pub const unsafe fn new(address: usize) -> Self {
Self(address)
}
/// Add offset to the physical address and check for overflow
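///
/// Returns `None` if the addition overflows.
///
/// # Examples
///
/// ```ignore
/// // Illustrative sketch; the raw address values are arbitrary.
/// let pa = unsafe { PhysicalAddress::new(0x8000_0000) };
/// assert_eq!(pa.add_offset(0x1000), Some(unsafe { PhysicalAddress::new(0x8000_1000) }));
/// assert!(pa.add_offset(usize::MAX).is_none());
/// ```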
pub const fn add_offset(self, offset: usize) -> Option<Self> {
if let Some(address) = self.0.checked_add(offset) {
Some(Self(address))
} else {
None
}
}
/// Identity map physical address to virtual address
pub const fn identity_va(self) -> VirtualAddress {
VirtualAddress(self.0)
}
/// Calculate difference of physical addresses
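///
/// Returns `self - rhs`, or `None` if `rhs` is greater than `self`.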
pub const fn diff(self, rhs: Self) -> Option<usize> {
self.0.checked_sub(rhs.0)
}
}
impl From<PhysicalAddress> for usize {
fn from(value: PhysicalAddress) -> Self {
value.0
}
}
impl From<PhysicalAddress> for u64 {
fn from(value: PhysicalAddress) -> Self {
value.0 as u64
}
}
impl fmt::Debug for PhysicalAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("PA")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
/// Virtual address object
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
pub struct VirtualAddress(pub(super) usize);
impl VirtualAddress {
/// Create a new VirtualAddress from the raw address value
///
/// # Safety
/// The address has to be a valid virtual address
pub const unsafe fn new(address: usize) -> Self {
Self(address)
}
/// Add offset to the virtual address and check for overflow
pub const fn add_offset(self, offset: usize) -> Option<Self> {
if let Some(address) = self.0.checked_add(offset) {
Some(Self(address))
} else {
None
}
}
/// Identity map virtual address to physical address
pub const fn identity_pa(self) -> PhysicalAddress {
PhysicalAddress(self.0)
}
/// Mask the virtual address so that only the lower bits remain, i.e. the offset within a
/// block of the given granule and level
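///
/// For example, with a 4KiB granule a level 2 block covers 2MiB, so the result keeps
/// `VA[20:0]`; at level 3 it keeps the 12-bit page offset.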
pub const fn mask_for_level<const VA_BITS: usize>(
self,
translation_granule: TranslationGranule<VA_BITS>,
level: isize,
) -> Self {
Self(self.0 & (translation_granule.block_size_at_level(level) - 1))
}
/// Calculate the index of the virtual address in a translation table at the
/// given granule and level.
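///
/// Note that only a right shift is applied here; the address is expected to have already
/// been masked to the containing block (e.g. via `mask_for_level`).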
pub const fn get_level_index<const VA_BITS: usize>(
self,
translation_granule: TranslationGranule<VA_BITS>,
level: isize,
) -> usize {
self.0 >> translation_granule.total_bits_at_level(level)
}
/// Check if the address is valid in the translation regime, i.e. whether the upper bits
/// match the regime's VA range.
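///
/// For example, with `VA_BITS = 48` an upper-range regime requires bits `[63:48]` to be
/// all ones, while a lower-range regime requires them to be all zeroes.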
pub fn is_valid_in_regime<const VA_BITS: usize>(&self, regime: TranslationRegime) -> bool {
let mask = Self::get_upper_bit_mask::<VA_BITS>();
let required_upper_bits = if regime.is_upper_va_range() { mask } else { 0 };
(self.0 & mask) == required_upper_bits
}
/// Set the upper bits of the virtual address according to the translation regime:
/// fill with '1' bits for the upper VA range, or with '0' bits for the lower VA range.
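///
/// For example, with `VA_BITS = 48`, `0x1000` becomes `0xffff_0000_0000_1000` in an
/// upper-range regime and stays `0x1000` in a lower-range regime.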
pub fn set_upper_bits<const VA_BITS: usize>(self, regime: TranslationRegime) -> Self {
let mask = Self::get_upper_bit_mask::<VA_BITS>();
Self(if regime.is_upper_va_range() {
self.0 | mask
} else {
self.0 & !mask
})
}
/// Remove the upper bits, i.e. fill the bits above `VA_BITS` with zeroes.
pub fn remove_upper_bits<const VA_BITS: usize>(self) -> Self {
Self(self.0 & !Self::get_upper_bit_mask::<VA_BITS>())
}
/// Keep only the bits of the address that are selected by the mask
pub const fn mask_bits(self, mask: usize) -> Self {
Self(self.0 & mask)
}
/// Calculate difference of virtual addresses
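///
/// Returns `self - rhs`, or `None` if `rhs` is greater than `self`.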
pub const fn diff(self, rhs: Self) -> Option<usize> {
self.0.checked_sub(rhs.0)
}
/// Align the address up to the next multiple of the given alignment
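///
/// # Panics
///
/// Panics if `alignment` is zero (see [`usize::next_multiple_of`]).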
pub const fn align_up(self, alignment: usize) -> Self {
Self(self.0.next_multiple_of(alignment))
}
const fn get_upper_bit_mask<const VA_BITS: usize>() -> usize {
!((1 << VA_BITS) - 1)
}
}
impl From<VirtualAddress> for usize {
fn from(value: VirtualAddress) -> Self {
value.0
}
}
impl From<VirtualAddress> for u64 {
fn from(value: VirtualAddress) -> Self {
value.0 as u64
}
}
impl fmt::Debug for VirtualAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VA")
.field(&format_args!("{:#x}", self.0))
.finish()
}
}
/// Represents a virtual address range
#[derive(Debug)]
pub struct VirtualAddressRange {
pub(super) start: VirtualAddress,
pub(super) end: VirtualAddress,
}
impl VirtualAddressRange {
/// Create a new VirtualAddressRange from the start (inclusive) and end (exclusive) addresses
pub fn new(start: VirtualAddress, end: VirtualAddress) -> Self {
Self { start, end }
}
/// Create a new VirtualAddressRange from the raw address values
///
/// # Safety
/// The addresses have to be valid virtual addresses
pub unsafe fn from_range(value: Range<usize>) -> Self {
Self::new(
VirtualAddress::new(value.start),
VirtualAddress::new(value.end),
)
}
/// The length of the range in bytes
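///
/// Returns `None` if `end` is lower than `start`.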
pub fn len(&self) -> Option<usize> {
self.end.diff(self.start)
}
/// Create an iterator which returns virtual addresses in the range of the given step in bytes.
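///
/// The iterator yields `start`, `start + step`, ... for every address below `end`; if an
/// addition overflows, iteration stops at `end`.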
pub fn step_by(self, step: usize) -> VirtualAddressIterator {
VirtualAddressIterator {
next: self.start,
end: self.end,
step,
}
}
}
/// Iterator for walking the virtual address range using the given step.
pub struct VirtualAddressIterator {
next: VirtualAddress,
end: VirtualAddress,
step: usize,
}
impl Iterator for VirtualAddressIterator {
type Item = VirtualAddress;
fn next(&mut self) -> Option<Self::Item> {
if self.next < self.end {
let current = self.next;
self.next = if let Some(next) = self.next.add_offset(self.step) {
next
} else {
self.end
};
Some(current)
} else {
None
}
}
}
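#[cfg(test)]
mod tests {
    use super::*;
    // Minimal sanity-check sketch covering the granule- and regime-independent helpers;
    // the raw address values are arbitrary and a 64-bit `usize` target is assumed.
    #[test]
    fn physical_address_arithmetic() {
        // SAFETY: the raw values are only used for arithmetic in this test.
        let pa = unsafe { PhysicalAddress::new(0x8000_0000) };
        assert_eq!(pa.add_offset(0x1000), Some(unsafe { PhysicalAddress::new(0x8000_1000) }));
        assert_eq!(pa.add_offset(usize::MAX), None);
        assert_eq!(unsafe { PhysicalAddress::new(0x9000_0000) }.diff(pa), Some(0x1000_0000));
        assert_eq!(pa.diff(unsafe { PhysicalAddress::new(0x9000_0000) }), None);
        // Identity mapping round-trips the raw value.
        assert_eq!(usize::from(pa.identity_va().identity_pa()), 0x8000_0000);
    }
    #[test]
    fn virtual_address_bit_manipulation() {
        // SAFETY: the raw values are only used for arithmetic in this test.
        let va = unsafe { VirtualAddress::new(0xffff_0000_0000_1234) };
        assert_eq!(va.remove_upper_bits::<48>(), unsafe { VirtualAddress::new(0x1234) });
        assert_eq!(va.mask_bits(0xfff), unsafe { VirtualAddress::new(0x234) });
        assert_eq!(
            unsafe { VirtualAddress::new(0x1001) }.align_up(0x1000),
            unsafe { VirtualAddress::new(0x2000) }
        );
    }
    #[test]
    fn range_iteration() {
        // SAFETY: the raw values are only used for iteration in this test.
        let range = unsafe { VirtualAddressRange::from_range(0x1000..0x4000) };
        assert_eq!(range.len(), Some(0x3000));
        let mut iter = range.step_by(0x1000);
        assert_eq!(iter.next(), Some(unsafe { VirtualAddress::new(0x1000) }));
        assert_eq!(iter.next(), Some(unsafe { VirtualAddress::new(0x2000) }));
        assert_eq!(iter.next(), Some(unsafe { VirtualAddress::new(0x3000) }));
        assert_eq!(iter.next(), None);
    }
}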