Add translation table library
Add the AArch64 MMU handler component for building, modifying and
activating stage 1 translation tables.
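
An illustrative usage sketch of the new API (the pool size and the mapped
addresses below are example values; a global allocator is required in
no_std builds because the component uses alloc):

    use arm_xlat::page_pool::{PagePool, PagePoolArea};
    use arm_xlat::{MemoryAccessRights, Xlat};

    static PAGE_POOL_AREA: PagePoolArea<0x10_0000> = PagePoolArea::new();

    fn init_mmu() {
        let page_pool = PagePool::new(&PAGE_POOL_AREA);
        let mut xlat = Xlat::new(page_pool);

        // Map a 4 kB device region read-write at an allocator-chosen VA,
        // then activate the mapping with ASID 0.
        let _device_va = xlat
            .map_physical_address_range(
                None,
                0x0900_0000,
                0x1000,
                MemoryAccessRights::RW | MemoryAccessRights::DEVICE,
            )
            .unwrap();
        xlat.activate(0);
    }
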
Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Ief463cb783e1b8f825d8be37bb42988992879e68
diff --git a/Cargo.lock b/Cargo.lock
index e891ee8..4be19d5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5,3 +5,154 @@
[[package]]
name = "arm-xlat"
version = "0.1.0"
+dependencies = [
+ "bitflags",
+ "log",
+ "num_enum",
+ "packed_struct",
+ "spin",
+]
+
+[[package]]
+name = "bitflags"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36"
+
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
+dependencies = [
+ "funty",
+ "radium",
+ "tap",
+ "wyz",
+]
+
+[[package]]
+name = "funty"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
+
+[[package]]
+name = "log"
+version = "0.4.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
+
+[[package]]
+name = "num_enum"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
+[[package]]
+name = "packed_struct"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36b29691432cc9eff8b282278473b63df73bea49bc3ec5e67f31a3ae9c3ec190"
+dependencies = [
+ "bitvec",
+ "packed_struct_codegen",
+]
+
+[[package]]
+name = "packed_struct_codegen"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9cd6706dfe50d53e0f6aa09e12c034c44faacd23e966ae5a209e8bdb8f179f98"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "radium"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.96"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "tap"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
+
+[[package]]
+name = "wyz"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
+dependencies = [
+ "tap",
+]
diff --git a/Cargo.toml b/Cargo.toml
index 3746e76..b417589 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,3 +18,15 @@
rust-version = "1.82"
[dependencies]
+bitflags = "2.4"
+log = { version = "0.4", features = [
+ "max_level_trace",
+ "release_max_level_info",
+] }
+num_enum = { version = "0.7", default-features = false }
+packed_struct = { version = "0.10", default-features = false }
+spin = { version = "0.9", default-features = false, features = [
+ "mutex",
+ "spin_mutex",
+ "rwlock",
+] }
\ No newline at end of file
diff --git a/src/descriptor.rs b/src/descriptor.rs
new file mode 100644
index 0000000..039cdae
--- /dev/null
+++ b/src/descriptor.rs
@@ -0,0 +1,712 @@
+// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Memory descriptor
+
+use packed_struct::prelude::*;
+
+use core::cell::UnsafeCell;
+use core::ptr;
+
+use crate::kernel_space::KernelSpace;
+use crate::MemoryAttributesIndex;
+
+/// Memory shareability
+#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
+pub enum Shareability {
+ #[default]
+ NonShareable = 0b00,
+ Outer = 0b10,
+ Inner = 0b11,
+}
+
+/// Data access permission
+#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
+pub enum DataAccessPermissions {
+ #[default]
+ ReadWrite_None = 0b00,
+ ReadWrite_ReadWrite = 0b01,
+ ReadOnly_None = 0b10,
+ ReadOnly_ReadOnly = 0b11,
+}
+
+/// Memory attributes
+#[derive(PackedStruct, Clone, Debug, PartialEq, Eq, Default)]
+#[packed_struct(size_bytes = "8", bit_numbering = "lsb0")]
+pub struct Attributes {
+ #[packed_field(bits = "54")]
+ pub uxn: bool,
+ #[packed_field(bits = "53")]
+ pub pxn: bool,
+ #[packed_field(bits = "52")]
+ pub contiguous: bool,
+ #[packed_field(bits = "11")]
+ pub not_global: bool,
+ #[packed_field(bits = "10")]
+ pub access_flag: bool,
+ #[packed_field(bits = "9..=8", ty = "enum")]
+ pub shareability: Shareability,
+ #[packed_field(bits = "7..=6", ty = "enum")]
+ pub data_access_permissions: DataAccessPermissions,
+ #[packed_field(bits = "5")]
+ pub non_secure: bool,
+ #[packed_field(bits = "4..=2", ty = "enum")]
+ pub mem_attr_index: MemoryAttributesIndex,
+}
+
+impl From<Attributes> for u64 {
+ fn from(attributes: Attributes) -> Self {
+ u64::from_be_bytes(attributes.pack().unwrap())
+ }
+}
+
+impl From<u64> for Attributes {
+ fn from(bits: u64) -> Self {
+ Self::unpack(&bits.to_be_bytes()).unwrap()
+ }
+}
+
+/// Next level attributes
+#[derive(PackedStruct, Clone, Debug, PartialEq, Eq, Default)]
+#[packed_struct(size_bytes = "8", bit_numbering = "lsb0")]
+pub struct NextLevelAttributes {
+ #[packed_field(bits = "63")]
+ ns_table: bool,
+ #[packed_field(bits = "62..=61")]
+ ap_table: Integer<u8, packed_bits::Bits<2>>,
+ #[packed_field(bits = "60")]
+ xn_table: bool,
+ #[packed_field(bits = "59")]
+ pxn_table: bool,
+}
+
+impl From<NextLevelAttributes> for u64 {
+ fn from(attributes: NextLevelAttributes) -> Self {
+ u64::from_be_bytes(attributes.pack().unwrap())
+ }
+}
+
+impl From<u64> for NextLevelAttributes {
+ fn from(bits: u64) -> Self {
+ Self::unpack(&bits.to_be_bytes()).unwrap()
+ }
+}
+
+/// Memory descriptor type
+#[derive(PartialEq, Eq, Debug)]
+pub enum DescriptorType {
+ Invalid,
+ Block,
+ Table,
+}
+
+/// Memory descriptor of a memory translation table
+#[repr(C)]
+pub struct Descriptor {
+ cell: UnsafeCell<u64>,
+}
+
+impl Descriptor {
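+    // The constants below follow the VMSAv8-64 stage 1 descriptor layout used by this
+    // module: bits[1:0] encode the descriptor type, bits[47:12] carry the output/table
+    // address, and the attribute fields live in the upper and lower descriptor bits.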
+ const ATTR_MASK: u64 = 0xfff8_0000_0000_0ffc;
+ const DESCRIPTOR_TYPE_MASK: u64 = 0b11;
+ pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
+ const INVALID_DESCRIPTOR_VALUE: u64 = 0x0;
+ const NEXT_ATTR_MASK: u64 = 0xf800_0000_0000_0000;
+ const OA_MASK: u64 = 0x0000_ffff_ffff_f000;
+ const TABLE_BIT: u64 = 0b10;
+ const TABLE_ENTRY_COUNT: usize = 512;
+ const TA_MASK: u64 = 0x0000_ffff_ffff_f000;
+ const VALID_BIT: u64 = 0b01;
+
+ /// Query descriptor type
+ pub fn get_descriptor_type(&self, level: usize) -> DescriptorType {
+ assert!(level <= 3);
+
+ let desc_type_bits = unsafe { self.get() } & Self::DESCRIPTOR_TYPE_MASK;
+ if desc_type_bits & Self::VALID_BIT != 0 {
+ if level == 3 {
+ assert_eq!(Self::TABLE_BIT, desc_type_bits & Self::TABLE_BIT);
+ DescriptorType::Block
+ } else if desc_type_bits & Self::TABLE_BIT != 0 {
+ DescriptorType::Table
+ } else {
+ DescriptorType::Block
+ }
+ } else {
+ DescriptorType::Invalid
+ }
+ }
+
+ // Invalid descriptor functions
+
+ /// Check if it is a valid descriptor
+ pub fn is_valid(&self) -> bool {
+ unsafe { self.get() & Self::VALID_BIT != 0 }
+ }
+
+ // Block descriptor functions
+
+ /// Set block descriptor
+ pub fn set_block_descriptor(
+ &mut self,
+ level: usize,
+ output_address: usize,
+ attributes: Attributes,
+ ) {
+ let attr: u64 = attributes.into();
+
+ assert!(level <= 3);
+ assert!(self.get_descriptor_type(level) != DescriptorType::Table);
+ assert_eq!(0, output_address & !Self::get_oa_mask(level));
+ assert_eq!(0, attr & !Self::ATTR_MASK);
+
+ let table_bit = if level < 3 { 0 } else { Self::TABLE_BIT };
+
+ unsafe {
+ self.set(Self::VALID_BIT | table_bit | output_address as u64 | attr);
+ }
+ }
+
+ /// Get output address from the block descriptor
+ pub fn get_block_output_address(&self, level: usize) -> usize {
+ assert!(level <= 3);
+ assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
+
+ ((unsafe { self.get() }) & Self::OA_MASK) as usize
+ }
+
+ /// Set the attributes of the block descriptor
+ pub fn set_block_attributes(&mut self, level: usize, attributes: Attributes) {
+ assert!(level <= 3);
+ let attr: u64 = attributes.into();
+ assert_eq!(0, attr & !Self::ATTR_MASK);
+ assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
+
+ unsafe { self.modify(|d| (d & !Self::ATTR_MASK) | attr) };
+ }
+
+ /// Get the attributes of the block descriptor
+ pub fn get_block_attributes(&self, level: usize) -> Attributes {
+ assert!(level <= 3);
+ assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
+
+ Attributes::from((unsafe { self.get() }) & Self::ATTR_MASK)
+ }
+
+ /// Set block descriptor to invalid
+ pub fn set_block_descriptor_to_invalid(&mut self, level: usize) {
+ assert!(level <= 3);
+ assert_eq!(DescriptorType::Block, self.get_descriptor_type(level));
+
+ unsafe { self.set(Self::INVALID_DESCRIPTOR_VALUE) }
+ }
+
+ /// Set table descriptor
+ ///
+    /// **Unsafe**: The caller has to ensure that the passed next level table lives at least as
+    /// long as the descriptor.
+ pub unsafe fn set_table_descriptor(
+ &mut self,
+ level: usize,
+ next_level_table: &mut [Descriptor],
+ next_level_attributes: Option<NextLevelAttributes>,
+ ) {
+ assert!(level <= 2);
+ assert_eq!(Self::TABLE_ENTRY_COUNT, next_level_table.len());
+ assert!(self.get_descriptor_type(level) != DescriptorType::Table);
+
+ let table_addr = KernelSpace::kernel_to_pa(next_level_table.as_ptr() as u64);
+ assert_eq!(0, table_addr & !Self::TA_MASK);
+
+ let mut raw_desc_value = Self::VALID_BIT | Self::TABLE_BIT | table_addr;
+
+ if let Some(next_attr) = next_level_attributes {
+ let next_attr_bits: u64 = next_attr.into();
+ assert_eq!(0, next_attr_bits & !Self::NEXT_ATTR_MASK);
+ raw_desc_value |= next_attr_bits;
+ }
+
+ self.set(raw_desc_value);
+ }
+
+ /// Get next level table
+ ///
+ /// **Unsafe**: The returned next level table is based on the address read from the descriptor.
+    /// The caller has to ensure that no other references to the table are in use.
+ pub unsafe fn get_next_level_table(&self, level: usize) -> &[Descriptor] {
+ assert!(level <= 2);
+ assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
+
+ let table_address =
+ KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *const Descriptor;
+ core::slice::from_raw_parts(table_address, Self::TABLE_ENTRY_COUNT)
+ }
+
+ /// Get mutable next level table
+ ///
+ /// **Unsafe**: The returned next level table is based on the address read from the descriptor.
+    /// The caller has to ensure that no other references to the table are in use.
+ pub unsafe fn get_next_level_table_mut(&mut self, level: usize) -> &mut [Descriptor] {
+ assert!(level <= 2);
+ assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
+
+ let table_address =
+ KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
+ core::slice::from_raw_parts_mut(table_address, Self::TABLE_ENTRY_COUNT)
+ }
+
+ /// Get next level attributes
+ pub fn get_next_level_attributes(&self, level: usize) -> NextLevelAttributes {
+ assert!(level <= 2);
+ assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
+
+ NextLevelAttributes::from((unsafe { self.get() }) & Self::NEXT_ATTR_MASK)
+ }
+
+ /// Set table descriptor to invalid
+ ///
+    /// **Unsafe:** The returned next level table must be released by the caller, i.e. returned
+    /// to the `PagePool`.
+ pub unsafe fn set_table_descriptor_to_invalid(&mut self, level: usize) -> &mut [Descriptor] {
+ assert!(level <= 2);
+ assert_eq!(DescriptorType::Table, self.get_descriptor_type(level));
+
+ let table_address =
+ KernelSpace::pa_to_kernel(self.get() & Self::TA_MASK) as *mut Descriptor;
+ self.set(Self::INVALID_DESCRIPTOR_VALUE);
+ core::slice::from_raw_parts_mut(table_address, Self::TABLE_ENTRY_COUNT)
+ }
+
+ /// Get raw descriptor value
+ unsafe fn get(&self) -> u64 {
+ ptr::read_volatile(self.cell.get())
+ }
+
+ /// Set raw descriptor value
+ unsafe fn set(&mut self, value: u64) {
+ ptr::write_volatile(self.cell.get(), value)
+ }
+
+ /// Modify raw descriptor value
+ unsafe fn modify<F>(&mut self, f: F)
+ where
+ F: Fn(u64) -> u64,
+ {
+ self.set(f(self.get()))
+ }
+
+ /// Get output address mask
+ fn get_oa_mask(level: usize) -> usize {
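+        // Clear the address bits below the block size of the given level so only a
+        // properly aligned output address passes the mask.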
+ Self::OA_MASK as usize & !(Self::GRANULE_SIZES[level] - 1)
+ }
+}
+
+#[test]
+fn test_attributes() {
+ let attributes = Attributes::default();
+ assert_eq!(0u64, attributes.into());
+
+ let attributes = Attributes {
+ uxn: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 54, attributes.into());
+
+ let attributes = Attributes {
+ pxn: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 53, attributes.into());
+
+ let attributes = Attributes {
+ contiguous: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 52, attributes.into());
+
+ let attributes = Attributes {
+ not_global: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 11, attributes.into());
+
+ let attributes = Attributes {
+ access_flag: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 10, attributes.into());
+
+ let attributes = Attributes {
+ non_secure: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 5, attributes.into());
+
+ let attributes = Attributes {
+ mem_attr_index: MemoryAttributesIndex::Normal_IWBWA_OWBWA,
+ ..Default::default()
+ };
+ assert_eq!(
+ (MemoryAttributesIndex::Normal_IWBWA_OWBWA as u64) << 2,
+ attributes.into()
+ );
+
+ let attributes: Attributes = 0.into();
+ assert!(!attributes.uxn);
+ assert!(!attributes.pxn);
+ assert!(!attributes.contiguous);
+ assert!(!attributes.not_global);
+ assert!(!attributes.access_flag);
+ assert_eq!(Shareability::NonShareable, attributes.shareability);
+ assert_eq!(
+ DataAccessPermissions::ReadWrite_None,
+ attributes.data_access_permissions
+ );
+ assert!(!attributes.non_secure);
+ assert_eq!(
+ MemoryAttributesIndex::Device_nGnRnE,
+ attributes.mem_attr_index
+ );
+}
+
+#[test]
+fn test_next_level_attributes() {
+ let next_level_attributes = NextLevelAttributes::default();
+ assert_eq!(0u64, next_level_attributes.into());
+
+ let next_level_attributes = NextLevelAttributes {
+ ns_table: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 63, next_level_attributes.into());
+
+ let next_level_attributes = NextLevelAttributes {
+ ap_table: 3.into(),
+ ..Default::default()
+ };
+ assert_eq!(3u64 << 61, next_level_attributes.into());
+
+ let next_level_attributes = NextLevelAttributes {
+ xn_table: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 60, next_level_attributes.into());
+
+ let next_level_attributes = NextLevelAttributes {
+ pxn_table: true,
+ ..Default::default()
+ };
+ assert_eq!(1u64 << 59, next_level_attributes.into());
+
+ let next_level_attributes: NextLevelAttributes = 0.into();
+ assert!(!next_level_attributes.ns_table);
+ assert_eq!(0u8, next_level_attributes.ap_table.into());
+ assert!(!next_level_attributes.xn_table);
+ assert!(!next_level_attributes.pxn_table);
+
+ let next_level_attributes: NextLevelAttributes = u64::MAX.into();
+ assert!(next_level_attributes.ns_table);
+ assert_eq!(3u8, next_level_attributes.ap_table.into());
+ assert!(next_level_attributes.xn_table);
+ assert!(next_level_attributes.pxn_table);
+}
+
+#[test]
+fn test_descriptor_get_type() {
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+ assert_eq!(DescriptorType::Invalid, descriptor.get_descriptor_type(1));
+
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(1),
+ };
+ assert_eq!(DescriptorType::Block, descriptor.get_descriptor_type(1));
+
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(3),
+ };
+ assert_eq!(DescriptorType::Table, descriptor.get_descriptor_type(1));
+
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+ assert_eq!(DescriptorType::Invalid, descriptor.get_descriptor_type(3));
+
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(3),
+ };
+ assert_eq!(DescriptorType::Block, descriptor.get_descriptor_type(3));
+}
+
+#[test]
+fn test_descriptor_is_valid() {
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+ assert!(!descriptor.is_valid());
+
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(1),
+ };
+ assert!(descriptor.is_valid());
+}
+
+#[test]
+fn test_descriptor_set_block_to_block_again() {
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(1),
+ };
+
+ descriptor.set_block_descriptor(1, 0, Attributes::default());
+ assert_eq!(0x1, unsafe { descriptor.get() });
+}
+
+#[test]
+#[should_panic]
+fn test_descriptor_set_block_invalid_oa() {
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ descriptor.set_block_descriptor(1, 1 << 63, Attributes::default());
+}
+
+#[test]
+fn test_descriptor_block() {
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ descriptor.set_block_descriptor(
+ 1,
+ 0x0000000f_c0000000,
+ Attributes {
+ uxn: true,
+ ..Default::default()
+ },
+ );
+ assert_eq!(0x0040000f_c0000001, unsafe { descriptor.get() });
+
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ descriptor.set_block_descriptor(
+ 3,
+ 0x0000000f_fffff000,
+ Attributes {
+ uxn: true,
+ ..Default::default()
+ },
+ );
+ assert_eq!(0x0040000f_fffff003, unsafe { descriptor.get() });
+
+ assert_eq!(0x0000000f_fffff000, descriptor.get_block_output_address(3));
+ assert_eq!(
+ Attributes {
+ uxn: true,
+ ..Default::default()
+ },
+ descriptor.get_block_attributes(3)
+ );
+
+ descriptor.set_block_attributes(
+ 3,
+ Attributes {
+ pxn: true,
+ ..Default::default()
+ },
+ );
+ assert_eq!(
+ Attributes {
+ pxn: true,
+ ..Default::default()
+ },
+ descriptor.get_block_attributes(3)
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_descriptor_invalid_block_to_invalid() {
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ descriptor.set_block_descriptor_to_invalid(0);
+}
+
+#[test]
+fn test_descriptor_block_to_invalid() {
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(3),
+ };
+
+ descriptor.set_block_descriptor_to_invalid(3);
+ assert_eq!(0, unsafe { descriptor.get() });
+}
+
+#[test]
+#[should_panic]
+fn test_descriptor_level3_to_table() {
+ let mut next_level_table = [Descriptor {
+ cell: UnsafeCell::new(0),
+ }];
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(3, &mut next_level_table, None);
+ }
+}
+
+#[test]
+fn test_descriptor_block_to_table() {
+ let next_level_table =
+ unsafe { core::slice::from_raw_parts_mut(0x1000 as *mut Descriptor, 512) };
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(1),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(0, next_level_table, None);
+ }
+ assert_eq!(0x1003, unsafe { descriptor.get() });
+}
+
+#[test]
+#[should_panic]
+fn test_descriptor_table_invalid_count() {
+ let next_level_table =
+ unsafe { core::slice::from_raw_parts_mut(0x800 as *mut Descriptor, 511) };
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(0, next_level_table, None);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_descriptor_table_non_aligned() {
+ let next_level_table =
+ unsafe { core::slice::from_raw_parts_mut(0x800 as *mut Descriptor, 512) };
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(0, next_level_table, None);
+ }
+}
+
+#[test]
+fn test_descriptor_table() {
+ let next_level_table =
+ unsafe { core::slice::from_raw_parts_mut(0x0000_000c_ba98_7000 as *mut Descriptor, 512) };
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(0, next_level_table, None);
+ }
+ assert_eq!(0x0000_000c_ba98_7003, unsafe { descriptor.get() });
+}
+
+#[test]
+fn test_descriptor_table_next_level_attr() {
+ const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
+ let next_level_table =
+ unsafe { core::slice::from_raw_parts_mut(NEXT_LEVEL_ADDR as *mut Descriptor, 512) };
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(0),
+ };
+
+ unsafe {
+ descriptor.set_table_descriptor(
+ 0,
+ next_level_table,
+ Some(NextLevelAttributes {
+ ns_table: true,
+ ..Default::default()
+ }),
+ );
+ }
+ assert_eq!(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003, unsafe {
+ descriptor.get()
+ });
+}
+
+#[test]
+fn test_descriptor_table_get_next_level_table() {
+ const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
+ };
+ assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
+ descriptor.get_next_level_table(0).as_ptr() as u64
+ });
+}
+
+#[test]
+fn test_descriptor_table_get_next_level_table_mut() {
+ const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
+ };
+ assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
+ descriptor.get_next_level_table_mut(0).as_ptr() as *mut Descriptor as u64
+ });
+}
+
+#[test]
+fn test_descriptor_table_get_next_level_attr() {
+ const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
+ let descriptor = Descriptor {
+ cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
+ };
+ assert_eq!(
+ NextLevelAttributes {
+ ns_table: true,
+ ..Default::default()
+ },
+ descriptor.get_next_level_attributes(0)
+ );
+}
+
+#[test]
+fn test_descriptor_table_set_to_invalid() {
+ const NEXT_LEVEL_ADDR: u64 = 0x0000_000c_ba98_7000;
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(NEXT_LEVEL_ADDR | 0x8000_0000_0000_0003),
+ };
+ assert_eq!(KernelSpace::pa_to_kernel(NEXT_LEVEL_ADDR), unsafe {
+ descriptor.set_table_descriptor_to_invalid(0).as_ptr() as *mut Descriptor as u64
+ });
+ assert_eq!(0, unsafe { descriptor.get() });
+}
+
+#[test]
+fn test_descriptor_raw_interface() {
+ let cell_value = 0x01234567_89abcdefu64;
+ let cell_new_value = 0x12345678_9abcdef0u64;
+
+ let mut descriptor = Descriptor {
+ cell: UnsafeCell::new(cell_value),
+ };
+
+ unsafe {
+ assert_eq!(cell_value, descriptor.get());
+
+ descriptor.set(cell_new_value);
+ assert_eq!(cell_new_value, descriptor.get());
+
+ descriptor.modify(|d| d + 1);
+ assert_eq!(cell_new_value + 1, descriptor.get());
+ }
+}
diff --git a/src/kernel_space.rs b/src/kernel_space.rs
new file mode 100644
index 0000000..320d16e
--- /dev/null
+++ b/src/kernel_space.rs
@@ -0,0 +1,31 @@
+// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Module for converting addresses between the kernel virtual address space and the physical address space
+
+pub struct KernelSpace {}
+
+#[cfg(not(test))]
+impl KernelSpace {
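+    // These conversions assume the kernel maps physical memory at a fixed linear
+    // offset (0xffff_fff0_0000_0000), so they are a simple mask/or of the address.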
+ /// Kernel virtual address to physical address
+ pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
+ kernel_address & 0x0000_000f_ffff_ffff
+ }
+
+ /// Physical address to kernel virtual address
+ pub const fn pa_to_kernel(pa: u64) -> u64 {
+        // TODO: turn this into a const assert: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
+ pa | 0xffff_fff0_0000_0000
+ }
+}
+
+#[cfg(test)]
+impl KernelSpace {
+ pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
+ kernel_address
+ }
+
+ pub const fn pa_to_kernel(pa: u64) -> u64 {
+ pa
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 4e11d95..4986ceb 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,736 @@
// SPDX-FileCopyrightText: Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0
-#![cfg_attr(not(test), no_std)]
\ No newline at end of file
+#![allow(dead_code)]
+#![allow(non_camel_case_types)]
+#![cfg_attr(not(test), no_std)]
+
+extern crate alloc;
+
+use core::arch::asm;
+use core::iter::zip;
+use core::{fmt, panic};
+
+use alloc::boxed::Box;
+use alloc::format;
+use alloc::string::{String, ToString};
+use alloc::vec::Vec;
+use log::debug;
+
+use bitflags::bitflags;
+use packed_struct::prelude::*;
+
+use self::descriptor::DescriptorType;
+
+use self::descriptor::{Attributes, DataAccessPermissions, Descriptor, Shareability};
+use self::kernel_space::KernelSpace;
+use self::page_pool::{Page, PagePool, Pages};
+use self::region::{PhysicalRegion, VirtualRegion};
+use self::region_pool::{Region, RegionPool, RegionPoolError};
+
+mod descriptor;
+pub mod kernel_space;
+pub mod page_pool;
+mod region;
+mod region_pool;
+
+/// The first level translation table which holds the base descriptors of the mapping
+#[repr(C, align(512))]
+pub struct BaseTable {
+ pub descriptors: [Descriptor; 64],
+}
+
+impl BaseTable {
+ pub fn new() -> Self {
+ BaseTable {
+ descriptors: unsafe { core::mem::transmute([0u64; 64]) },
+ }
+ }
+}
+
+/// Translation table error type
+#[derive(Debug)]
+pub enum XlatError {
+ InvalidParameterError(String),
+ AllocationError(String),
+ AlignmentError(String),
+ Overflow,
+ InvalidOperation(String),
+ Overlap,
+ NotFound,
+ RegionPoolError(RegionPoolError),
+}
+
+/// Memory attributes
+///
+/// MAIR_EL1 should be configured in the same way in startup.s
+#[derive(PrimitiveEnum_u8, Clone, Copy, Debug, PartialEq, Eq, Default)]
+pub enum MemoryAttributesIndex {
+ #[default]
+ Device_nGnRnE = 0x00,
+ Normal_IWBWA_OWBWA = 0x01,
+}
+
+bitflags! {
+ #[derive(Debug, Clone, Copy)]
+ pub struct MemoryAccessRights : u32 {
+ const R = 0b00000001;
+ const W = 0b00000010;
+ const X = 0b00000100;
+ const NS = 0b00001000;
+
+ const RW = Self::R.bits() | Self::W.bits();
+ const RX = Self::R.bits() | Self::X.bits();
+ const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
+
+ const USER = 0b00010000;
+ const DEVICE = 0b00100000;
+ }
+}
+
+impl From<MemoryAccessRights> for Attributes {
+ fn from(access_rights: MemoryAccessRights) -> Self {
+ let data_access_permissions = match (
+ access_rights.contains(MemoryAccessRights::USER),
+ access_rights.contains(MemoryAccessRights::W),
+ ) {
+ (false, false) => DataAccessPermissions::ReadOnly_None,
+ (false, true) => DataAccessPermissions::ReadWrite_None,
+ (true, false) => DataAccessPermissions::ReadOnly_ReadOnly,
+ (true, true) => DataAccessPermissions::ReadWrite_ReadWrite,
+ };
+
+ let mem_attr_index = if access_rights.contains(MemoryAccessRights::DEVICE) {
+ MemoryAttributesIndex::Device_nGnRnE
+ } else {
+ MemoryAttributesIndex::Normal_IWBWA_OWBWA
+ };
+
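+        // UXN/PXN: execution is only enabled at the privilege level selected by the
+        // USER flag, and only when X is requested.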
+ Attributes {
+ uxn: !access_rights.contains(MemoryAccessRights::X)
+ || !access_rights.contains(MemoryAccessRights::USER),
+ pxn: !access_rights.contains(MemoryAccessRights::X)
+ || access_rights.contains(MemoryAccessRights::USER),
+ contiguous: false,
+ not_global: true,
+ access_flag: true,
+ shareability: Shareability::NonShareable,
+ data_access_permissions,
+ non_secure: access_rights.contains(MemoryAccessRights::NS),
+ mem_attr_index,
+ }
+ }
+}
+
+#[derive(PartialEq)]
+struct Block {
+ pa: usize,
+ va: usize,
+ granule: usize,
+}
+
+impl Block {
+ fn new(pa: usize, va: usize, granule: usize) -> Self {
+ assert!(Xlat::GRANULE_SIZES.contains(&granule));
+ Self { pa, va, granule }
+ }
+}
+
+impl fmt::Debug for Block {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Block")
+ .field("pa", &format_args!("{:#010x}", self.pa))
+ .field("va", &format_args!("{:#010x}", self.va))
+ .field("granule", &format_args!("{:#010x}", self.granule))
+ .finish()
+ }
+}
+
+pub struct Xlat {
+ base_table: Box<BaseTable>,
+ page_pool: PagePool,
+ regions: RegionPool<VirtualRegion>,
+}
+
+/// Memory translation table handling
+/// # High level interface
+/// * allocate and map zero initialized region (with or without VA)
+/// * allocate and map memory region and load contents (with or without VA)
+/// * map memory region by PA (with or without VA)
+/// * unmap memory region by PA
+/// * query PA by VA
+/// * set access rights of mapped memory areas
+/// * activate mapping
+///
+/// # Debug features
+/// * print translation table details
+///
+/// # Region level interface
+/// * map regions
+/// * unmap region
+/// * find a mapped region which contains a given address range
+/// * find empty area for region
+/// * set access rights for a region
+/// * create blocks by region
+///
+/// # Block level interface
+/// * map block
+/// * unmap block
+/// * set access rights of block
+impl Xlat {
+ const BASE_VA: usize = 0x4000_0000;
+ pub const GRANULE_SIZES: [usize; 4] = [0, 0x4000_0000, 0x0020_0000, 0x0000_1000];
+
+ pub fn new(page_pool: PagePool) -> Self {
+ let mut regions = RegionPool::new();
+ regions
+ .add(VirtualRegion::new(
+ Self::BASE_VA,
+ 0x1_0000_0000 - Self::BASE_VA,
+ ))
+ .unwrap();
+ Self {
+ base_table: Box::new(BaseTable::new()),
+ page_pool,
+ regions,
+ }
+ }
+
+    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
+    /// the initial data
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * data: Data to be loaded to the memory area
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn allocate_initalized_range(
+ &mut self,
+ va: Option<usize>,
+ data: &[u8],
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let mut pages = self.page_pool.allocate_pages(data.len()).map_err(|e| {
+ XlatError::AllocationError(format!(
+ "Cannot allocate pages for {} bytes ({:?})",
+ data.len(),
+ e
+ ))
+ })?;
+
+ pages.copy_data_to_page(data);
+
+ let pages_length = pages.length();
+ let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
+ let region = if let Some(required_va) = va {
+ self.regions
+ .acquire(required_va, pages_length, physical_region)
+ } else {
+ self.regions.allocate(pages_length, physical_region)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+    /// Allocates memory pages from the page pool, maps them to the given VA and fills them with
+    /// zeros
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn allocate_zero_init_range(
+ &mut self,
+ va: Option<usize>,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let mut pages = self.page_pool.allocate_pages(length).map_err(|e| {
+ XlatError::AllocationError(format!("Cannot allocate pages for {length} bytes ({e:?})"))
+ })?;
+
+ pages.zero_init();
+
+ let pages_length = pages.length();
+ let physical_region = PhysicalRegion::Allocated(self.page_pool.clone(), pages);
+ let region = if let Some(required_va) = va {
+ self.regions
+ .acquire(required_va, pages_length, physical_region)
+ } else {
+ self.regions.allocate(pages_length, physical_region)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+ /// Map memory area by physical address
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * pa: Physical address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: Memory access rights of the area
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ pub fn map_physical_address_range(
+ &mut self,
+ va: Option<usize>,
+ pa: usize,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<usize, XlatError> {
+ let resource = PhysicalRegion::PhysicalAddress(pa);
+ let region = if let Some(required_va) = va {
+ self.regions.acquire(required_va, length, resource)
+ } else {
+ self.regions.allocate(length, resource)
+ }
+ .map_err(XlatError::RegionPoolError)?;
+
+ self.map_region(region, access_rights.into())
+ }
+
+ /// Unmap memory area by virtual address
+ /// # Arguments
+ /// * va: Virtual address
+ /// * length: Length of the memory area in bytes
+ pub fn unmap_virtual_address_range(
+ &mut self,
+ va: usize,
+ length: usize,
+ ) -> Result<(), XlatError> {
+ let pa = self.get_pa_by_va(va, length)?;
+
+ let region_to_release = VirtualRegion::new_with_pa(pa, va, length);
+
+ self.unmap_region(®ion_to_release)?;
+
+ self.regions
+ .release(region_to_release)
+ .map_err(XlatError::RegionPoolError)
+ }
+
+    /// Query physical address by virtual address range. Only returns a value if the memory area
+    /// is mapped as a single contiguous area.
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// # Return value
+ /// * Physical address of the mapped memory
+ pub fn get_pa_by_va(&self, va: usize, length: usize) -> Result<usize, XlatError> {
+ let containing_region = self
+ .find_containing_region(va, length)
+ .ok_or(XlatError::NotFound)?;
+
+ if !containing_region.used() {
+ return Err(XlatError::NotFound);
+ }
+
+ Ok(containing_region.get_pa_for_va(va))
+ }
+
+    /// Sets the memory access rights of a memory area
+ /// # Arguments
+ /// * va: Virtual address of the memory area
+ /// * length: Length of the memory area in bytes
+ /// * access_rights: New memory access rights of the area
+ pub fn set_access_rights(
+ &mut self,
+ va: usize,
+ length: usize,
+ access_rights: MemoryAccessRights,
+ ) -> Result<(), XlatError> {
+ let containing_region = self
+ .find_containing_region(va, length)
+ .ok_or(XlatError::NotFound)?;
+
+ if !containing_region.used() {
+ return Err(XlatError::NotFound);
+ }
+
+ let region = VirtualRegion::new_with_pa(containing_region.get_pa_for_va(va), va, length);
+ self.map_region(region, access_rights.into())?;
+
+ Ok(())
+ }
+
+    /// Activate the memory mapping represented by the object
+    /// # Arguments
+    /// * asid: ASID to program into the translation table base register
+ pub fn activate(&self, asid: u8) {
+ let base_table_pa = KernelSpace::kernel_to_pa(self.base_table.descriptors.as_ptr() as u64);
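+        // TTBRn_EL1 layout: ASID in bits[63:48], translation table base address below.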
+ let ttbr = ((asid as u64) << 48) | base_table_pa;
+ unsafe {
+ #[cfg(target_arch = "aarch64")]
+ asm!(
+ "msr ttbr0_el1, {0}
+ isb",
+ in(reg) ttbr)
+ };
+ }
+
+ /// Prints the translation tables to debug console recursively
+ pub fn print(&self) {
+ debug!(
+ "Xlat table -> {:#010x}",
+ self.base_table.descriptors.as_ptr() as u64
+ );
+ Self::print_table(1, 0, &self.base_table.descriptors);
+ }
+
+ /// Prints a single translation table to the debug console
+ /// # Arguments
+ /// * level: Level of the translation table
+ /// * va: Base virtual address of the table
+ /// * table: Table entries
+ pub fn print_table(level: usize, va: usize, table: &[Descriptor]) {
+ let level_prefix = match level {
+ 0 | 1 => "|-",
+ 2 => "| |-",
+ _ => "| | |-",
+ };
+
+ for (descriptor, va) in zip(table, (va..).step_by(Self::GRANULE_SIZES[level])) {
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Block => debug!(
+ "{} {:#010x} Block -> {:#010x}",
+ level_prefix,
+ va,
+ descriptor.get_block_output_address(level)
+ ),
+ DescriptorType::Table => {
+ let next_level_table = unsafe { descriptor.get_next_level_table(level) };
+ debug!(
+ "{} {:#010x} Table -> {:#010x}",
+ level_prefix,
+ va,
+ next_level_table.as_ptr() as usize
+ );
+ Self::print_table(level + 1, va, next_level_table);
+ }
+ _ => {}
+ }
+ }
+ }
+
+    /// Adds a memory region to the translation table. The function splits the region into blocks
+    /// and uses the block level functions to do the mapping.
+ /// # Arguments
+ /// * region: Memory region object
+ /// # Return value
+ /// * Virtual address of the mapped memory
+ fn map_region(
+ &mut self,
+ region: VirtualRegion,
+ attributes: Attributes,
+ ) -> Result<usize, XlatError> {
+ let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ for block in blocks {
+ self.map_block(block, attributes.clone());
+ }
+
+ Ok(region.base())
+ }
+
+    /// Removes a memory region from the translation table. The function splits the region into
+    /// blocks and uses the block level functions to do the unmapping.
+ /// # Arguments
+ /// * region: Memory region object
+ fn unmap_region(&mut self, region: &VirtualRegion) -> Result<(), XlatError> {
+ let blocks = Self::split_region_to_blocks(region.get_pa(), region.base(), region.length())?;
+ for block in blocks {
+ self.unmap_block(block);
+ }
+
+ Ok(())
+ }
+
+    /// Find the mapped region that contains the whole given area
+    /// # Arguments
+    /// * va: Virtual address to look for
+    /// * length: Length of the area in bytes
+ /// # Return value
+ /// * Reference to virtual region if found
+ fn find_containing_region(&self, va: usize, length: usize) -> Option<&VirtualRegion> {
+ self.regions.find_containing_region(va, length).ok()
+ }
+
+    /// Splits a memory region into blocks that match the granule sizes of the translation table.
+ /// # Arguments
+ /// * pa: Physical address
+ /// * va: Virtual address
+ /// * length: Region size in bytes
+ /// # Return value
+ /// * Vector of granule sized blocks
+ fn split_region_to_blocks(
+ mut pa: usize,
+ mut va: usize,
+ mut length: usize,
+ ) -> Result<Vec<Block>, XlatError> {
+ let min_granule_mask = Self::GRANULE_SIZES.last().unwrap() - 1;
+
+ if length == 0 {
+ return Err(XlatError::InvalidParameterError(
+ "Length cannot be 0".to_string(),
+ ));
+ }
+
+ if pa & min_granule_mask != 0
+ || va & min_granule_mask != 0
+ || length & min_granule_mask != 0
+ {
+ return Err(XlatError::InvalidParameterError(format!(
+ "Addresses and length must be aligned {pa:#010x} {va:#010x} {length:#x}"
+ )));
+ }
+
+ let mut pages = Vec::new();
+
+ while length > 0 {
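+            // Greedily pick the largest granule that both addresses are aligned to and
+            // that still fits into the remaining length.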
+ for granule in &Self::GRANULE_SIZES {
+ if (pa | va) & (*granule - 1) == 0 && length >= *granule {
+ pages.push(Block::new(pa, va, *granule));
+ pa += *granule;
+ va = va.checked_add(*granule).ok_or(XlatError::Overflow)?;
+
+ length -= *granule;
+ break;
+ }
+ }
+ }
+
+ Ok(pages)
+ }
+
+ /// Add block to memory mapping
+ /// # Arguments
+ /// * block: Memory block that can be represented by a single translation table entry
+ /// * attributes: Memory block's permissions, flags
+ fn map_block(&mut self, block: Block, attributes: Attributes) {
+ Self::set_block_descriptor_recursively(
+ attributes,
+ block.pa,
+ block.va,
+ block.granule,
+ 1,
+ self.base_table.descriptors.as_mut_slice(),
+ &self.page_pool,
+ );
+ }
+
+    /// Adds the block descriptor to the translation table, creating all the intermediate tables
+    /// needed to reach the required granule.
+    /// # Arguments
+    /// * attributes: Memory block's permissions, flags
+ /// * pa: Physical address
+ /// * va: Virtual address
+ /// * granule: Translation granule in bytes
+ /// * level: Translation table level
+ /// * table: Translation table on the given level
+ /// * page_pool: Page pool where the function can allocate pages for the translation tables
+ fn set_block_descriptor_recursively(
+ attributes: Attributes,
+ pa: usize,
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ page_pool: &PagePool,
+ ) {
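+        // Note: va is relative to the area covered by the current table; recursive calls
+        // mask it with the granule size of the parent level.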
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ // We reached the required granule level
+ if Self::GRANULE_SIZES[level] == granule {
+ descriptor.set_block_descriptor(level, pa, attributes);
+ return;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+ unsafe {
+ let next_table = page.get_as_slice();
+ descriptor.set_table_descriptor(level, next_table, None);
+ }
+ Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ )
+ }
+ DescriptorType::Block => {
+ // Saving current descriptor details
+ let current_va = va & !(Self::GRANULE_SIZES[level] - 1);
+ let current_pa = descriptor.get_block_output_address(level);
+ let current_attributes = descriptor.get_block_attributes(level);
+
+ // Replace block descriptor by table descriptor
+ let mut page = page_pool.allocate_pages(Page::SIZE).unwrap();
+ unsafe {
+ let next_table = page.get_as_slice();
+ descriptor.set_table_descriptor(level, next_table, None);
+ }
+
+ // Explode block descriptor to table entries
+ for exploded_va in (current_va..(current_va + Self::GRANULE_SIZES[level]))
+ .step_by(Self::GRANULE_SIZES[level + 1])
+ {
+ let offset = exploded_va - current_va;
+ Self::set_block_descriptor_recursively(
+ current_attributes.clone(),
+ current_pa + offset,
+ exploded_va & (Self::GRANULE_SIZES[level] - 1),
+ Self::GRANULE_SIZES[level + 1],
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ )
+ }
+
+ // Invoke self to continue recursion on the newly created level
+ Self::set_block_descriptor_recursively(
+ attributes, pa, va, granule, level, table, page_pool,
+ );
+ }
+ DescriptorType::Table => Self::set_block_descriptor_recursively(
+ attributes,
+ pa,
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ page_pool,
+ ),
+ }
+ }
+
+ /// Remove block from memory mapping
+ /// # Arguments
+ /// * block: memory block that can be represented by a single translation entry
+ fn unmap_block(&mut self, block: Block) {
+ Self::remove_block_descriptor_recursively(
+ block.va,
+ block.granule,
+ 1,
+ self.base_table.descriptors.as_mut_slice(),
+ &self.page_pool,
+ );
+ }
+
+    /// Removes the block descriptor from the translation table, along with all the intermediate
+    /// tables which become empty during the removal process.
+ /// # Arguments
+ /// * va: Virtual address
+ /// * granule: Translation granule in bytes
+ /// * level: Translation table level
+ /// * table: Translation table on the given level
+ /// * page_pool: Page pool where the function can release the pages of empty tables
+ fn remove_block_descriptor_recursively(
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ page_pool: &PagePool,
+ ) {
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ // We reached the required granule level
+ if Self::GRANULE_SIZES[level] == granule {
+ descriptor.set_block_descriptor_to_invalid(level);
+ return;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ panic!("Cannot remove block from non-existing table");
+ }
+ DescriptorType::Block => {
+ panic!("Cannot remove block with different granule");
+ }
+ DescriptorType::Table => {
+ let next_level_table = unsafe { descriptor.get_next_level_table_mut(level) };
+ Self::remove_block_descriptor_recursively(
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ next_level_table,
+ page_pool,
+ );
+
+ if next_level_table.iter().all(|d| !d.is_valid()) {
+ // Empty table
+ let mut page = unsafe {
+ Pages::from_slice(descriptor.set_table_descriptor_to_invalid(level))
+ };
+ page.zero_init();
+ page_pool.release_pages(page).unwrap();
+ }
+ }
+ }
+ }
+
+ fn get_descriptor(&mut self, va: usize, granule: usize) -> &mut Descriptor {
+ Self::walk_descriptors(va, granule, 0, &mut self.base_table.descriptors)
+ }
+
+ fn walk_descriptors(
+ va: usize,
+ granule: usize,
+ level: usize,
+ table: &mut [Descriptor],
+ ) -> &mut Descriptor {
+ // Get descriptor of the current level
+ let descriptor = &mut table[va / Self::GRANULE_SIZES[level]];
+
+ if Self::GRANULE_SIZES[level] == granule {
+ return descriptor;
+ }
+
+ // Need to iterate forward
+ match descriptor.get_descriptor_type(level) {
+ DescriptorType::Invalid => {
+ panic!("Invalid descriptor");
+ }
+ DescriptorType::Block => {
+ panic!("Cannot split existing block descriptor to table");
+ }
+ DescriptorType::Table => Self::walk_descriptors(
+ va & (Self::GRANULE_SIZES[level] - 1),
+ granule,
+ level + 1,
+ unsafe { descriptor.get_next_level_table_mut(level) },
+ ),
+ }
+ }
+}
+
+#[test]
+fn test_split_to_pages() {
+ let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3fff_c000, 0x4020_5000).unwrap();
+ assert_eq!(Block::new(0x3fff_c000, 0x3fff_c000, 0x1000), pages[0]);
+ assert_eq!(Block::new(0x3fff_d000, 0x3fff_d000, 0x1000), pages[1]);
+ assert_eq!(Block::new(0x3fff_e000, 0x3fff_e000, 0x1000), pages[2]);
+ assert_eq!(Block::new(0x3fff_f000, 0x3fff_f000, 0x1000), pages[3]);
+ assert_eq!(Block::new(0x4000_0000, 0x4000_0000, 0x4000_0000), pages[4]);
+ assert_eq!(Block::new(0x8000_0000, 0x8000_0000, 0x0020_0000), pages[5]);
+ assert_eq!(Block::new(0x8020_0000, 0x8020_0000, 0x1000), pages[6]);
+}
+
+#[test]
+fn test_split_to_pages_unaligned() {
+ let pages = Xlat::split_region_to_blocks(0x3fff_c000, 0x3f20_0000, 0x200000).unwrap();
+ for (i, block) in pages.iter().enumerate().take(512) {
+ assert_eq!(
+ Block::new(0x3fff_c000 + (i << 12), 0x3f20_0000 + (i << 12), 0x1000),
+ *block
+ );
+ }
+}
diff --git a/src/page_pool.rs b/src/page_pool.rs
new file mode 100644
index 0000000..66f22c7
--- /dev/null
+++ b/src/page_pool.rs
@@ -0,0 +1,240 @@
+// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Region pool implementation for allocating pages
+
+use core::slice;
+
+use alloc::sync::Arc;
+use alloc::vec::Vec;
+use spin::Mutex;
+
+use super::kernel_space::KernelSpace;
+use super::region_pool::{Region, RegionPool, RegionPoolError};
+
+/// Single 4kB page definition
+pub struct Page {}
+
+impl Page {
+ pub const SIZE: usize = 4096;
+}
+
+/// Area for allocating pages
+#[repr(C, align(4096))]
+pub struct PagePoolArea<const AREA_SIZE: usize> {
+ area: [u8; AREA_SIZE],
+}
+
+impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
+ pub const fn new() -> Self {
+ Self {
+ area: [0; AREA_SIZE],
+ }
+ }
+}
+
+/// Contiguous pages
+pub struct Pages {
+ pa: usize,
+ length: usize,
+ used: bool,
+}
+
+impl Pages {
+ // Create new instance
+ pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
+ Pages { pa, length, used }
+ }
+
+ /// Copy data to pages
+ pub fn copy_data_to_page(&mut self, data: &[u8]) {
+ assert!(data.len() <= self.length);
+
+ let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
+ page_contents.clone_from_slice(data);
+ }
+
+ /// Zero init pages
+ pub fn zero_init(&mut self) {
+ unsafe {
+ self.get_as_slice::<u8>().fill(0);
+ }
+ }
+
+ /// Get physical address
+ pub fn get_pa(&self) -> usize {
+ self.pa
+ }
+
+ /// Get as mutable slice
+ ///
+    /// **Unsafe**: The returned slice is created from the address and length stored in the
+    /// object. The caller has to ensure that no other references to the pages are in use.
+ pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
+ assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
+
+ core::slice::from_raw_parts_mut(
+ KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
+ self.length / core::mem::size_of::<T>(),
+ )
+ }
+
+    /// Create a `Pages` instance from a slice
+ ///
+ /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
+ pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
+ Pages {
+ pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
+ length: core::mem::size_of_val(s),
+ used: true,
+ }
+ }
+}
+
+impl Region for Pages {
+ type Resource = ();
+
+ fn base(&self) -> usize {
+ self.pa
+ }
+
+ fn length(&self) -> usize {
+ self.length
+ }
+
+ fn used(&self) -> bool {
+ self.used
+ }
+
+ fn contains(&self, base: usize, length: usize) -> bool {
+ if let (Some(end), Some(self_end)) =
+ (base.checked_add(length), self.pa.checked_add(self.length))
+ {
+ self.pa <= base && end <= self_end
+ } else {
+ false
+ }
+ }
+
+ fn try_append(&mut self, other: &Self) -> bool {
+ if let (Some(self_end), Some(new_length)) = (
+ self.pa.checked_add(self.length),
+ self.length.checked_add(other.length),
+ ) {
+ if self.used == other.used && self_end == other.pa {
+ self.length = new_length;
+ true
+ } else {
+ false
+ }
+ } else {
+ false
+ }
+ }
+
+ fn create_split(
+ &self,
+ base: usize,
+ length: usize,
+ resource: Option<Self::Resource>,
+ ) -> (Self, Vec<Self>) {
+ assert!(self.contains(base, length));
+
+ let used = resource.is_some();
+ let mut res = Vec::new();
+ if self.pa != base {
+ res.push(Pages::new(self.pa, base - self.pa, self.used));
+ }
+ res.push(Pages::new(base, length, used));
+ if self.pa + self.length != base + length {
+ res.push(Pages::new(
+ base + length,
+ (self.pa + self.length) - (base + length),
+ self.used,
+ ));
+ }
+
+ (Pages::new(base, length, used), res)
+ }
+}
+
+/// RegionPool implementation for pages
+#[derive(Clone)]
+pub struct PagePool {
+ pages: Arc<Mutex<RegionPool<Pages>>>,
+}
+
+type PagePoolError = RegionPoolError;
+
+impl PagePool {
+ /// Create new page pool
+ pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
+ let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
+ let length = page_pool_area.area.len();
+
+ let mut region_pool = RegionPool::new();
+ region_pool.add(Pages::new(pa, length, false)).unwrap();
+ Self {
+ pages: Arc::new(Mutex::new(region_pool)),
+ }
+ }
+
+ /// Allocate pages for given length
+ pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
+ self.pages
+ .lock()
+ .allocate(Self::round_up_to_page_size(length), ())
+ }
+
+ /// Release pages
+ pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
+ self.pages.lock().release(pages_to_release)
+ }
+
+ fn round_up_to_page_size(length: usize) -> usize {
+ (length + Page::SIZE - 1) & !(Page::SIZE - 1)
+ }
+}
+
+#[test]
+fn test_pages() {
+ let area = [0x5au8; 4096];
+ let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);
+
+ assert_eq!(area.as_ptr() as usize, pages.pa);
+ assert_eq!(area.len(), pages.length);
+ assert!(pages.used);
+ assert_eq!(area.as_ptr() as usize, pages.get_pa());
+ assert_eq!(area.as_ptr() as usize, pages.base());
+ assert_eq!(area.len(), pages.length());
+ assert!(pages.used());
+
+ pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
+ assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
+
+ pages.zero_init();
+ assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);
+
+ let s = unsafe { pages.get_as_slice() };
+ for (i, e) in s.iter_mut().enumerate().take(8) {
+ *e = i as u8;
+ }
+ assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
+
+ let from_slice = unsafe { Pages::from_slice(s) };
+ assert_eq!(area.as_ptr() as usize, from_slice.pa);
+ assert_eq!(area.len(), from_slice.length);
+ assert!(from_slice.used);
+}
+
+#[test]
+fn test_pages_contains() {
+ let pages = Pages::new(0x4000_0000, 0x4000, true);
+
+ assert!(!pages.contains(0x3fff_f000, 0x1000));
+ assert!(!pages.contains(0x3fff_f000, 0x1_0000));
+ assert!(!pages.contains(0x4000_4000, 0x1000));
+ assert!(!pages.contains(0x4000_0000, 0x1_0000));
+
+ // Overflow tests
+}
diff --git a/src/region.rs b/src/region.rs
new file mode 100644
index 0000000..fb13db9
--- /dev/null
+++ b/src/region.rs
@@ -0,0 +1,632 @@
+// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Module for handling physical and virtual memory regions
+//!
+//! A region is a contiguous memory area in the given memory space.
+
+use alloc::vec::Vec;
+use log::debug;
+
+use super::{
+ page_pool::{PagePool, Pages},
+ region_pool::Region,
+};
+
+/// Physical region
+///
+/// A physical memory region can be in three different states
+/// * Unused
+/// * Points to a page pool allocated address
+/// * Points to a physical address without allocation
+pub enum PhysicalRegion {
+ Unused,
+ Allocated(PagePool, Pages),
+ PhysicalAddress(usize),
+}
+
+impl PhysicalRegion {
+ /// Get physical memory address
+ fn get_pa(&self) -> usize {
+ match self {
+ PhysicalRegion::Unused => panic!("Unused area has no PA"),
+ PhysicalRegion::Allocated(_page_pool, pages) => pages.get_pa(),
+ PhysicalRegion::PhysicalAddress(pa) => *pa,
+ }
+ }
+}
+
+/// Virtual region
+///
+/// A virtual memory region has a virtual address, a length and a physical region.
+pub struct VirtualRegion {
+ va: usize,
+ length: usize,
+ physical_region: PhysicalRegion,
+}
+
+impl VirtualRegion {
+ /// Create new virtual memory region without a physical region
+ pub fn new(va: usize, length: usize) -> Self {
+ Self::new_from_fields(va, length, PhysicalRegion::Unused)
+ }
+
+    /// Create a virtual region that points to a given physical address
+ pub fn new_with_pa(pa: usize, va: usize, length: usize) -> Self {
+ Self::new_from_fields(va, length, PhysicalRegion::PhysicalAddress(pa))
+ }
+
+ /// Create virtual region by defining all the fields of the object
+ fn new_from_fields(va: usize, length: usize, physical_region: PhysicalRegion) -> Self {
+ Self {
+ va,
+ length,
+ physical_region,
+ }
+ }
+
+ /// Get the base address of the linked physical region
+ pub fn get_pa(&self) -> usize {
+ self.physical_region.get_pa()
+ }
+
+ /// Get physical address for a virtual address
+ pub fn get_pa_for_va(&self, va: usize) -> usize {
+ let offset = va.checked_sub(self.va).unwrap();
+
+ assert!(offset < self.length);
+ self.get_pa().checked_add(offset).unwrap()
+ }
+}
+
+impl Region for VirtualRegion {
+ type Resource = PhysicalRegion;
+
+ fn base(&self) -> usize {
+ self.va
+ }
+
+ fn length(&self) -> usize {
+ self.length
+ }
+
+ fn used(&self) -> bool {
+ !matches!(self.physical_region, PhysicalRegion::Unused)
+ }
+
+ fn contains(&self, base: usize, length: usize) -> bool {
+ if let (Some(end), Some(self_end)) =
+ (base.checked_add(length), self.va.checked_add(self.length))
+ {
+ self.va <= base && end <= self_end
+ } else {
+ false
+ }
+ }
+
+ fn try_append(&mut self, other: &Self) -> bool {
+ if let (Some(self_end), Some(new_length)) = (
+ self.va.checked_add(self.length),
+ self.length.checked_add(other.length),
+ ) {
+ if self_end == other.va {
+ // VA is consecutive
+ match (&self.physical_region, &other.physical_region) {
+ (PhysicalRegion::Unused, PhysicalRegion::Unused) => {
+ // Unused range can be merged without further conditions
+ self.length = new_length;
+ return true;
+ }
+ (
+ PhysicalRegion::PhysicalAddress(self_pa),
+ PhysicalRegion::PhysicalAddress(other_pa),
+ ) => {
+                        // Used ranges can only be merged if the PA doesn't overflow and is
+                        // consecutive
+ if let Some(self_end_pa) = self_pa.checked_add(self.length) {
+ if self_end_pa == *other_pa {
+ self.length = new_length;
+ return true;
+ }
+ }
+ }
+
+                // PhysicalRegion::Allocated instances cannot be merged at the moment. Not sure
+                // if it's a valid use case. If needed, the pages have to be merged, which might
+                // require tricks to invalidate the pages of 'other'.
+ _ => {}
+ }
+ }
+ }
+
+ false
+ }
+
+ fn create_split(
+ &self,
+ base: usize,
+ length: usize,
+ resource: Option<Self::Resource>,
+ ) -> (Self, Vec<Self>) {
+ assert!(self.contains(base, length));
+ assert!(self.used() != resource.is_some());
+
+ if let Some(physical_region) = resource {
+ // Self is unused, setting part of it to used
+ let pa = physical_region.get_pa();
+
+ let mut res = Vec::new();
+ if self.va != base {
+ res.push(Self::new(self.va, base.checked_sub(self.va).unwrap()));
+ }
+
+ res.push(Self::new_from_fields(base, length, physical_region));
+
+ let end = base.checked_add(length).unwrap();
+ let self_end = self.va.checked_add(self.length).unwrap();
+ if end != self_end {
+ res.push(Self::new(end, self_end.checked_sub(end).unwrap()));
+ }
+
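+ // The split list keeps the entry that owns the physical region; the returned handle
+ // only records its physical address.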
+ (
+ Self::new_from_fields(base, length, PhysicalRegion::PhysicalAddress(pa)),
+ res,
+ )
+ } else {
+ // Self is used, mark part of it unused
+ let mut res = Vec::new();
+ if self.va != base {
+ let physical_region = match &self.physical_region {
+ PhysicalRegion::Allocated(_page_pool, _pages) => {
+ todo!("Implement Pages::split");
+ }
+ PhysicalRegion::PhysicalAddress(pa) => PhysicalRegion::PhysicalAddress(*pa),
+ _ => {
+ panic!("Splitting unused region by other unused")
+ }
+ };
+
+ res.push(Self::new_from_fields(
+ self.va,
+ base.checked_sub(self.va).unwrap(),
+ physical_region,
+ ));
+ }
+
+ res.push(Self::new(base, length));
+
+ let end = base.checked_add(length).unwrap();
+ let self_end = self.va.checked_add(self.length).unwrap();
+ if end != self_end {
+ let physical_region = match &self.physical_region {
+ PhysicalRegion::Allocated(_page_pool, _pages) => {
+ todo!("Implement Pages::split");
+ }
+ PhysicalRegion::PhysicalAddress(pa) => {
+ let offset = end.checked_sub(self.va).unwrap();
+ PhysicalRegion::PhysicalAddress(pa.checked_add(offset).unwrap())
+ }
+ _ => {
+ panic!("Splitting unused region by other unused")
+ }
+ };
+
+ res.push(Self::new_from_fields(
+ end,
+ self_end.checked_sub(end).unwrap(),
+ physical_region,
+ ));
+ }
+
+ (Self::new(base, length), res)
+ }
+ }
+}
+
+impl Drop for VirtualRegion {
+ /// If the virtual region is linked to an allocated physical region, release the allocated
+ /// pages.
+ fn drop(&mut self) {
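+ // Swap the physical region out of `self` so that it can be consumed by value below.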
+ let mut physical_region = PhysicalRegion::Unused;
+
+ core::mem::swap(&mut self.physical_region, &mut physical_region);
+
+ if let PhysicalRegion::Allocated(page_pool, pages) = physical_region {
+ debug!(
+ "Dropping physical region with pages: PA={:#010x} VA={:#010x}",
+ pages.get_pa(),
+ self.base(),
+ );
+
+ page_pool.release_pages(pages).unwrap();
+ }
+ }
+}
+
+#[cfg(test)]
+use super::page_pool::PagePoolArea;
+
+#[test]
+#[should_panic]
+fn test_physical_region_unused() {
+ let region = PhysicalRegion::Unused;
+ region.get_pa();
+}
+
+#[test]
+fn test_physical_region() {
+ const PA: usize = 0x0123_4567_89ab_cdef;
+ const LENGTH: usize = 0x8000_0000_0000;
+
+ static PAGE_POOL_AREA: PagePoolArea<16> = PagePoolArea::new();
+ let region =
+ PhysicalRegion::Allocated(PagePool::new(&PAGE_POOL_AREA), Pages::new(PA, LENGTH, true));
+ assert_eq!(PA, region.get_pa());
+
+ let region = PhysicalRegion::PhysicalAddress(PA);
+ assert_eq!(PA, region.get_pa());
+}
+
+#[test]
+fn test_virtual_region() {
+ const VA: usize = 0x0123_4567_89ab_cdef;
+ const PA: usize = 0xfedc_ba98_7654_3210;
+ const LENGTH: usize = 0x8000_0000_0000;
+
+ let region = VirtualRegion::new(VA, LENGTH);
+ assert_eq!(VA, region.va);
+ assert_eq!(VA, region.base());
+ assert_eq!(LENGTH, region.length);
+ assert_eq!(LENGTH, region.length());
+ assert!(matches!(region.physical_region, PhysicalRegion::Unused));
+ assert!(!region.used());
+
+ let region = VirtualRegion::new_with_pa(PA, VA, LENGTH);
+ assert_eq!(VA, region.va);
+ assert_eq!(VA, region.base());
+ assert_eq!(LENGTH, region.length);
+ assert_eq!(LENGTH, region.length());
+ assert!(matches!(
+ region.physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+ assert_eq!(PA, region.get_pa());
+ assert!(region.used());
+}
+
+#[test]
+fn test_virtual_region_get_pa_for_va() {
+ let region = VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
+ assert_eq!(
+ 0x8000_0000_0000_0000,
+ region.get_pa_for_va(0x4000_0000_0000_0000)
+ );
+ assert_eq!(
+ 0x8000_0000_0000_0001,
+ region.get_pa_for_va(0x4000_0000_0000_0001)
+ );
+ assert_eq!(
+ 0x8000_0000_0000_0fff,
+ region.get_pa_for_va(0x4000_0000_0000_0fff)
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_virtual_region_get_pa_for_va_low_va() {
+ let region = VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
+ region.get_pa_for_va(0x3fff_ffff_ffff_ffff);
+}
+
+#[test]
+#[should_panic]
+fn test_virtual_region_get_pa_for_va_high_va() {
+ let region = VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000_0000_0000, 0x1000);
+ region.get_pa_for_va(0x4000_0000_0000_1000);
+}
+
+#[test]
+fn test_virtual_region_contains() {
+ const VA: usize = 0x8000_0000_0000_0000;
+ const LENGTH: usize = 0x8000_0000_0000;
+
+ let region_overflow_end = VirtualRegion::new(0x8000_0000_0000_0000, 0x8000_0000_0000_0000);
+ assert!(!region_overflow_end.contains(0x8000_0000_0000_0000, 1));
+
+ let region = VirtualRegion::new(0x4000_0000_0000_0000, 0x8000_0000_0000_0000);
+ assert!(!region.contains(0x8000_0000_0000_0000, 0x8000_0000_0000_0000));
+
+ assert!(!region.contains(0x4000_0000_0000_0000, 0x8000_0000_0000_0001));
+ assert!(!region.contains(0x3fff_ffff_ffff_ffff, 0x8000_0000_0000_0000));
+ assert!(region.contains(0x4000_0000_0000_0000, 0x8000_0000_0000_0000));
+ assert!(region.contains(0x4000_0000_0000_0000, 0x7fff_ffff_ffff_ffff));
+ assert!(region.contains(0x4000_0000_0000_0001, 0x7fff_ffff_ffff_ffff));
+}
+
+#[test]
+fn test_virtual_region_try_append() {
+ // Both unused
+ let mut region_unused0 = VirtualRegion::new(0x4000_0000, 0x1000);
+ let mut region_unused1 = VirtualRegion::new(0x4000_1000, 0x1000);
+
+ assert!(!region_unused1.try_append(&region_unused0));
+ assert_eq!(0x4000_0000, region_unused0.va);
+ assert_eq!(0x1000, region_unused0.length);
+ assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(0x1000, region_unused1.length);
+
+ assert!(region_unused0.try_append(&region_unused1));
+ assert_eq!(0x4000_0000, region_unused0.va);
+ assert_eq!(0x2000, region_unused0.length);
+ assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(0x1000, region_unused1.length);
+
+ // Unused and PA region
+ let mut region_unused = VirtualRegion::new(0x4000_0000, 0x1000);
+ let region_physical = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_1000, 0x1000);
+ assert!(!region_unused.try_append(&region_physical));
+ assert_eq!(0x4000_0000, region_unused.va);
+ assert_eq!(0x1000, region_unused.length);
+ assert_eq!(0x4000_1000, region_physical.va);
+ assert_eq!(0x1000, region_physical.length);
+
+ // Both PA regions but non-consecutive PA ranges
+ let mut region_physical0 = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x1000);
+ let region_physical1 = VirtualRegion::new_with_pa(0x9000_0000, 0x4000_1000, 0x1000);
+ assert!(!region_physical0.try_append(&region_physical1));
+ assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(0x1000, region_physical0.length);
+ assert_eq!(0x4000_1000, region_physical1.va);
+ assert_eq!(0x1000, region_physical1.length);
+
+ // Both PA regions with consecutive PA ranges
+ let mut region_physical0 = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x1000);
+ let region_physical1 = VirtualRegion::new_with_pa(0x8000_1000, 0x4000_1000, 0x1000);
+ assert!(region_physical0.try_append(&region_physical1));
+ assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(0x2000, region_physical0.length);
+ assert_eq!(0x4000_1000, region_physical1.va);
+ assert_eq!(0x1000, region_physical1.length);
+
+ // VA overflow
+ let mut region_unused0 = VirtualRegion::new(0x8000_0000_0000_0000, 0x8000_0000_0000_0000);
+ let mut region_unused1 = VirtualRegion::new(0x4000_1000, 0x1000);
+
+ assert!(!region_unused0.try_append(&region_unused1));
+ assert_eq!(0x8000_0000_0000_0000, region_unused0.va);
+ assert_eq!(0x8000_0000_0000_0000, region_unused0.length);
+ assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(0x1000, region_unused1.length);
+
+ assert!(!region_unused1.try_append(&region_unused0));
+ assert_eq!(0x8000_0000_0000_0000, region_unused0.va);
+ assert_eq!(0x8000_0000_0000_0000, region_unused0.length);
+ assert_eq!(0x4000_1000, region_unused1.va);
+ assert_eq!(0x1000, region_unused1.length);
+
+ // PA overflow
+ let mut region_physical0 =
+ VirtualRegion::new_with_pa(0x8000_0000_0000_0000, 0x4000_0000, 0x8000_0000_0000_0000);
+ let region_physical1 = VirtualRegion::new_with_pa(0x9000_0000, 0x8000_0000_4000_0000, 0x1000);
+ assert!(!region_physical0.try_append(&region_physical1));
+ assert_eq!(0x4000_0000, region_physical0.va);
+ assert_eq!(0x8000_0000_0000_0000, region_physical0.length);
+ assert_eq!(0x8000_0000_4000_0000, region_physical1.va);
+ assert_eq!(0x1000, region_physical1.length);
+}
+
+#[test]
+fn test_virtual_region_create_split_by_used() {
+ let region_unused = VirtualRegion::new(0x4000_0000, 0x4000);
+
+ // New region at the start
+ let (new_region, splitted_regions) = region_unused.create_split(
+ 0x4000_0000,
+ 0x1000,
+ Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ );
+
+ assert_eq!(0x4000_0000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert_eq!(0x8000_0000, new_region.get_pa());
+ assert!(matches!(
+ new_region.physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x1000, splitted_regions[0].length);
+ assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(0x3000, splitted_regions[1].length);
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ // New region in the middle
+ let (new_region, splitted_regions) = region_unused.create_split(
+ 0x4000_1000,
+ 0x1000,
+ Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ );
+
+ assert_eq!(0x4000_1000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert_eq!(0x8000_0000, new_region.get_pa());
+ assert!(matches!(
+ new_region.physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x1000, splitted_regions[0].length);
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(0x1000, splitted_regions[1].length);
+ assert_eq!(0x8000_0000, splitted_regions[1].get_pa());
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_2000, splitted_regions[2].va);
+ assert_eq!(0x2000, splitted_regions[2].length);
+ assert!(matches!(
+ splitted_regions[2].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ // New region at the end
+ let (new_region, splitted_regions) = region_unused.create_split(
+ 0x4000_3000,
+ 0x1000,
+ Some(PhysicalRegion::PhysicalAddress(0x8000_0000)),
+ );
+
+ assert_eq!(0x4000_3000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert_eq!(0x8000_0000, new_region.get_pa());
+ assert!(matches!(
+ new_region.physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x3000, splitted_regions[0].length);
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ assert_eq!(0x4000_3000, splitted_regions[1].va);
+ assert_eq!(0x1000, splitted_regions[1].length);
+ assert_eq!(0x8000_0000, splitted_regions[1].get_pa());
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+}
+
+#[test]
+fn test_virtual_region_create_split_by_unused() {
+ let region_used = VirtualRegion::new_with_pa(0x8000_0000, 0x4000_0000, 0x4000);
+
+ // New region at the start
+ let (new_region, splitted_regions) = region_used.create_split(0x4000_0000, 0x1000, None);
+
+ assert_eq!(0x4000_0000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x1000, splitted_regions[0].length);
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(0x3000, splitted_regions[1].length);
+ assert_eq!(0x8000_1000, splitted_regions[1].get_pa());
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ // New region in the middle
+ let (new_region, splitted_regions) = region_used.create_split(0x4000_1000, 0x1000, None);
+
+ assert_eq!(0x4000_1000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x1000, splitted_regions[0].length);
+ assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_1000, splitted_regions[1].va);
+ assert_eq!(0x1000, splitted_regions[1].length);
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::Unused
+ ));
+
+ assert_eq!(0x4000_2000, splitted_regions[2].va);
+ assert_eq!(0x2000, splitted_regions[2].length);
+ assert_eq!(0x8000_2000, splitted_regions[2].get_pa());
+ assert!(matches!(
+ splitted_regions[2].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ // New region at the end
+ let (new_region, splitted_regions) = region_used.create_split(0x4000_3000, 0x1000, None);
+
+ assert_eq!(0x4000_3000, new_region.va);
+ assert_eq!(0x1000, new_region.length);
+ assert!(matches!(new_region.physical_region, PhysicalRegion::Unused));
+
+ assert_eq!(0x4000_0000, splitted_regions[0].va);
+ assert_eq!(0x3000, splitted_regions[0].length);
+ assert_eq!(0x8000_0000, splitted_regions[0].get_pa());
+ assert!(matches!(
+ splitted_regions[0].physical_region,
+ PhysicalRegion::PhysicalAddress(_)
+ ));
+
+ assert_eq!(0x4000_3000, splitted_regions[1].va);
+ assert_eq!(0x1000, splitted_regions[1].length);
+
+ assert!(matches!(
+ splitted_regions[1].physical_region,
+ PhysicalRegion::Unused
+ ));
+}
+
+#[test]
+#[should_panic]
+fn test_virtual_region_does_not_contain() {
+ let region = VirtualRegion::new(0x4000_0000, 0x1000);
+ region.create_split(
+ 0x8000_0000,
+ 0x1000,
+ Some(PhysicalRegion::PhysicalAddress(0xc000_0000)),
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_virtual_region_create_split_same_used() {
+ let region = VirtualRegion::new(0x4000_0000, 0x1000);
+ region.create_split(0x4000_0000, 0x1000, Some(PhysicalRegion::Unused));
+}
+
+#[test]
+fn test_virtual_region_drop() {
+ const PA: usize = 0x0123_4567_89ab_cdef;
+ const LENGTH: usize = 0x8000_0000_0000;
+
+ static PAGE_POOL_AREA: PagePoolArea<8192> = PagePoolArea::new();
+ let page_pool = PagePool::new(&PAGE_POOL_AREA);
+ let page = page_pool.allocate_pages(4096).unwrap();
+
+ let physical_region = PhysicalRegion::Allocated(page_pool, page);
+
+ // Testing physical region drop through VirtualRegion
+ let virtual_region = VirtualRegion::new_from_fields(0x4000_0000, 1000, physical_region);
+ drop(virtual_region);
+}
diff --git a/src/region_pool.rs b/src/region_pool.rs
new file mode 100644
index 0000000..f6fa3b4
--- /dev/null
+++ b/src/region_pool.rs
@@ -0,0 +1,416 @@
+// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Region pool
+//!
+//! The region pool component handles region allocations from memory areas in a generic way.
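+//!
+//! A minimal usage sketch (illustrative only; `MyRegion` and `resource` are placeholders for a
+//! type implementing the [`Region`] trait and its associated resource, similar to the
+//! `RegionExample` helper used in the tests below):
+//!
+//! ```ignore
+//! let mut pool = RegionPool::<MyRegion>::new();
+//! pool.add(MyRegion::new(0x4000_0000, 0x1000_0000)).unwrap();
+//!
+//! // Carve a 4 KiB region out of the pool, then hand it back.
+//! let region = pool.allocate(0x1000, resource).unwrap();
+//! pool.release(region).unwrap();
+//! ```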
+
+use core::ops::Range;
+
+use alloc::vec::Vec;
+
+/// Region interface
+///
+/// This trait provides the necessary information about a region to the `RegionPool`.
+pub trait Region: Sized {
+ type Resource;
+
+ /// Get base address
+ fn base(&self) -> usize;
+
+ /// Get length
+ fn length(&self) -> usize;
+
+ /// Check if the region is used
+ fn used(&self) -> bool;
+
+ /// Check if an area defined by its base address and length is inside the region
+ fn contains(&self, base: usize, length: usize) -> bool;
+
+ /// Try to append the parameter region. Returns true on success.
+ fn try_append(&mut self, other: &Self) -> bool;
+
+ /// Split the region into multiple regions by the area passed as a parameter. It returns the
+ /// region covering the area as the first member of the tuple and all the new sliced regions as
+ /// the second member (see the sketch below).
+ /// The function has to handle four cases:
+ /// * The area matches the region exactly -> return one region
+ /// * The area is at the beginning of the region -> return two regions
+ /// * The area is at the end of the region -> return two regions
+ /// * The area is in the middle of the region -> return three regions
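+ ///
+ /// A minimal sketch (not compiled; `region` stands for any implementor covering
+ /// `0x4000_0000..0x4000_4000` and `resource` for its associated resource):
+ ///
+ /// ```ignore
+ /// // Splitting out the middle page returns that page plus three replacement regions:
+ /// // the head, the new page itself and the tail.
+ /// let (page, pieces) = region.create_split(0x4000_1000, 0x1000, Some(resource));
+ /// assert_eq!(3, pieces.len());
+ /// ```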
+ fn create_split(
+ &self,
+ base: usize,
+ length: usize,
+ resource: Option<Self::Resource>,
+ ) -> (Self, Vec<Self>);
+}
+
+/// Region pool error type
+#[derive(Debug, PartialEq)]
+pub enum RegionPoolError {
+ NoSpace,
+ AlreadyUsed,
+ NotFound,
+}
+
+/// Region pool
+pub struct RegionPool<T: Region> {
+ regions: Vec<T>,
+}
+
+impl<T: Region> RegionPool<T> {
+ /// Create empty region pool
+ pub fn new() -> Self {
+ Self {
+ regions: Vec::new(),
+ }
+ }
+
+ /// Add a region to the pool
+ pub fn add(&mut self, region: T) -> Result<(), RegionPoolError> {
+ if !self
+ .regions
+ .iter()
+ .any(|r| r.contains(region.base(), region.length()))
+ {
+ self.regions.push(region);
+ self.regions.sort_by_key(|a| a.base());
+
+ Ok(())
+ } else {
+ Err(RegionPoolError::AlreadyUsed)
+ }
+ }
+
+ /// Allocate a region of the given length from the pool. It selects the smallest unused area in
+ /// which the region fits, then splits that area to produce a region of exactly the requested
+ /// size.
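+ ///
+ /// A minimal sketch (not compiled; the pool and resource are placeholders):
+ ///
+ /// ```ignore
+ /// // With unused areas of 0x4000 and 0x1000 bytes in the pool, the 0x1000-byte area is the
+ /// // tightest fit, so the new region is carved out of it.
+ /// let region = pool.allocate(0x1000, resource)?;
+ /// ```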
+ pub fn allocate(&mut self, length: usize, resource: T::Resource) -> Result<T, RegionPoolError> {
+ let region_to_allocate_from = self
+ .regions
+ .iter()
+ .enumerate()
+ .filter(|(_index, region)| region.length() >= length && !region.used())
+ .min_by_key(|(_index_a, region)| region.length());
+
+ if let Some((index, region)) = region_to_allocate_from {
+ let (new_region, split_regions) =
+ region.create_split(region.base(), length, Some(resource));
+ self.replace(index, split_regions);
+ Ok(new_region)
+ } else {
+ Err(RegionPoolError::NoSpace)
+ }
+ }
+
+ /// Acquire a region with the given base address and length.
+ pub fn acquire(
+ &mut self,
+ base: usize,
+ length: usize,
+ resource: T::Resource,
+ ) -> Result<T, RegionPoolError> {
+ let region_to_acquire_from = self
+ .regions
+ .iter()
+ .enumerate()
+ .find(|(_, region)| region.contains(base, length) && !region.used());
+
+ if let Some((index, region)) = region_to_acquire_from {
+ let (new_region, split_regions) = region.create_split(base, length, Some(resource));
+ self.replace(index, split_regions);
+ Ok(new_region)
+ } else {
+ Err(RegionPoolError::AlreadyUsed)
+ }
+ }
+
+ /// Release region
+ pub fn release(&mut self, region_to_release: T) -> Result<(), RegionPoolError> {
+ assert!(region_to_release.used());
+
+ let region_to_release_from = self.regions.iter().enumerate().find(|(_, r)| {
+ r.contains(region_to_release.base(), region_to_release.length()) && r.used()
+ });
+
+ if let Some((index, region)) = region_to_release_from {
+ self.replace(
+ index,
+ region
+ .create_split(region_to_release.base(), region_to_release.length(), None)
+ .1,
+ );
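+ // Merge neighbouring regions where possible: `dedup_by` drops the later region `a`
+ // whenever the preceding region `b` successfully appends it.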
+ self.regions.dedup_by(|a, b| b.try_append(a));
+
+ Ok(())
+ } else {
+ Err(RegionPoolError::NotFound)
+ }
+ }
+
+ /// Find the region which contains the given area
+ pub fn find_containing_region(
+ &self,
+ base: usize,
+ length: usize,
+ ) -> Result<&T, RegionPoolError> {
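+ // Regions are kept sorted by base address, so binary search for `base`: on an exact hit
+ // that region is the candidate, otherwise the region just before the insertion point may
+ // still contain the requested area.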
+ let region_index = match self.regions.binary_search_by(|r| r.base().cmp(&base)) {
+ Ok(exact_index) => Ok(exact_index),
+ Err(insert_index) => {
+ if insert_index > 0 {
+ Ok(insert_index - 1)
+ } else {
+ Err(RegionPoolError::NotFound)
+ }
+ }
+ }?;
+
+ if self.regions[region_index].contains(base, length) {
+ Ok(&self.regions[region_index])
+ } else {
+ Err(RegionPoolError::NotFound)
+ }
+ }
+
+ fn replace(&mut self, index: usize, replacements: Vec<T>) {
+ self.regions.splice(
+ Range {
+ start: index,
+ end: index + 1,
+ },
+ replacements,
+ );
+ }
+}
+
+#[cfg(test)]
+#[derive(Debug, PartialEq, Eq)]
+struct RegionExample {
+ base: usize,
+ length: usize,
+ used: bool,
+}
+
+#[cfg(test)]
+impl RegionExample {
+ fn new(base: usize, length: usize, used: bool) -> Self {
+ Self { base, length, used }
+ }
+}
+
+#[cfg(test)]
+impl Region for RegionExample {
+ type Resource = ();
+
+ fn base(&self) -> usize {
+ self.base
+ }
+
+ fn length(&self) -> usize {
+ self.length
+ }
+
+ fn used(&self) -> bool {
+ self.used
+ }
+
+ fn contains(&self, base: usize, length: usize) -> bool {
+ self.base <= base && base + length <= self.base + self.length
+ }
+
+ fn try_append(&mut self, other: &Self) -> bool {
+ if self.used == other.used && self.base + self.length == other.base {
+ self.length += other.length;
+ true
+ } else {
+ false
+ }
+ }
+
+ fn create_split(
+ &self,
+ base: usize,
+ length: usize,
+ resource: Option<Self::Resource>,
+ ) -> (Self, Vec<Self>) {
+ let mut res = Vec::new();
+ if self.base != base {
+ res.push(RegionExample::new(self.base, base - self.base, self.used));
+ }
+ res.push(RegionExample::new(base, length, resource.is_some()));
+ if self.base + self.length != base + length {
+ res.push(RegionExample::new(
+ base + length,
+ (self.base + self.length) - (base + length),
+ self.used,
+ ));
+ }
+
+ (RegionExample::new(base, length, resource.is_some()), res)
+ }
+}
+
+#[test]
+fn test_region_contains() {
+ let region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+
+ assert!(region.contains(0x4000_0000, 0x1000_0000));
+ assert!(!region.contains(0x4000_0000 - 1, 0x1000_0000 + 1));
+ assert!(!region.contains(0x4000_0000, 0x1000_0000 + 1));
+ assert!(region.contains(0x4000_0000 + 1, 0x1000_0000 - 1));
+}
+
+#[test]
+fn test_region_try_append() {
+ // Normal append
+ let mut region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+ let appending = RegionExample::new(0x5000_0000, 0x0000_1000, false);
+
+ assert!(region.try_append(&appending));
+ assert_eq!(RegionExample::new(0x4000_0000, 0x1000_1000, false), region);
+
+ // Different used flags
+ let mut region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+ let appending = RegionExample::new(0x5000_0000, 0x0000_1000, true);
+
+ assert!(!region.try_append(&appending));
+ assert_eq!(RegionExample::new(0x4000_0000, 0x1000_0000, false), region);
+
+ // Not contiguous
+ let mut region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+ let appending = RegionExample::new(0x5000_1000, 0x0000_1000, false);
+
+ assert!(!region.try_append(&appending));
+ assert_eq!(RegionExample::new(0x4000_0000, 0x1000_0000, false), region);
+}
+
+#[test]
+fn test_region_create_split() {
+ // Cut first part
+ let region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+
+ let res = region.create_split(0x4000_0000, 0x0000_1000, Some(())).1;
+ assert_eq!(RegionExample::new(0x4000_0000, 0x0000_1000, true), res[0]);
+ assert_eq!(RegionExample::new(0x4000_1000, 0x0fff_f000, false), res[1]);
+
+ // Cut last part
+ let region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+
+ let res = region.create_split(0x4fff_f000, 0x0000_1000, Some(())).1;
+ assert_eq!(RegionExample::new(0x4000_0000, 0x0fff_f000, false), res[0]);
+ assert_eq!(RegionExample::new(0x4fff_f000, 0x0000_1000, true), res[1]);
+
+ // Cut middle part
+ let region = RegionExample::new(0x4000_0000, 0x1000_0000, false);
+
+ let res = region.create_split(0x4020_0000, 0x0000_1000, Some(())).1;
+ assert_eq!(RegionExample::new(0x4000_0000, 0x0020_0000, false), res[0]);
+ assert_eq!(RegionExample::new(0x4020_0000, 0x0000_1000, true), res[1]);
+ assert_eq!(RegionExample::new(0x4020_1000, 0x0fdf_f000, false), res[2]);
+}
+
+#[test]
+fn test_region_pool_add() {
+ let mut pool = RegionPool::new();
+ assert_eq!(
+ Ok(()),
+ pool.add(RegionExample::new(0x4000_0000, 0x1000_0000, false))
+ );
+ assert_eq!(
+ Ok(()),
+ pool.add(RegionExample::new(0x5000_0000, 0x1000_0000, false))
+ );
+ assert_eq!(
+ Err(RegionPoolError::AlreadyUsed),
+ pool.add(RegionExample::new(0x4000_1000, 0x1000, false))
+ );
+}
+
+#[test]
+fn test_pool_allocate() {
+ let mut pool = RegionPool::new();
+ pool.add(RegionExample::new(0x4000_0000, 0x1000_0000, false))
+ .unwrap();
+
+ let res = pool.allocate(0x1000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_0000, 0x1000, true)), res);
+ let res = pool.allocate(0x1_0000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_1000, 0x1_0000, true)), res);
+ let res = pool.allocate(0x2000_0000, ());
+ assert_eq!(Err(RegionPoolError::NoSpace), res);
+}
+
+#[test]
+fn test_region_pool_acquire() {
+ let mut pool = RegionPool::new();
+ pool.add(RegionExample::new(0x4000_0000, 0x1000_0000, false))
+ .unwrap();
+
+ let res = pool.acquire(0x4000_1000, 0x1000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_1000, 0x1000, true)), res);
+ let res = pool.allocate(0x1_0000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_2000, 0x1_0000, true)), res);
+ let res = pool.acquire(0x4000_1000, 0x1000, ());
+ assert_eq!(Err(RegionPoolError::AlreadyUsed), res);
+}
+
+#[test]
+fn test_region_pool_release() {
+ let mut pool = RegionPool::new();
+ pool.add(RegionExample::new(0x4000_0000, 0x1000_0000, false))
+ .unwrap();
+
+ let res = pool.allocate(0x1000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_0000, 0x1000, true)), res);
+ assert_eq!(Ok(()), pool.release(res.unwrap()));
+
+ let res = pool.allocate(0x1_0000, ());
+ assert_eq!(Ok(RegionExample::new(0x4000_0000, 0x1_0000, true)), res);
+ assert_eq!(Ok(()), pool.release(res.unwrap()));
+
+ assert_eq!(
+ Err(RegionPoolError::NotFound),
+ pool.release(RegionExample::new(0x4000_0000, 0x1000, true))
+ );
+}
+
+#[test]
+#[should_panic]
+fn test_region_pool_release_not_used() {
+ let mut pool = RegionPool::new();
+ pool.release(RegionExample::new(0x4000_0000, 0x1000, false))
+ .unwrap();
+}
+
+#[test]
+fn test_region_pool_find_containing_region() {
+ let mut pool = RegionPool::<RegionExample>::new();
+ pool.add(RegionExample::new(0x4000_0000, 0x1000_0000, false))
+ .unwrap();
+
+ assert_eq!(
+ Err(RegionPoolError::NotFound),
+ pool.find_containing_region(0x0000_0000, 0x1000)
+ );
+
+ assert_eq!(
+ Err(RegionPoolError::NotFound),
+ pool.find_containing_region(0x5000_0000, 0x1000)
+ );
+
+ assert_eq!(
+ Err(RegionPoolError::NotFound),
+ pool.find_containing_region(0x4000_0000, 0x2000_0000)
+ );
+
+ assert_eq!(
+ Ok(&RegionExample::new(0x4000_0000, 0x1000_0000, false)),
+ pool.find_containing_region(0x4000_0000, 0x1000_0000)
+ );
+
+ assert_eq!(
+ Ok(&RegionExample::new(0x4000_0000, 0x1000_0000, false)),
+ pool.find_containing_region(0x4000_1000, 0x1000)
+ );
+}