// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical
//! address space
use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use crate::KernelAddressTranslator;

use super::{
    address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
    page_pool::{Page, PagePool},
    MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
};
/// Identity translation between the upper kernel virtual address range and physical addresses:
/// the kernel VA is formed by setting the upper bits of the 36 bit PA, and the PA is recovered
/// by masking those bits off again.
struct KernelAddressTranslatorIdentity;

impl KernelAddressTranslator for KernelAddressTranslatorIdentity {
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
        // Keep only the lower 36 bits of the virtual address.
        PhysicalAddress(va.0 & 0x0000_000f_ffff_ffff)
    }

    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
        // Move the physical address into the upper VA range.
        VirtualAddress(pa.0 | 0xffff_fff0_0000_0000)
    }
}

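// For illustration, the identity translator above is stateless and round-trips addresses:
//   pa_to_kernel(PhysicalAddress(0x0000_0001_2345_0000)) == VirtualAddress(0xffff_fff1_2345_0000)
//   kernel_to_pa(VirtualAddress(0xffff_fff1_2345_0000)) == PhysicalAddress(0x0000_0001_2345_0000)
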
/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory by mapping only the ranges that are necessary for
/// operation.
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat<KernelAddressTranslatorIdentity, 36>>>,
}

impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
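    /// # Example
    /// A minimal sketch, assuming `page_pool` was set up earlier from memory reserved for
    /// translation tables:
    /// ```ignore
    /// let kernel_space = KernelSpace::new(page_pool);
    /// ```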
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                unsafe {
                    VirtualAddressRange::from_range(0xffff_fff0_0000_0000..0xffff_ffff_ffff_ffff)
                },
                TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
                TranslationGranule::Granule4k,
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: (start, end) addresses of the code segment
    /// * data_range: (start, end) addresses of the data segment
    /// # Return value
    /// * The result of the operation
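    /// # Example
    /// A minimal sketch from early boot code; `code_start`/`code_end` and `data_start`/`data_end`
    /// are hypothetical values taken from linker symbols:
    /// ```ignore
    /// kernel_space.init(code_start..code_end, data_start..data_end)?;
    /// // Switch the running core over to the new tables (aarch64 only).
    /// unsafe { kernel_space.activate() };
    /// ```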
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        let code_pa = PhysicalAddress(code_range.start & 0x0000_000f_ffff_ffff);
        let data_pa = PhysicalAddress(data_range.start & 0x0000_000f_ffff_ffff);

        xlat.map_physical_address_range(
            Some(
                code_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            code_pa,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(
                data_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            data_pa,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
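    /// # Example
    /// A sketch mapping one page of a device at a hypothetical physical base address:
    /// ```ignore
    /// let va = kernel_space.map_memory(
    ///     0x0900_0000,              // hypothetical physical base
    ///     KernelSpace::PAGE_SIZE,   // map a single page
    ///     MemoryAccessRights::RW,
    /// )?;
    /// let reg = va as *mut u32; // access the device through the returned kernel VA
    /// ```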
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let pa = PhysicalAddress(pa);

        let va = self.xlat.lock().map_physical_address_range(
            Some(
                pa.identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(va.0)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// * The result of the operation
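    /// # Example
    /// Sketch, releasing a range returned by [`Self::map_memory`] earlier:
    /// ```ignore
    /// kernel_space.unmap_memory(va, KernelSpace::PAGE_SIZE)?;
    /// ```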
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat
            .lock()
            .unmap_virtual_address_range(VirtualAddress(va), length)
    }

    /// Activate kernel address space mapping
    ///
    /// # Safety
    /// This changes the mapping of the running execution context. The caller
    /// must ensure that existing references will be mapped to the same address
    /// after activation.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        self.xlat.lock().activate();
    }

    /// Rounds a value down to a kernel space page boundary
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
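    /// # Example
    /// A quick illustration of the arithmetic, assuming the 4 KiB granule used here:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_up_to_page_size(1), 0x1000);
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1000), 0x1000);
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1001), 0x2000);
    /// ```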
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page aligned address
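    /// # Example
    /// ```ignore
    /// // With 4 KiB pages: 0x1234 & 0xfff == 0x234
    /// assert_eq!(KernelSpace::offset_in_page(0x1234), 0x234);
    /// ```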
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    // Do not use any mapping in the test build

    /// Kernel virtual address to physical address
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    /// Physical address to kernel virtual address
    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the kernel virtual address space and provides access to the object via the Deref trait.
/// When the mapper is dropped, it unmaps the object from the kernel virtual address space.
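/// # Example
/// A minimal sketch; `DeviceRegs` and `regs` are hypothetical, with `regs: &'static DeviceRegs`
/// pointing at physical memory:
/// ```ignore
/// let mapped = KernelMapper::new(regs, kernel_space.clone(), MemoryAccessRights::R);
/// let status = mapped.status; // reads go through the kernel VA created by the mapper
/// drop(mapped);               // unmaps the range again
/// ```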
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
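/// # Example
/// Same pattern as [`KernelMapper`] but with write access (sketch, with a hypothetical
/// `regs: &'static mut DeviceRegs`):
/// ```ignore
/// let mut mapped = KernelMapperMut::new(regs, kernel_space.clone(), MemoryAccessRights::RW);
/// mapped.control = 1; // writes go through the mapped kernel VA
/// ```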
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read and write access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}