// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical address space

use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use crate::KernelAddressTranslator;

use super::{
    address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
    page_pool::{Page, PagePool},
    MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
};

struct KernelAddressTranslatorIdentity;

impl KernelAddressTranslator for KernelAddressTranslatorIdentity {
    fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
        PhysicalAddress(va.0 & 0x0000_000f_ffff_ffff)
    }

    fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
        VirtualAddress(pa.0 | 0xffff_fff0_0000_0000)
    }
}

#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat<KernelAddressTranslatorIdentity, 36>>>,
}

/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and to map only the ranges that are necessary for
/// operation.
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating the kernel translation tables
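    ///
    /// A minimal construction sketch (assumes an already initialized `PagePool`; obtaining the
    /// pool is outside the scope of this module):
    /// ```ignore
    /// let kernel_space = KernelSpace::new(page_pool);
    /// ```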
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                unsafe {
                    VirtualAddressRange::from_range(0xffff_fff0_0000_0000..0xffff_ffff_ffff_ffff)
                },
                TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
                TranslationGranule::Granule4k,
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: (start, end) addresses of the code segment
    /// * data_range: (start, end) addresses of the data segment
    /// # Return value
    /// * The result of the operation
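    ///
    /// A usage sketch, assuming the segment boundaries are provided by the environment (e.g. by
    /// linker script symbols); the variable names below are illustrative only:
    /// ```ignore
    /// kernel_space.init(code_start..code_end, data_start..data_end)?;
    /// ```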
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        let code_pa = PhysicalAddress(code_range.start & 0x0000_000f_ffff_ffff);
        let data_pa = PhysicalAddress(data_range.start & 0x0000_000f_ffff_ffff);

        xlat.map_physical_address_range(
            Some(
                code_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            code_pa,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(
                data_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            data_pa,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
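    ///
    /// A usage sketch mapping one page of an assumed peripheral at physical address 0x0900_0000
    /// (the address and the chosen access rights are illustrative only):
    /// ```ignore
    /// let va = kernel_space.map_memory(
    ///     0x0900_0000,
    ///     KernelSpace::PAGE_SIZE,
    ///     MemoryAccessRights::RW,
    /// )?;
    /// ```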
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let pa = PhysicalAddress(pa);

        let va = self.xlat.lock().map_physical_address_range(
            Some(
                pa.identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(va.0)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// * The result of the operation
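    ///
    /// Counterpart of the `map_memory` sketch above, releasing the same assumed mapping:
    /// ```ignore
    /// kernel_space.unmap_memory(va, KernelSpace::PAGE_SIZE)?;
    /// ```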
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat
            .lock()
            .unmap_virtual_address_range(VirtualAddress(va), length)
    }

    /// Activate kernel address space mapping
    ///
    /// # Safety
    /// This changes the mapping of the running execution context. The caller
    /// must ensure that existing references will be mapped to the same address
    /// after activation.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        self.xlat.lock().activate();
    }

    /// Rounds a value down to a kernel space page boundary
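    ///
    /// For example, with the 4 KiB granule used by this module (`PAGE_SIZE == 0x1000`):
    /// ```ignore
    /// assert_eq!(KernelSpace::round_down_to_page_size(0x1234), 0x1000);
    /// ```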
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
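    ///
    /// For example, with `PAGE_SIZE == 0x1000`:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1234), 0x2000);
    /// ```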
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page-aligned address
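    ///
    /// For example, with `PAGE_SIZE == 0x1000`:
    /// ```ignore
    /// assert_eq!(KernelSpace::offset_in_page(0x1234), 0x234);
    /// ```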
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    /// Kernel virtual address to physical address

    // Do not use any mapping in test build
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the virtual kernel address space and provides access to the object via the Deref trait.
/// When the mapper is dropped, it unmaps the object from the virtual kernel space.
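///
/// A usage sketch (the wrapped `physical_object`, its `field` and the surrounding bindings are
/// illustrative assumptions, not part of this API):
/// ```ignore
/// let mapped = KernelMapper::new(physical_object, kernel_space.clone(), MemoryAccessRights::R);
/// let value = mapped.field; // read through the kernel virtual mapping via Deref
/// drop(mapped);             // unmaps the object from the kernel address space
/// ```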
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Creates a new mapped object.
    /// The access_rights parameter must contain read access.
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
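///
/// Same pattern as `KernelMapper`, but the wrapped object is also writable through `DerefMut`
/// (the bindings and field below are illustrative assumptions):
/// ```ignore
/// let mut mapped = KernelMapperMut::new(physical_object, kernel_space.clone(), MemoryAccessRights::RW);
/// mapped.field = 42; // write through the kernel virtual mapping
/// ```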
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Creates a new mapped object.
    /// The access_rights parameter must contain read and write access.
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}