// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical
//! address space

use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use super::{
    address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
    page_pool::{Page, PagePool},
    MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
};

#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat<36>>>,
}

/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and only map the ranges which are necessary for
/// operation.
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
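///
/// ## Example
///
/// A minimal usage sketch (marked `ignore`, so it is not compiled as a doc test); `page_pool`,
/// the segment bounds and the mapped physical address are placeholders assumed to be provided
/// by the caller:
///
/// ```ignore
/// let kernel_space = KernelSpace::new(page_pool);
/// kernel_space.init(code_start..code_end, data_start..data_end)?;
///
/// // Map a physical range and use the returned kernel virtual address.
/// let va =
///     kernel_space.map_memory(0x0100_0000, KernelSpace::PAGE_SIZE, MemoryAccessRights::RW)?;
/// // ... access the memory through `va` ...
/// kernel_space.unmap_memory(va, KernelSpace::PAGE_SIZE)?;
/// ```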
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                unsafe {
                    VirtualAddressRange::from_range(0xffff_fff0_0000_0000..0xffff_ffff_ffff_ffff)
                },
                TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
                TranslationGranule::Granule4k,
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: (start, end) addresses of the code segment
    /// * data_range: (start, end) addresses of the data segment
    /// # Return value
    /// * The result of the operation
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        let code_pa = PhysicalAddress(code_range.start & 0x0000_000f_ffff_ffff);
        let data_pa = PhysicalAddress(data_range.start & 0x0000_000f_ffff_ffff);

        xlat.map_physical_address_range(
            Some(
                code_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            code_pa,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(
                data_pa
                    .identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            data_pa,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let pa = PhysicalAddress(pa);

        let va = self.xlat.lock().map_physical_address_range(
            Some(
                pa.identity_va()
                    .set_upper_bits::<36>(TranslationRegime::EL1_0(RegimeVaRange::Upper, 0)),
            ),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(va.0)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// The result of the operation
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat
            .lock()
            .unmap_virtual_address_range(VirtualAddress(va), length)
    }

    /// Activate kernel address space mapping
    ///
    /// # Safety
    /// This changes the mapping of the running execution context. The caller
    /// must ensure that existing references will be mapped to the same address
    /// after activation.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        self.xlat.lock().activate();
    }

    /// Rounds a value down to a kernel space page boundary
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page aligned address
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }
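
    // Illustration of the three helpers above, assuming the 4 KiB (Granule4k) page size used by
    // this module:
    //   round_down_to_page_size(0x1234) == 0x1000
    //   round_up_to_page_size(0x1234)   == 0x2000
    //   offset_in_page(0x1234)          == 0x234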

    /// Kernel virtual address to physical address
    #[cfg(not(test))]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address & 0x0000_000f_ffff_ffff
    }

    /// Physical address to kernel virtual address
    #[cfg(not(test))]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        // TODO: make this a const assertion: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
        pa | 0xffff_fff0_0000_0000
    }

    // Do not use any mapping in test build
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the virtual kernel address space and provides access to the object via the Deref trait. When
/// the mapper is dropped, it unmaps the object from the virtual kernel space.
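///
/// ## Example
///
/// A sketch of the intended usage (marked `ignore`, not compiled as a doc test); `DEVICE_CONFIG`
/// and its `device_id` field are hypothetical and stand for an object placed in the physical
/// address space:
///
/// ```ignore
/// let mapped = KernelMapper::new(&DEVICE_CONFIG, kernel_space.clone(), MemoryAccessRights::R);
/// let id = mapped.device_id; // read through the kernel virtual mapping via Deref
/// drop(mapped); // dropping the wrapper unmaps the object from the kernel address space
/// ```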
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
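///
/// A sketch of the intended usage (marked `ignore`); `device_config` is a hypothetical `DerefMut`
/// handle (e.g. a `Box`) to an object in the physical address space, and `control` is a
/// hypothetical field:
///
/// ```ignore
/// let mut mapped =
///     KernelMapperMut::new(device_config, kernel_space.clone(), MemoryAccessRights::RW);
/// mapped.control = 1; // write through the kernel virtual mapping via DerefMut
/// ```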
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read and write access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}