// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical address space

use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use super::{
    address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
    page_pool::{Page, PagePool},
    MemoryAccessRights, RegimeVaRange, TranslationGranule, TranslationRegime, Xlat, XlatError,
};

#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat<36>>>,
}

/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory by mapping only the ranges that are necessary for
/// operation.
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
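///
/// A minimal sketch of this address conversion, using the `pa_to_kernel`/`kernel_to_pa` helpers
/// defined further down in this module (illustrative only, not run as a doctest):
/// ```ignore
/// let pa: u64 = 0x0000_0001_2345_0000;
/// let va = KernelSpace::pa_to_kernel(pa); // 0xffff_fff1_2345_0000
/// assert_eq!(KernelSpace::kernel_to_pa(va), pa);
/// ```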
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
                TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
                TranslationGranule::Granule4k,
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: Address range (start..end) of the code segment
    /// * data_range: Address range (start..end) of the data segment
    /// # Return value
    /// * The result of the operation
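    ///
    /// Illustrative call; the segment addresses below are hypothetical placeholders, in practice
    /// they come from the image layout (e.g. linker symbols):
    /// ```ignore
    /// kernel_space.init(0x4000_0000..0x4008_0000, 0x4008_0000..0x4010_0000)?;
    /// ```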
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        let code_pa = PhysicalAddress(code_range.start);
        let data_pa = PhysicalAddress(data_range.start);

        xlat.map_physical_address_range(
            Some(code_pa.identity_va()),
            code_pa,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(data_pa.identity_va()),
            data_pa,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
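    ///
    /// Hedged usage sketch; the physical address below is a made-up placeholder (e.g. a device
    /// MMIO page), not something defined by this module:
    /// ```ignore
    /// let va = kernel_space.map_memory(0x0900_0000, KernelSpace::PAGE_SIZE, MemoryAccessRights::RW)?;
    /// // ... access the memory through `va` ...
    /// kernel_space.unmap_memory(va, KernelSpace::PAGE_SIZE)?;
    /// ```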
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let pa = PhysicalAddress(pa);

        let lower_va = self.xlat.lock().map_physical_address_range(
            Some(pa.identity_va()),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(Self::pa_to_kernel(lower_va.0 as u64) as usize)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// The result of the operation
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat.lock().unmap_virtual_address_range(
            VirtualAddress(Self::kernel_to_pa(va as u64) as usize),
            length,
        )
    }

    /// Activate kernel address space mapping
    ///
    /// # Safety
    /// This changes the mapping of the running execution context. The caller
    /// must ensure that existing references will be mapped to the same address
    /// after activation.
    #[cfg(target_arch = "aarch64")]
    pub unsafe fn activate(&self) {
        self.xlat.lock().activate();
    }

    /// Rounds a value down to a kernel space page boundary
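    ///
    /// This and the two helpers after it rely on PAGE_SIZE being a power of two. Illustrative
    /// values, assuming 4 KiB pages:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_down_to_page_size(0x1234), 0x1000);
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1234), 0x2000);
    /// assert_eq!(KernelSpace::offset_in_page(0x1234), 0x234);
    /// ```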
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page-aligned address
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    /// Kernel virtual address to physical address
    #[cfg(not(test))]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address & 0x0000_000f_ffff_ffff
    }

    /// Physical address to kernel virtual address
    #[cfg(not(test))]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        // TODO: make this a const assert: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
        pa | 0xffff_fff0_0000_0000
    }

    // Do not use any mapping in test builds
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the virtual kernel address space and provides access to the object through the Deref trait.
/// When the mapper is dropped, it unmaps the object from the virtual kernel address space.
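///
/// Hedged usage sketch; `queue` stands for some hypothetical `Deref` handle to a structure that
/// lives at a physical address:
/// ```ignore
/// let mapped = KernelMapper::new(queue, kernel_space.clone(), MemoryAccessRights::R);
/// let value = mapped.some_field; // read through the kernel-space mapping (field is illustrative)
/// // Dropping `mapped` unmaps the object from the kernel address space again.
/// ```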
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Create a new mapped object
    /// The access_rights parameter must contain read access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
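///
/// Like KernelMapper, but the access rights must contain MemoryAccessRights::RW and the wrapped
/// object is also writable through DerefMut. Hedged sketch with a hypothetical handle:
/// ```ignore
/// let mut mapped = KernelMapperMut::new(queue, kernel_space.clone(), MemoryAccessRights::RW);
/// mapped.counter += 1; // write through the kernel-space mapping (field is illustrative)
/// ```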
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Create a new mapped object
    /// The access_rights parameter must contain read and write access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}