// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical address space

use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use super::{
    address::{PhysicalAddress, VirtualAddress, VirtualAddressRange},
    page_pool::{Page, PagePool},
    MemoryAccessRights, RegimeVaRange, TranslationRegime, Xlat, XlatError,
};

#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat>>,
}

/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and to map only the ranges that are necessary for
/// operation.
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
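///
/// # Example
///
/// A minimal lifecycle sketch; `page_pool` and the code/data ranges are placeholders that would
/// normally come from the platform's boot code and linker script:
/// ```ignore
/// let kernel_space = KernelSpace::new(page_pool);
/// kernel_space.init(0x4000_0000..0x4008_0000, 0x4008_0000..0x4010_0000)?;
/// // Safety: the caller has to ensure that existing references stay mapped to the same address.
/// unsafe { kernel_space.activate() };
/// ```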
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                unsafe { VirtualAddressRange::from_range(0x0000_0000..0x10_0000_0000) },
                TranslationRegime::EL1_0(RegimeVaRange::Upper, 0),
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: (start, end) addresses of the code segment
    /// * data_range: (start, end) addresses of the data segment
    /// # Return value
    /// * The result of the operation
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        let code_pa = PhysicalAddress(code_range.start);
        let data_pa = PhysicalAddress(data_range.start);

        xlat.map_physical_address_range(
            Some(code_pa.identity_va()),
            code_pa,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(data_pa.identity_va()),
            data_pa,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
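    ///
    /// # Example
    ///
    /// A minimal sketch; the peripheral base address below is a made-up placeholder:
    /// ```ignore
    /// let uart_va =
    ///     kernel_space.map_memory(0x0900_0000, KernelSpace::PAGE_SIZE, MemoryAccessRights::RW)?;
    /// ```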
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let pa = PhysicalAddress(pa);

        let lower_va = self.xlat.lock().map_physical_address_range(
            Some(pa.identity_va()),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(Self::pa_to_kernel(lower_va.0 as u64) as usize)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// * The result of the operation
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat.lock().unmap_virtual_address_range(
            VirtualAddress(Self::kernel_to_pa(va as u64) as usize),
            length,
        )
    }

    /// Activate kernel address space mapping
    ///
    /// # Safety
    /// This changes the mapping of the running execution context. The caller
    /// must ensure that existing references will be mapped to the same address
    /// after activation.
    pub unsafe fn activate(&self) {
        self.xlat.lock().activate();
    }

    /// Rounds a value down to a kernel space page boundary
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
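    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a 4 KiB kernel page size:
    /// ```ignore
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1234), 0x2000);
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x2000), 0x2000);
    /// ```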
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset from the preceding page-aligned address
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    /// Kernel virtual address to physical address
    #[cfg(not(test))]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address & 0x0000_000f_ffff_ffff
    }
    /// Physical address to kernel virtual address
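    ///
    /// # Example
    ///
    /// A sketch of the identity mapping into the upper VA range:
    /// ```ignore
    /// assert_eq!(KernelSpace::pa_to_kernel(0x0000_0001_2345_0000), 0xffff_fff1_2345_0000);
    /// assert_eq!(KernelSpace::kernel_to_pa(0xffff_fff1_2345_0000), 0x0000_0001_2345_0000);
    /// ```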
    #[cfg(not(test))]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        // TODO: make this a const assertion: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
        pa | 0xffff_fff0_0000_0000
    }

    // Do not use any mapping in test builds
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it into
/// the virtual kernel address space and provides access to the object via the Deref trait.
/// When the mapper is dropped, it unmaps the object from the virtual kernel space.
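///
/// # Example
///
/// A minimal usage sketch; `registers` is a hypothetical placeholder for a `Deref` wrapper around
/// a structure that lives in the physical address space:
/// ```ignore
/// let mapped = KernelMapper::new(registers, kernel_space.clone(), MemoryAccessRights::R);
/// let status = mapped.status; // read access through Deref while the mapping is alive
/// // dropping `mapped` unmaps the object from the kernel address space
/// ```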
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Creates a new mapped object.
    /// The access_rights parameter must contain read access.
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
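///
/// Behaves like KernelMapper, but requires both read and write access and additionally provides
/// mutable access to the mapped object via the DerefMut trait.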
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Creates a new mapped object.
    /// The access_rights parameter must contain read and write access.
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}