// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical
//! address space

use core::ops::{Deref, DerefMut, Range};

use alloc::sync::Arc;
use spin::Mutex;

use super::{
    page_pool::{Page, PagePool},
    MemoryAccessRights, Xlat, XlatError,
};

static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;

#[derive(Clone)]
pub struct KernelSpace {
    xlat: Arc<Mutex<Xlat>>,
}

/// # Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory by mapping only the ranges that are necessary for
/// operation.
/// The current implementation uses a flat mapping into the upper virtual address range, i.e. the
/// kernel virtual address is the physical address ORed with 0xffff_fff0_0000_0000, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
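///
/// A minimal usage sketch (illustrative only; the page pool and the code/data ranges below are
/// assumptions made up for the example, not values defined by this module):
/// ```ignore
/// let kernel_space = KernelSpace::new(page_pool);
/// kernel_space.init(
///     0x0800_0000..0x0810_0000, // code segment of the SPMC (mapped RX)
///     0x0810_0000..0x0820_0000, // data segment of the SPMC (mapped RW)
/// )?;
/// kernel_space.activate();
/// ```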
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's init
    /// code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
    pub fn new(page_pool: PagePool) -> Self {
        Self {
            xlat: Arc::new(Mutex::new(Xlat::new(
                page_pool,
                0x0000_0000..0x10_0000_0000,
            ))),
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: (start, end) addresses of the code segment
    /// * data_range: (start, end) addresses of the data segment
    /// # Return value
    /// * The result of the operation
    pub fn init(
        &self,
        code_range: Range<usize>,
        data_range: Range<usize>,
    ) -> Result<(), XlatError> {
        let mut xlat = self.xlat.lock();

        xlat.map_physical_address_range(
            Some(code_range.start),
            code_range.start,
            code_range.len(),
            MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
        )?;

        xlat.map_physical_address_range(
            Some(data_range.start),
            data_range.start,
            data_range.len(),
            MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
        )?;

        Ok(())
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
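    ///
    /// A usage sketch (the address and length below are made-up example values):
    /// ```ignore
    /// let va = kernel_space.map_memory(0x9000_0000, 0x1000, MemoryAccessRights::RW)?;
    /// // ... access the mapped range through `va` ...
    /// kernel_space.unmap_memory(va, 0x1000)?;
    /// ```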
    pub fn map_memory(
        &self,
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        let lower_va = self.xlat.lock().map_physical_address_range(
            Some(pa),
            pa,
            length,
            access_rights | MemoryAccessRights::GLOBAL,
        )?;

        Ok(Self::pa_to_kernel(lower_va as u64) as usize)
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// * The result of the operation
    pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
        self.xlat
            .lock()
            .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
    }

    /// Activate kernel address space mapping
    pub fn activate(&self) {
        self.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
    }

    /// Rounds a value down to a kernel space page boundary
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
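    /// (worked example: assuming the typical 4 KiB `Page::SIZE`, `round_up_to_page_size(5000)`
    /// returns `8192`, while an already aligned `4096` is returned unchanged)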
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page aligned address
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    /// Kernel virtual address to physical address
    #[cfg(not(test))]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address & 0x0000_000f_ffff_ffff
    }

    /// Physical address to kernel virtual address
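    /// (e.g. `pa_to_kernel(0x0000_0001_2345_0000)` returns `0xffff_fff1_2345_0000`)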
    #[cfg(not(test))]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        // TODO: make this a const assert: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
        pa | 0xffff_fff0_0000_0000
    }

    // Do not use any mapping in test builds
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}

/// # Kernel mapping wrapper
///
/// An object in the physical address space can be wrapped into a KernelMapper, which maps it to
/// the kernel virtual address space and provides the same access to the object via the Deref
/// trait. When the mapper is dropped, it unmaps the object from the kernel virtual address space.
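///
/// A usage sketch (illustrative only; `DeviceConfig` and `config_arc` are assumptions made up
/// for the example):
/// ```ignore
/// let mapped = KernelMapper::new(config_arc, kernel_space.clone(), MemoryAccessRights::R);
/// let version = mapped.version; // field access goes through the kernel virtual mapping
/// drop(mapped); // the range is unmapped from the kernel address space here
/// ```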
pub struct KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    physical_instance: T,
    va: *const T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::R));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *const T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> Drop for KernelMapper<T>
where
    T: Deref,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapper<T>
where
    T: Deref + Send,
    T::Target: Sized,
{
}

/// # Mutable version of kernel mapping wrapper
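///
/// Behaves like [`KernelMapper`], but requires read-write access rights and additionally exposes
/// the mapped object through `DerefMut`.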
pub struct KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    physical_instance: T,
    va: *mut T::Target,
    kernel_space: KernelSpace,
}

impl<T> KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    /// Create new mapped object
    /// The access_rights parameter must contain read and write access
    pub fn new(
        physical_instance: T,
        kernel_space: KernelSpace,
        access_rights: MemoryAccessRights,
    ) -> Self {
        assert!(access_rights.contains(MemoryAccessRights::RW));

        let pa = physical_instance.deref() as *const _ as usize;
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());

        let va = kernel_space
            .map_memory(pa, length, access_rights)
            .expect("Failed to map area");

        Self {
            physical_instance,
            va: va as *mut T::Target,
            kernel_space,
        }
    }
}

impl<T> Deref for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    type Target = T::Target;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.va }
    }
}

impl<T> DerefMut for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.va }
    }
}

impl<T> Drop for KernelMapperMut<T>
where
    T: DerefMut,
    T::Target: Sized,
{
    fn drop(&mut self) {
        let length = KernelSpace::round_up_to_page_size(core::mem::size_of::<T::Target>());
        self.kernel_space
            .unmap_memory(self.va as usize, length)
            .expect("Failed to unmap area");
    }
}

unsafe impl<T> Send for KernelMapperMut<T>
where
    T: DerefMut + Send,
    T::Target: Sized,
{
}