// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical address space
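//!
//! A minimal usage sketch under assumed values (the `page_pool` construction and the concrete
//! code/data ranges below are illustrative assumptions, not values defined by this crate):
//!
//! ```ignore
//! // Called once from the boot core's init code.
//! KernelSpace::create_instance(page_pool);
//! // Map the SPMC's own code and data segments.
//! KernelSpace::init(0x4000_0000..0x4008_0000, 0x4008_0000..0x4010_0000)?;
//! // Program TTBR1_EL1 with the kernel translation tables.
//! KernelSpace::activate();
//! ```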

use core::ops::Range;

use alloc::string::String;
use spin::Mutex;

use super::{
    page_pool::{Page, PagePool},
    MemoryAccessRights, Xlat, XlatError,
};

// Singleton instance of the kernel address space, set up by `KernelSpace::create_instance`.
static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;

pub struct KernelSpace {
    xlat: Mutex<Xlat>,
}

/// Kernel space memory mapping
///
/// This object handles the translation tables of the kernel address space. The main goal is to
/// limit the kernel's access to memory and to map only the ranges that are necessary for
/// operation.
///
/// The current implementation uses identity mapping into the upper virtual address range, e.g.
/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
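///
/// A minimal sketch of this conversion (it mirrors [`KernelSpace::pa_to_kernel`] and
/// [`KernelSpace::kernel_to_pa`] below; the physical address is an arbitrary example):
///
/// ```ignore
/// let pa: u64 = 0x0000_0001_2345_0000;
/// let va = KernelSpace::pa_to_kernel(pa); // 0xffff_fff1_2345_0000
/// assert_eq!(KernelSpace::kernel_to_pa(va), pa);
/// ```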
impl KernelSpace {
    pub const PAGE_SIZE: usize = Page::SIZE;

    /// Creates the kernel memory mapping instance. This should be called from the main core's
    /// init code.
    /// # Arguments
    /// * page_pool: Page pool for allocating kernel translation tables
    pub fn create_instance(page_pool: PagePool) {
        unsafe {
            assert!(KERNEL_SPACE_INSTANCE.is_none());

            KERNEL_SPACE_INSTANCE = Some(Self {
                xlat: Mutex::new(Xlat::new(page_pool, 0x0000_0000..0x10_0000_0000)),
            });
        }
    }

    /// Maps the code (RX) and data (RW) segments of the SPMC itself.
    /// # Arguments
    /// * code_range: Address range of the code segment
    /// * data_range: Address range of the data segment
    /// # Return value
    /// * The result of the operation
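    ///
    /// A hypothetical call using linker-provided segment symbols (the symbol names below are
    /// assumptions for illustration, not part of this crate):
    ///
    /// ```ignore
    /// extern "C" {
    ///     static __text_start: u8;
    ///     static __text_end: u8;
    ///     static __data_start: u8;
    ///     static __data_end: u8;
    /// }
    ///
    /// let code = unsafe { &__text_start as *const u8 as usize..&__text_end as *const u8 as usize };
    /// let data = unsafe { &__data_start as *const u8 as usize..&__data_end as *const u8 as usize };
    /// KernelSpace::init(code, data)?;
    /// ```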
    pub fn init(code_range: Range<usize>, data_range: Range<usize>) -> Result<(), XlatError> {
        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
            let mut xlat = kernel_space.xlat.lock();

            xlat.map_physical_address_range(
                Some(code_range.start),
                code_range.start,
                code_range.len(),
                MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
            )?;

            xlat.map_physical_address_range(
                Some(data_range.start),
                data_range.start,
                data_range.len(),
                MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
            )?;

            Ok(())
        } else {
            Err(XlatError::InvalidOperation(String::from(
                "KernelSpace is not initialized",
            )))
        }
    }

    /// Map memory range into the kernel address space
    /// # Arguments
    /// * pa: Physical address of the memory
    /// * length: Length of the range in bytes
    /// * access_rights: Memory access rights
    /// # Return value
    /// * Virtual address of the mapped memory or error
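    ///
    /// For example, mapping a single page of a hypothetical peripheral at physical address
    /// 0x0900_0000 (the address, the access rights and the register read are illustrative
    /// assumptions):
    ///
    /// ```ignore
    /// let va = KernelSpace::map_memory(0x0900_0000, KernelSpace::PAGE_SIZE, MemoryAccessRights::RW)?;
    /// let reg = unsafe { core::ptr::read_volatile(va as *const u32) };
    /// KernelSpace::unmap_memory(va, KernelSpace::PAGE_SIZE)?;
    /// ```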
    pub fn map_memory(
        pa: usize,
        length: usize,
        access_rights: MemoryAccessRights,
    ) -> Result<usize, XlatError> {
        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
            let lower_va = kernel_space.xlat.lock().map_physical_address_range(
                Some(pa),
                pa,
                length,
                access_rights | MemoryAccessRights::GLOBAL,
            )?;

            Ok(Self::pa_to_kernel(lower_va as u64) as usize)
        } else {
            Err(XlatError::InvalidOperation(String::from(
                "KernelSpace is not initialized",
            )))
        }
    }

    /// Unmap memory range from the kernel address space
    /// # Arguments
    /// * va: Virtual address of the memory
    /// * length: Length of the range in bytes
    /// # Return value
    /// * The result of the operation
    pub fn unmap_memory(va: usize, length: usize) -> Result<(), XlatError> {
        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
            kernel_space
                .xlat
                .lock()
                .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
        } else {
            Err(XlatError::InvalidOperation(String::from(
                "KernelSpace is not initialized",
            )))
        }
    }

    /// Activate kernel address space mapping
    pub fn activate() {
        if let Some(kernel_space) = unsafe { &KERNEL_SPACE_INSTANCE } {
            kernel_space.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
        }
    }

    /// Rounds a value down to a kernel space page boundary
    pub const fn round_down_to_page_size(size: usize) -> usize {
        size & !(Self::PAGE_SIZE - 1)
    }

    /// Rounds a value up to a kernel space page boundary
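    ///
    /// A quick sanity sketch of both rounding helpers, assuming the usual 4 KiB page size
    /// (the real value comes from `Page::SIZE`):
    ///
    /// ```ignore
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1001), 0x2000);
    /// assert_eq!(KernelSpace::round_up_to_page_size(0x1000), 0x1000);
    /// assert_eq!(KernelSpace::round_down_to_page_size(0x1fff), 0x1000);
    /// ```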
    pub const fn round_up_to_page_size(size: usize) -> usize {
        (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
    }

    /// Returns the offset to the preceding page-aligned address
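    ///
    /// E.g. with 4 KiB pages (an assumption; the real value comes from `Page::SIZE`),
    /// `offset_in_page(0x1234)` yields `0x234`.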
    pub const fn offset_in_page(address: usize) -> usize {
        address & (Self::PAGE_SIZE - 1)
    }

    /// Kernel virtual address to physical address
    #[cfg(not(test))]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address & 0x0000_000f_ffff_ffff
    }

    /// Physical address to kernel virtual address
    #[cfg(not(test))]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        // TODO: make this a const assertion: assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
        pa | 0xffff_fff0_0000_0000
    }

    // Do not use any address mapping in the test build.
    #[cfg(test)]
    pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
        kernel_address
    }

    #[cfg(test)]
    pub const fn pa_to_kernel(pa: u64) -> u64 {
        pa
    }
}