// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Module for converting addresses between the kernel virtual address space and the physical
//! address space
Imre Kisc1dab892024-03-26 12:03:58 +01006use core::ops::Range;
Imre Kis703482d2023-11-30 15:51:26 +01007
Imre Kis9a7440e2024-04-18 15:44:45 +02008use alloc::sync::Arc;
Imre Kisc1dab892024-03-26 12:03:58 +01009use spin::Mutex;
10
11use super::{
12 page_pool::{Page, PagePool},
13 MemoryAccessRights, Xlat, XlatError,
14};
15
16static mut KERNEL_SPACE_INSTANCE: Option<KernelSpace> = None;
17
Imre Kis9a7440e2024-04-18 15:44:45 +020018#[derive(Clone)]
Imre Kisc1dab892024-03-26 12:03:58 +010019pub struct KernelSpace {
Imre Kis9a7440e2024-04-18 15:44:45 +020020 xlat: Arc<Mutex<Xlat>>,
Imre Kisc1dab892024-03-26 12:03:58 +010021}
22
23/// Kernel space memory mapping
24///
25/// This object handles the translation tables of the kernel address space. The main goal is to
26/// limit the kernel's access to the memory and only map the ranges which are necessary for
27/// operation.
28/// The current implementation uses identity mapping into the upper virtual address range, e.g.
29/// PA = 0x0000_0001_2345_0000 -> VA = 0xffff_fff1_2345_0000.
Imre Kis703482d2023-11-30 15:51:26 +010030impl KernelSpace {
Imre Kisc1dab892024-03-26 12:03:58 +010031 pub const PAGE_SIZE: usize = Page::SIZE;
32
33 /// Creates the kernel memory mapping instance. This should be called from the main core's init
34 /// code.
35 /// # Arguments
36 /// * page_pool: Page pool for allocation kernel translation tables
Imre Kis9a7440e2024-04-18 15:44:45 +020037 pub fn new(page_pool: PagePool) -> Self {
38 Self {
39 xlat: Arc::new(Mutex::new(Xlat::new(
40 page_pool,
41 0x0000_0000..0x10_0000_0000,
42 ))),
Imre Kisc1dab892024-03-26 12:03:58 +010043 }
44 }
45
46 /// Maps the code (RX) and data (RW) segments of the SPMC itself.
47 /// # Arguments
48 /// * code_range: (start, end) addresses of the code segment
49 /// * data_range: (start, end) addresses of the data segment
50 /// # Return value
51 /// * The result of the operation
Imre Kis9a7440e2024-04-18 15:44:45 +020052 pub fn init(
53 &self,
54 code_range: Range<usize>,
55 data_range: Range<usize>,
56 ) -> Result<(), XlatError> {
57 let mut xlat = self.xlat.lock();
Imre Kisc1dab892024-03-26 12:03:58 +010058
Imre Kis9a7440e2024-04-18 15:44:45 +020059 xlat.map_physical_address_range(
60 Some(code_range.start),
61 code_range.start,
62 code_range.len(),
63 MemoryAccessRights::RX | MemoryAccessRights::GLOBAL,
64 )?;
Imre Kisc1dab892024-03-26 12:03:58 +010065
Imre Kis9a7440e2024-04-18 15:44:45 +020066 xlat.map_physical_address_range(
67 Some(data_range.start),
68 data_range.start,
69 data_range.len(),
70 MemoryAccessRights::RW | MemoryAccessRights::GLOBAL,
71 )?;
Imre Kisc1dab892024-03-26 12:03:58 +010072
Imre Kis9a7440e2024-04-18 15:44:45 +020073 Ok(())
Imre Kisc1dab892024-03-26 12:03:58 +010074 }
75
76 /// Map memory range into the kernel address space
77 /// # Arguments
78 /// * pa: Physical address of the memory
79 /// * length: Length of the range in bytes
80 /// * access_right: Memory access rights
81 /// # Return value
82 /// * Virtual address of the mapped memory or error
83 pub fn map_memory(
Imre Kis9a7440e2024-04-18 15:44:45 +020084 &self,
Imre Kisc1dab892024-03-26 12:03:58 +010085 pa: usize,
86 length: usize,
87 access_rights: MemoryAccessRights,
88 ) -> Result<usize, XlatError> {
Imre Kis9a7440e2024-04-18 15:44:45 +020089 let lower_va = self.xlat.lock().map_physical_address_range(
90 Some(pa),
91 pa,
92 length,
93 access_rights | MemoryAccessRights::GLOBAL,
94 )?;
Imre Kisc1dab892024-03-26 12:03:58 +010095
Imre Kis9a7440e2024-04-18 15:44:45 +020096 Ok(Self::pa_to_kernel(lower_va as u64) as usize)
Imre Kisc1dab892024-03-26 12:03:58 +010097 }
98
99 /// Unmap memory range from the kernel address space
100 /// # Arguments
101 /// * va: Virtual address of the memory
102 /// * length: Length of the range in bytes
103 /// # Return value
104 /// The result of the operation
Imre Kis9a7440e2024-04-18 15:44:45 +0200105 pub fn unmap_memory(&self, va: usize, length: usize) -> Result<(), XlatError> {
106 self.xlat
107 .lock()
108 .unmap_virtual_address_range(Self::kernel_to_pa(va as u64) as usize, length)
Imre Kisc1dab892024-03-26 12:03:58 +0100109 }
110
111 /// Activate kernel address space mapping
Imre Kis9a7440e2024-04-18 15:44:45 +0200112 pub fn activate(&self) {
113 self.xlat.lock().activate(0, super::TTBR::TTBR1_EL1);
Imre Kisc1dab892024-03-26 12:03:58 +0100114 }
115
116 /// Rounds a value down to a kernel space page boundary
117 pub const fn round_down_to_page_size(size: usize) -> usize {
118 size & !(Self::PAGE_SIZE - 1)
119 }
120
121 /// Rounds a value up to a kernel space page boundary
122 pub const fn round_up_to_page_size(size: usize) -> usize {
123 (size + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1)
124 }
125
Imre Kis49c08e32024-04-19 14:16:39 +0200126 /// Returns the offset to the preceding page aligned address
127 pub const fn offset_in_page(address: usize) -> usize {
128 address & (Self::PAGE_SIZE - 1)
129 }
130
Imre Kis703482d2023-11-30 15:51:26 +0100131 /// Kernel virtual address to physical address
Imre Kisc1dab892024-03-26 12:03:58 +0100132 #[cfg(not(test))]
Imre Kis703482d2023-11-30 15:51:26 +0100133 pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
134 kernel_address & 0x0000_000f_ffff_ffff
135 }
Imre Kis703482d2023-11-30 15:51:26 +0100136 /// Physical address to kernel virtual address
Imre Kisc1dab892024-03-26 12:03:58 +0100137 #[cfg(not(test))]
Imre Kis703482d2023-11-30 15:51:26 +0100138 pub const fn pa_to_kernel(pa: u64) -> u64 {
139 // TODO: make this consts assert_eq!(pa & 0xffff_fff0_0000_0000, 0);
140 pa | 0xffff_fff0_0000_0000
141 }
Imre Kis703482d2023-11-30 15:51:26 +0100142
Imre Kisc1dab892024-03-26 12:03:58 +0100143 // Do not use any mapping in test build
144 #[cfg(test)]
Imre Kis703482d2023-11-30 15:51:26 +0100145 pub const fn kernel_to_pa(kernel_address: u64) -> u64 {
146 kernel_address
147 }
148
Imre Kisc1dab892024-03-26 12:03:58 +0100149 #[cfg(test)]
Imre Kis703482d2023-11-30 15:51:26 +0100150 pub const fn pa_to_kernel(pa: u64) -> u64 {
151 pa
152 }
153}