blob: d767caf0e591954fc5d5024c54a39f84eeda8ca0 [file] [log] [blame]
Imre Kis703482d2023-11-30 15:51:26 +01001// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Region pool implementation for allocating pages
5
Imre Kis703482d2023-11-30 15:51:26 +01006use alloc::sync::Arc;
7use alloc::vec::Vec;
8use spin::Mutex;
9
Imre Kis21d7f722025-01-17 17:55:35 +010010use crate::address::VirtualAddress;
11use crate::KernelAddressTranslator;
12
Imre Kisd5b96fd2024-09-11 17:04:32 +020013use super::address::PhysicalAddress;
Imre Kis703482d2023-11-30 15:51:26 +010014use super::region_pool::{Region, RegionPool, RegionPoolError};
15
/// Definition of a single page.
pub struct Page {}

impl Page {
    /// Size of one page in bytes (4 kB).
    pub const SIZE: usize = 0x1000;
}
22
/// Area for allocating pages
///
/// The 4096-byte alignment matches [`Page::SIZE`], so every page carved out of
/// the area is naturally page-aligned.
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    /// Backing storage for the pool; zero-filled by [`PagePoolArea::new`].
    area: [u8; AREA_SIZE],
}
28
29impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
30 pub const fn new() -> Self {
31 Self {
32 area: [0; AREA_SIZE],
33 }
34 }
35}
36
/// Continuous pages
///
/// Descriptor of a physically contiguous range of pages managed by the pool.
pub struct Pages {
    /// Physical address of the first byte of the range
    pa: usize,
    /// Length of the range in bytes
    length: usize,
    /// Whether the range is currently allocated
    used: bool,
}
43
impl Pages {
    // Create new instance
    //
    // `pa` is the physical address of the first page, `length` the size of the
    // range in bytes, and `used` whether the range counts as allocated.
    pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
        Pages { pa, length, used }
    }

    /// Copy data to pages
    ///
    /// Copies `data` to the start of the page range through the kernel mapping
    /// provided by `K`.
    ///
    /// # Panics
    /// Panics if `data` is longer than the page range.
    pub fn copy_data_to_page<K: KernelAddressTranslator>(&mut self, data: &[u8]) {
        assert!(data.len() <= self.length);

        // SAFETY: the destination lies within the owned page range (length
        // checked by the assert above) and `&mut self` gives exclusive access.
        let page_contents = unsafe {
            core::slice::from_raw_parts_mut(
                K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *mut u8,
                data.len(),
            )
        };
        page_contents.clone_from_slice(data);
    }

    /// Zero init pages
    ///
    /// Fills the entire page range with zero bytes.
    pub fn zero_init<K: KernelAddressTranslator>(&mut self) {
        // SAFETY: `&mut self` guarantees no other reference to the range is
        // live while the temporary slice exists.
        unsafe {
            self.get_as_mut_slice::<K, u8>().fill(0);
        }
    }

    /// Get physical address
    pub fn get_pa(&self) -> PhysicalAddress {
        PhysicalAddress(self.pa)
    }

    /// Get as slice
    ///
    /// # Safety
    /// The returned slice is created from its address and length which is stored in the
    /// object. The caller has to ensure that no other references are being used of the pages.
    ///
    /// # Panics
    /// Panics if `pa` is not aligned for `T`.
    pub unsafe fn get_as_slice<K: KernelAddressTranslator, T>(&self) -> &[T] {
        // `align_of` is always a power of two, so masking with `align - 1`
        // checks the alignment of the physical address.
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts(
            K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *const T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Get as mutable slice
    ///
    /// # Safety
    /// The returned slice is created from its address and length which is stored in the
    /// object. The caller has to ensure that no other references are being used of the pages.
    ///
    /// # Panics
    /// Panics if `pa` is not aligned for `T`.
    pub unsafe fn get_as_mut_slice<K: KernelAddressTranslator, T>(&mut self) -> &mut [T] {
        // `align_of` is always a power of two, so masking with `align - 1`
        // checks the alignment of the physical address.
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts_mut(
            K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *mut T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Set contents from slice
    ///
    /// Builds a `Pages` descriptor, marked as used, that covers the memory
    /// backing the given slice.
    ///
    /// # Safety
    /// The caller has to ensure that the passed slice is a valid page range.
    pub unsafe fn from_slice<K: KernelAddressTranslator, T>(s: &mut [T]) -> Pages {
        Pages {
            pa: K::kernel_to_pa(VirtualAddress(s.as_ptr() as usize)).0,
            length: core::mem::size_of_val(s),
            used: true,
        }
    }
}
115
116impl Region for Pages {
117 type Resource = ();
Imre Kis589aa052024-09-10 20:19:47 +0200118 type Base = usize;
119 type Length = usize;
Imre Kisf0370e82024-11-18 16:24:55 +0100120 type Alignment = usize;
Imre Kis703482d2023-11-30 15:51:26 +0100121
122 fn base(&self) -> usize {
123 self.pa
124 }
125
126 fn length(&self) -> usize {
127 self.length
128 }
129
130 fn used(&self) -> bool {
131 self.used
132 }
133
134 fn contains(&self, base: usize, length: usize) -> bool {
135 if let (Some(end), Some(self_end)) =
136 (base.checked_add(length), self.pa.checked_add(self.length))
137 {
138 self.pa <= base && end <= self_end
139 } else {
140 false
141 }
142 }
143
Imre Kisf0370e82024-11-18 16:24:55 +0100144 fn try_alloc_aligned(
145 &self,
146 length: Self::Length,
147 alignment: Self::Alignment,
148 ) -> Option<Self::Base> {
149 let aligned_base = self.pa.next_multiple_of(alignment);
150 let base_offset = aligned_base.checked_sub(self.pa)?;
151
152 let required_length = base_offset.checked_add(length)?;
153 if required_length <= self.length {
154 Some(aligned_base)
155 } else {
156 None
157 }
158 }
159
Imre Kis703482d2023-11-30 15:51:26 +0100160 fn try_append(&mut self, other: &Self) -> bool {
161 if let (Some(self_end), Some(new_length)) = (
162 self.pa.checked_add(self.length),
163 self.length.checked_add(other.length),
164 ) {
165 if self.used == other.used && self_end == other.pa {
166 self.length = new_length;
167 true
168 } else {
169 false
170 }
171 } else {
172 false
173 }
174 }
175
176 fn create_split(
177 &self,
178 base: usize,
179 length: usize,
180 resource: Option<Self::Resource>,
181 ) -> (Self, Vec<Self>) {
182 assert!(self.contains(base, length));
183
184 let used = resource.is_some();
185 let mut res = Vec::new();
186 if self.pa != base {
187 res.push(Pages::new(self.pa, base - self.pa, self.used));
188 }
189 res.push(Pages::new(base, length, used));
190 if self.pa + self.length != base + length {
191 res.push(Pages::new(
192 base + length,
193 (self.pa + self.length) - (base + length),
194 self.used,
195 ));
196 }
197
198 (Pages::new(base, length, used), res)
199 }
200}
201
/// RegionPool implementation for pages
///
/// Cloning is cheap: clones share the same underlying region pool through the
/// `Arc`.
#[derive(Clone)]
pub struct PagePool {
    /// Shared, lock-protected region pool backing all clones of this handle
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

/// Error type returned by page pool operations.
type PagePoolError = RegionPoolError;
209
210impl PagePool {
211 /// Create new page pool
Imre Kis21d7f722025-01-17 17:55:35 +0100212 pub fn new<K: KernelAddressTranslator, const AREA_SIZE: usize>(
213 page_pool_area: &'static PagePoolArea<AREA_SIZE>,
214 ) -> Self {
215 let pa = K::kernel_to_pa(VirtualAddress(
216 &page_pool_area.area[0] as *const u8 as usize,
217 ))
218 .0;
Imre Kis703482d2023-11-30 15:51:26 +0100219 let length = page_pool_area.area.len();
220
221 let mut region_pool = RegionPool::new();
222 region_pool.add(Pages::new(pa, length, false)).unwrap();
223 Self {
224 pages: Arc::new(Mutex::new(region_pool)),
225 }
226 }
227
228 /// Allocate pages for given length
Imre Kis631127d2024-11-21 13:09:01 +0100229 pub fn allocate_pages(
230 &self,
231 length: usize,
232 alignment: Option<usize>,
233 ) -> Result<Pages, PagePoolError> {
234 let aligned_length = if let Some(alignment) = alignment {
235 length.next_multiple_of(alignment)
236 } else {
237 length
238 };
239
240 self.pages.lock().allocate(aligned_length, (), alignment)
Imre Kis703482d2023-11-30 15:51:26 +0100241 }
242
243 /// Release pages
244 pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
245 self.pages.lock().release(pages_to_release)
246 }
Imre Kis703482d2023-11-30 15:51:26 +0100247}
248
#[cfg(test)]
mod tests {
    use super::*;

    /// Address translator that maps kernel and physical addresses one-to-one,
    /// so host memory can stand in for physical pages in the tests.
    struct DummyKernelAddressTranslator {}

    impl KernelAddressTranslator for DummyKernelAddressTranslator {
        fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
            va.identity_pa()
        }

        fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
            pa.identity_va()
        }
    }

    #[test]
    fn test_pages() {
        // NOTE(review): `area` is not declared `mut`, yet the calls below write
        // to it through raw pointers derived from `as_ptr()` — confirm this
        // does not rely on undefined behavior.
        let area = [0x5au8; 4096];
        let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);

        // Accessors reflect the constructor arguments.
        assert_eq!(area.as_ptr() as usize, pages.pa);
        assert_eq!(area.len(), pages.length);
        assert!(pages.used);
        assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
        assert_eq!(area.as_ptr() as usize, pages.base());
        assert_eq!(area.len(), pages.length());
        assert!(pages.used());

        // Copying places the data at the start of the range.
        pages.copy_data_to_page::<DummyKernelAddressTranslator>(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        // Zero init clears the previously copied bytes.
        pages.zero_init::<DummyKernelAddressTranslator>();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        // Writes through the mutable slice view are visible in the area.
        let s = unsafe { pages.get_as_mut_slice::<DummyKernelAddressTranslator, u8>() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        // A descriptor rebuilt from the slice matches the original range.
        let from_slice = unsafe { Pages::from_slice::<DummyKernelAddressTranslator, u8>(s) };
        assert_eq!(area.as_ptr() as usize, from_slice.pa);
        assert_eq!(area.len(), from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        // Ranges that start before, end after, or extend past the region are
        // all rejected.
        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests
    }

    #[test]
    fn test_pages_try_alloc() {
        let pages = Pages::new(0x4000_1000, 0x10000, false);

        // Unaligned request starts at the region base; an aligned request is
        // rounded up to the next boundary; an alignment whose first boundary
        // leaves no room for the request fails.
        assert_eq!(Some(0x4000_1000), pages.try_alloc(0x1000, None));
        assert_eq!(Some(0x4000_2000), pages.try_alloc(0x1000, Some(0x2000)));
        assert_eq!(None, pages.try_alloc(0x1000, Some(0x10_0000)));
    }
}
Imre Kis703482d2023-11-30 15:51:26 +0100316}