// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Region pool implementation for allocating pages

use core::slice;

use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::Mutex;

use super::address::PhysicalAddress;
use super::kernel_space::KernelSpace;
use super::region_pool::{Region, RegionPool, RegionPoolError};

/// Single 4 KiB page definition
pub struct Page {}

impl Page {
    pub const SIZE: usize = 4096;
}

/// Area for allocating pages
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    area: [u8; AREA_SIZE],
}

impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
    pub const fn new() -> Self {
        Self {
            area: [0; AREA_SIZE],
        }
    }
}

/// Contiguous pages
pub struct Pages {
    pa: usize,
    length: usize,
    used: bool,
}

impl Pages {
    // Create new instance
    pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
        Pages { pa, length, used }
    }

    /// Copy data to pages
    pub fn copy_data_to_page(&mut self, data: &[u8]) {
        assert!(data.len() <= self.length);

        // Write through the kernel address space mapping of the pages, as the
        // other accessors below do.
        let page_contents = unsafe {
            slice::from_raw_parts_mut(
                KernelSpace::pa_to_kernel(self.pa as u64) as *mut u8,
                data.len(),
            )
        };
        page_contents.copy_from_slice(data);
    }

    /// Zero init pages
    pub fn zero_init(&mut self) {
        unsafe {
            self.get_as_mut_slice::<u8>().fill(0);
        }
    }

    /// Get physical address
    pub fn get_pa(&self) -> PhysicalAddress {
        PhysicalAddress(self.pa)
    }

    /// Get as slice
    ///
    /// # Safety
    /// The returned slice is created from the address and length stored in the object. The
    /// caller must ensure that no other references to the pages are in use.
    pub unsafe fn get_as_slice<T>(&self) -> &[T] {
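        // align_of::<T>() is always a power of two, so `align - 1` is a mask
        // of the address bits that must be zero for a properly aligned pa.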
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts(
            KernelSpace::pa_to_kernel(self.pa as u64) as *const T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Get as mutable slice
    ///
    /// # Safety
    /// The returned slice is created from the address and length stored in the object. The
    /// caller must ensure that no other references to the pages are in use.
    pub unsafe fn get_as_mut_slice<T>(&mut self) -> &mut [T] {
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts_mut(
            KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Create a Pages instance from a slice
    ///
    /// # Safety
    /// The caller must ensure that the passed slice covers a valid page range.
    pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
        Pages {
            pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
            length: core::mem::size_of_val(s),
            used: true,
        }
    }
}

impl Region for Pages {
    type Resource = ();
    type Base = usize;
    type Length = usize;
    type Alignment = usize;

    fn base(&self) -> usize {
        self.pa
    }

    fn length(&self) -> usize {
        self.length
    }

    fn used(&self) -> bool {
        self.used
    }

    fn contains(&self, base: usize, length: usize) -> bool {
        if let (Some(end), Some(self_end)) =
            (base.checked_add(length), self.pa.checked_add(self.length))
        {
            self.pa <= base && end <= self_end
        } else {
            false
        }
    }

    fn try_alloc_aligned(
        &self,
        length: Self::Length,
        alignment: Self::Alignment,
    ) -> Option<Self::Base> {
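        // Worked example (illustrative values, not from any call site): for a
        // region with pa == 0x4000_1000 and length == 0x1_0000, requesting
        // length == 0x1000 with alignment == 0x2000 gives
        // aligned_base == 0x4000_2000, base_offset == 0x1000 and
        // required_length == 0x2000, which fits, so Some(0x4000_2000) is
        // returned.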
        let aligned_base = self.pa.next_multiple_of(alignment);
        let base_offset = aligned_base.checked_sub(self.pa)?;

        let required_length = base_offset.checked_add(length)?;
        if required_length <= self.length {
            Some(aligned_base)
        } else {
            None
        }
    }

    fn try_append(&mut self, other: &Self) -> bool {
        if let (Some(self_end), Some(new_length)) = (
            self.pa.checked_add(self.length),
            self.length.checked_add(other.length),
        ) {
            if self.used == other.used && self_end == other.pa {
                self.length = new_length;
                true
            } else {
                false
            }
        } else {
            false
        }
    }

    fn create_split(
        &self,
        base: usize,
        length: usize,
        resource: Option<Self::Resource>,
    ) -> (Self, Vec<Self>) {
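        // A split produces up to three regions: an untouched head before
        // `base`, the requested range itself (marked used when a resource is
        // attached), and an untouched tail after it. Illustrative example:
        // splitting a free Pages::new(0x1000, 0x4000, false) at base 0x2000
        // with length 0x1000 and resource Some(()) yields the regions
        // (0x1000, 0x1000, free), (0x2000, 0x1000, used) and
        // (0x3000, 0x2000, free).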
        assert!(self.contains(base, length));

        let used = resource.is_some();
        let mut res = Vec::new();
        if self.pa != base {
            res.push(Pages::new(self.pa, base - self.pa, self.used));
        }
        res.push(Pages::new(base, length, used));
        if self.pa + self.length != base + length {
            res.push(Pages::new(
                base + length,
                (self.pa + self.length) - (base + length),
                self.used,
            ));
        }

        (Pages::new(base, length, used), res)
    }
}

/// RegionPool implementation for pages
#[derive(Clone)]
pub struct PagePool {
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

type PagePoolError = RegionPoolError;

impl PagePool {
    /// Create new page pool
    pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
        let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
        let length = page_pool_area.area.len();

        let mut region_pool = RegionPool::new();
        region_pool.add(Pages::new(pa, length, false)).unwrap();
        Self {
            pages: Arc::new(Mutex::new(region_pool)),
        }
    }

    /// Allocate pages for the given length, optionally aligned; the length is rounded up to a
    /// multiple of the alignment when one is given
    pub fn allocate_pages(
        &self,
        length: usize,
        alignment: Option<usize>,
    ) -> Result<Pages, PagePoolError> {
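        // Round the requested length up to a multiple of the alignment, e.g.
        // a request for 0x1800 bytes with 0x1000 alignment allocates 0x2000
        // bytes.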
        let aligned_length = if let Some(alignment) = alignment {
            length.next_multiple_of(alignment)
        } else {
            length
        };

        self.pages.lock().allocate(aligned_length, (), alignment)
    }

    /// Release pages
    pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
        self.pages.lock().release(pages_to_release)
    }
}
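
// Usage sketch (illustrative only; the pool area size, the function name and
// the error handling are assumptions, not part of this module):
//
// static PAGE_POOL_AREA: PagePoolArea<{ 16 * Page::SIZE }> = PagePoolArea::new();
//
// fn example() -> Result<(), RegionPoolError> {
//     let page_pool = PagePool::new(&PAGE_POOL_AREA);
//     let mut pages = page_pool.allocate_pages(2 * Page::SIZE, Some(Page::SIZE))?;
//     pages.zero_init();
//     page_pool.release_pages(pages)?;
//     Ok(())
// }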

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pages() {
        let area = [0x5au8; 4096];
        let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);

        assert_eq!(area.as_ptr() as usize, pages.pa);
        assert_eq!(area.len(), pages.length);
        assert!(pages.used);
        assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
        assert_eq!(area.as_ptr() as usize, pages.base());
        assert_eq!(area.len(), pages.length());
        assert!(pages.used());

        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        pages.zero_init();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        let s = unsafe { pages.get_as_mut_slice() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        let from_slice = unsafe { Pages::from_slice(s) };
        assert_eq!(area.as_ptr() as usize, from_slice.pa);
        assert_eq!(area.len(), from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests
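        // contains() computes the range ends with checked_add, so a range
        // whose end would overflow the address space is rejected rather than
        // wrapped around.
        assert!(!pages.contains(usize::MAX, 0x1000));
        assert!(!pages.contains(0x4000_0000, usize::MAX));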
    }

    #[test]
    fn test_pages_try_alloc() {
        let pages = Pages::new(0x4000_1000, 0x1_0000, false);

        assert_eq!(Some(0x4000_1000), pages.try_alloc(0x1000, None));
        assert_eq!(Some(0x4000_2000), pages.try_alloc(0x1000, Some(0x2000)));
        assert_eq!(None, pages.try_alloc(0x1000, Some(0x10_0000)));
    }
}