// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Region pool implementation for allocating pages

use core::slice;

use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::Mutex;

use super::address::PhysicalAddress;
use super::kernel_space::KernelSpace;
use super::region_pool::{Region, RegionPool, RegionPoolError};

/// Single 4 KiB page definition
pub struct Page {}

impl Page {
    pub const SIZE: usize = 4096;
}

/// Area for allocating pages
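///
/// The area is typically declared as a `static` so it can be handed to
/// [`PagePool::new`], which requires a `'static` reference. A minimal sketch
/// (name and size are arbitrary):
///
/// ```ignore
/// static PAGE_POOL_AREA: PagePoolArea<0x10000> = PagePoolArea::new();
/// ```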
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    area: [u8; AREA_SIZE],
}

impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
    pub const fn new() -> Self {
        Self {
            area: [0; AREA_SIZE],
        }
    }
}

/// Contiguous pages
pub struct Pages {
    pa: usize,
    length: usize,
    used: bool,
}

impl Pages {
    /// Create new instance
    pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
        Pages { pa, length, used }
    }

    /// Copy data to pages
    pub fn copy_data_to_page(&mut self, data: &[u8]) {
        assert!(data.len() <= self.length);

        let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
        page_contents.clone_from_slice(data);
    }

    /// Zero init pages
    pub fn zero_init(&mut self) {
        unsafe {
            self.get_as_mut_slice::<u8>().fill(0);
        }
    }

    /// Get physical address
    pub fn get_pa(&self) -> PhysicalAddress {
        PhysicalAddress(self.pa)
    }

    /// Get as slice
    ///
    /// **Unsafe**: The returned slice is created from the address and length stored in the
    /// object. The caller has to ensure that no other references to the pages are in use.
    pub unsafe fn get_as_slice<T>(&self) -> &[T] {
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts(
            KernelSpace::pa_to_kernel(self.pa as u64) as *const T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Get as mutable slice
    ///
    /// **Unsafe**: The returned slice is created from the address and length stored in the
    /// object. The caller has to ensure that no other references to the pages are in use.
    pub unsafe fn get_as_mut_slice<T>(&mut self) -> &mut [T] {
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts_mut(
            KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Create a Pages object from a slice
    ///
    /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
    pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
        Pages {
            pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
            length: core::mem::size_of_val(s),
            used: true,
        }
    }
}

impl Region for Pages {
    type Resource = ();
    type Base = usize;
    type Length = usize;
    type Alignment = usize;

    fn base(&self) -> usize {
        self.pa
    }

    fn length(&self) -> usize {
        self.length
    }

    fn used(&self) -> bool {
        self.used
    }

    fn contains(&self, base: usize, length: usize) -> bool {
        if let (Some(end), Some(self_end)) =
            (base.checked_add(length), self.pa.checked_add(self.length))
        {
            self.pa <= base && end <= self_end
        } else {
            false
        }
    }

    fn try_alloc_aligned(
        &self,
        length: Self::Length,
        alignment: Self::Alignment,
    ) -> Option<Self::Base> {
        let aligned_base = self.pa.next_multiple_of(alignment);
        let base_offset = aligned_base.checked_sub(self.pa)?;

        let required_length = base_offset.checked_add(length)?;
        if required_length <= self.length {
            Some(aligned_base)
        } else {
            None
        }
    }

    fn try_append(&mut self, other: &Self) -> bool {
        if let (Some(self_end), Some(new_length)) = (
            self.pa.checked_add(self.length),
            self.length.checked_add(other.length),
        ) {
            if self.used == other.used && self_end == other.pa {
                self.length = new_length;
                true
            } else {
                false
            }
        } else {
            false
        }
    }

    fn create_split(
        &self,
        base: usize,
        length: usize,
        resource: Option<Self::Resource>,
    ) -> (Self, Vec<Self>) {
        assert!(self.contains(base, length));

        let used = resource.is_some();
        let mut res = Vec::new();
        if self.pa != base {
            res.push(Pages::new(self.pa, base - self.pa, self.used));
        }
        res.push(Pages::new(base, length, used));
        if self.pa + self.length != base + length {
            res.push(Pages::new(
                base + length,
                (self.pa + self.length) - (base + length),
                self.used,
            ));
        }

        (Pages::new(base, length, used), res)
    }
}

/// RegionPool implementation for pages
#[derive(Clone)]
pub struct PagePool {
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

type PagePoolError = RegionPoolError;

impl PagePool {
    /// Create new page pool
    pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
        let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
        let length = page_pool_area.area.len();

        let mut region_pool = RegionPool::new();
        region_pool.add(Pages::new(pa, length, false)).unwrap();
        Self {
            pages: Arc::new(Mutex::new(region_pool)),
        }
    }

    /// Allocate pages for given length
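    ///
    /// A minimal usage sketch (an `ignore`d doctest, since it needs a static
    /// pool area as sketched on [`PagePoolArea`] and a mapped kernel address
    /// space):
    ///
    /// ```ignore
    /// let page_pool = PagePool::new(&PAGE_POOL_AREA);
    /// let pages = page_pool.allocate_pages(Page::SIZE, Some(Page::SIZE)).unwrap();
    /// page_pool.release_pages(pages).unwrap();
    /// ```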
    pub fn allocate_pages(
        &self,
        length: usize,
        alignment: Option<usize>,
    ) -> Result<Pages, PagePoolError> {
        let aligned_length = if let Some(alignment) = alignment {
            length.next_multiple_of(alignment)
        } else {
            length
        };

        self.pages.lock().allocate(aligned_length, (), alignment)
    }

    /// Release pages
    pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
        self.pages.lock().release(pages_to_release)
    }

    fn round_up_to_page_size(length: usize) -> usize {
        (length + Page::SIZE - 1) & !(Page::SIZE - 1)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pages() {
        let area = [0x5au8; 4096];
        let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);

        assert_eq!(area.as_ptr() as usize, pages.pa);
        assert_eq!(area.len(), pages.length);
        assert!(pages.used);
        assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
        assert_eq!(area.as_ptr() as usize, pages.base());
        assert_eq!(area.len(), pages.length());
        assert!(pages.used());

        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        pages.zero_init();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        let s = unsafe { pages.get_as_mut_slice() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        let from_slice = unsafe { Pages::from_slice(s) };
        assert_eq!(area.as_ptr() as usize, from_slice.pa);
        assert_eq!(area.len(), from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests
    }

    #[test]
    fn test_pages_try_alloc() {
        let pages = Pages::new(0x4000_1000, 0x10000, false);

        assert_eq!(Some(0x4000_1000), pages.try_alloc(0x1000, None));
        assert_eq!(Some(0x4000_2000), pages.try_alloc(0x1000, Some(0x2000)));
        assert_eq!(None, pages.try_alloc(0x1000, Some(0x10_0000)));
    }
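
    #[test]
    fn test_pages_try_append() {
        // Appending an adjacent range with a matching `used` flag extends the
        // region in place.
        let mut pages = Pages::new(0x4000_0000, 0x1000, true);
        assert!(pages.try_append(&Pages::new(0x4000_1000, 0x1000, true)));
        assert_eq!(0x2000, pages.length);

        // Non-adjacent ranges and mismatching `used` flags are rejected.
        assert!(!pages.try_append(&Pages::new(0x4000_4000, 0x1000, true)));
        assert!(!pages.try_append(&Pages::new(0x4000_2000, 0x1000, false)));

        // A region whose end address or combined length would overflow usize
        // is rejected by the checked arithmetic.
        let mut pages = Pages::new(0x1000, usize::MAX, true);
        assert!(!pages.try_append(&Pages::new(0x2000, 0x1000, true)));
    }

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(0, PagePool::round_up_to_page_size(0));
        assert_eq!(Page::SIZE, PagePool::round_up_to_page_size(1));
        assert_eq!(Page::SIZE, PagePool::round_up_to_page_size(Page::SIZE));
        assert_eq!(2 * Page::SIZE, PagePool::round_up_to_page_size(Page::SIZE + 1));
    }

    #[test]
    fn test_page_pool() {
        // Smoke test sketch for the pool itself. It only exercises the region
        // bookkeeping (no page contents are touched) and assumes, like the
        // other tests in this module, that KernelSpace address translation is
        // an identity mapping in the test environment.
        static PAGE_POOL_AREA: PagePoolArea<16384> = PagePoolArea::new();
        let page_pool = PagePool::new(&PAGE_POOL_AREA);

        let pages = page_pool
            .allocate_pages(Page::SIZE, Some(Page::SIZE))
            .unwrap();
        assert_eq!(Page::SIZE, pages.length());

        page_pool.release_pages(pages).unwrap();
    }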
}