blob: 2d966d91e199e807618d9fb55b2cf1e6b976b464 [file] [log] [blame]
Imre Kis703482d2023-11-30 15:51:26 +01001// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Region pool implementation for allocating pages
5
6use core::slice;
7
8use alloc::sync::Arc;
9use alloc::vec::Vec;
10use spin::Mutex;
11
Imre Kisd5b96fd2024-09-11 17:04:32 +020012use super::address::PhysicalAddress;
Imre Kis703482d2023-11-30 15:51:26 +010013use super::kernel_space::KernelSpace;
14use super::region_pool::{Region, RegionPool, RegionPoolError};
15
/// Single 4kB page definition
pub struct Page {}

impl Page {
    /// Page size in bytes (4 KiB); the allocation granularity of the pool.
    pub const SIZE: usize = 4096;
}
22
/// Area for allocating pages
///
/// The backing storage is 4 KiB aligned (`repr(align(4096))`) so that the
/// pool always hands out page-aligned ranges of it.
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    area: [u8; AREA_SIZE],
}

impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
    /// Create a new, zero-filled area. `const` so it can back a `static`.
    pub const fn new() -> Self {
        Self {
            area: [0; AREA_SIZE],
        }
    }
}

// Public types with a no-argument `new` should also implement `Default`
// (clippy::new_without_default); cannot be derived for a const-generic array.
impl<const AREA_SIZE: usize> Default for PagePoolArea<AREA_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}
36
/// Continuous pages
pub struct Pages {
    // Physical address of the first byte of the range
    pa: usize,
    // Length of the range in bytes
    length: usize,
    // Whether the range is currently allocated from the pool
    used: bool,
}
43
impl Pages {
    // Create new instance describing `length` bytes starting at physical address `pa`.
    pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
        Pages { pa, length, used }
    }

    /// Copy data to pages
    ///
    /// Panics if `data` is longer than the page range.
    // NOTE(review): this writes through `self.pa` as a raw pointer, while
    // `get_as_slice` first maps the address with `KernelSpace::pa_to_kernel`.
    // Unless that mapping is an identity mapping, the two paths address
    // different memory — confirm which is intended.
    pub fn copy_data_to_page(&mut self, data: &[u8]) {
        assert!(data.len() <= self.length);

        // Raw slice over the start of the range; relies on `pa`/`length`
        // describing memory exclusively owned by this `Pages` object.
        let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
        page_contents.clone_from_slice(data);
    }

    /// Zero init pages
    pub fn zero_init(&mut self) {
        unsafe {
            self.get_as_slice::<u8>().fill(0);
        }
    }

    /// Get physical address
    pub fn get_pa(&self) -> PhysicalAddress {
        PhysicalAddress(self.pa)
    }

    /// Get as mutable slice
    ///
    /// **Unsafe**: The returned slice is created from its address and length which is stored in the
    /// object. The caller has to ensure that no other references are being used of the pages.
    pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
        // Base address must be suitably aligned for T. Note that a trailing
        // partial element is silently dropped by the integer division below.
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        // Physical address is translated to a kernel virtual address before use.
        core::slice::from_raw_parts_mut(
            KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Set contents from slice
    ///
    /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
    pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
        Pages {
            // Kernel virtual address of the slice converted back to a physical address
            pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
            length: core::mem::size_of_val(s),
            // A range built from a live slice is by definition in use
            used: true,
        }
    }
}
94
95impl Region for Pages {
96 type Resource = ();
Imre Kis589aa052024-09-10 20:19:47 +020097 type Base = usize;
98 type Length = usize;
Imre Kisf0370e82024-11-18 16:24:55 +010099 type Alignment = usize;
Imre Kis703482d2023-11-30 15:51:26 +0100100
101 fn base(&self) -> usize {
102 self.pa
103 }
104
105 fn length(&self) -> usize {
106 self.length
107 }
108
109 fn used(&self) -> bool {
110 self.used
111 }
112
113 fn contains(&self, base: usize, length: usize) -> bool {
114 if let (Some(end), Some(self_end)) =
115 (base.checked_add(length), self.pa.checked_add(self.length))
116 {
117 self.pa <= base && end <= self_end
118 } else {
119 false
120 }
121 }
122
Imre Kisf0370e82024-11-18 16:24:55 +0100123 fn try_alloc_aligned(
124 &self,
125 length: Self::Length,
126 alignment: Self::Alignment,
127 ) -> Option<Self::Base> {
128 let aligned_base = self.pa.next_multiple_of(alignment);
129 let base_offset = aligned_base.checked_sub(self.pa)?;
130
131 let required_length = base_offset.checked_add(length)?;
132 if required_length <= self.length {
133 Some(aligned_base)
134 } else {
135 None
136 }
137 }
138
Imre Kis703482d2023-11-30 15:51:26 +0100139 fn try_append(&mut self, other: &Self) -> bool {
140 if let (Some(self_end), Some(new_length)) = (
141 self.pa.checked_add(self.length),
142 self.length.checked_add(other.length),
143 ) {
144 if self.used == other.used && self_end == other.pa {
145 self.length = new_length;
146 true
147 } else {
148 false
149 }
150 } else {
151 false
152 }
153 }
154
155 fn create_split(
156 &self,
157 base: usize,
158 length: usize,
159 resource: Option<Self::Resource>,
160 ) -> (Self, Vec<Self>) {
161 assert!(self.contains(base, length));
162
163 let used = resource.is_some();
164 let mut res = Vec::new();
165 if self.pa != base {
166 res.push(Pages::new(self.pa, base - self.pa, self.used));
167 }
168 res.push(Pages::new(base, length, used));
169 if self.pa + self.length != base + length {
170 res.push(Pages::new(
171 base + length,
172 (self.pa + self.length) - (base + length),
173 self.used,
174 ));
175 }
176
177 (Pages::new(base, length, used), res)
178 }
179}
180
/// RegionPool implementation for pages
///
/// Cloning is cheap: clones share the same underlying pool through the `Arc`.
#[derive(Clone)]
pub struct PagePool {
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

// Errors are propagated unchanged from the underlying region pool.
type PagePoolError = RegionPoolError;
188
189impl PagePool {
190 /// Create new page pool
191 pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
192 let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
193 let length = page_pool_area.area.len();
194
195 let mut region_pool = RegionPool::new();
196 region_pool.add(Pages::new(pa, length, false)).unwrap();
197 Self {
198 pages: Arc::new(Mutex::new(region_pool)),
199 }
200 }
201
202 /// Allocate pages for given length
203 pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
204 self.pages
205 .lock()
Imre Kisf0370e82024-11-18 16:24:55 +0100206 .allocate(Self::round_up_to_page_size(length), (), None)
Imre Kis703482d2023-11-30 15:51:26 +0100207 }
208
209 /// Release pages
210 pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
211 self.pages.lock().release(pages_to_release)
212 }
213
214 fn round_up_to_page_size(length: usize) -> usize {
215 (length + Page::SIZE - 1) & !(Page::SIZE - 1)
216 }
217}
218
Imre Kis42935a22024-10-17 11:30:16 +0200219#[cfg(test)]
220mod tests {
221 use super::*;
Imre Kis703482d2023-11-30 15:51:26 +0100222
Imre Kis42935a22024-10-17 11:30:16 +0200223 #[test]
224 fn test_pages() {
225 let area = [0x5au8; 4096];
226 let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);
Imre Kis703482d2023-11-30 15:51:26 +0100227
Imre Kis42935a22024-10-17 11:30:16 +0200228 assert_eq!(area.as_ptr() as usize, pages.pa);
229 assert_eq!(area.len(), pages.length);
230 assert!(pages.used);
Imre Kisd5b96fd2024-09-11 17:04:32 +0200231 assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
Imre Kis42935a22024-10-17 11:30:16 +0200232 assert_eq!(area.as_ptr() as usize, pages.base());
233 assert_eq!(area.len(), pages.length());
234 assert!(pages.used());
Imre Kis703482d2023-11-30 15:51:26 +0100235
Imre Kis42935a22024-10-17 11:30:16 +0200236 pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
237 assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
Imre Kis703482d2023-11-30 15:51:26 +0100238
Imre Kis42935a22024-10-17 11:30:16 +0200239 pages.zero_init();
240 assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);
241
242 let s = unsafe { pages.get_as_slice() };
243 for (i, e) in s.iter_mut().enumerate().take(8) {
244 *e = i as u8;
245 }
246 assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
247
248 let from_slice = unsafe { Pages::from_slice(s) };
249 assert_eq!(area.as_ptr() as usize, from_slice.pa);
250 assert_eq!(area.len(), from_slice.length);
251 assert!(from_slice.used);
Imre Kis703482d2023-11-30 15:51:26 +0100252 }
Imre Kis703482d2023-11-30 15:51:26 +0100253
Imre Kis42935a22024-10-17 11:30:16 +0200254 #[test]
255 fn test_pages_contains() {
256 let pages = Pages::new(0x4000_0000, 0x4000, true);
Imre Kis703482d2023-11-30 15:51:26 +0100257
Imre Kis42935a22024-10-17 11:30:16 +0200258 assert!(!pages.contains(0x3fff_f000, 0x1000));
259 assert!(!pages.contains(0x3fff_f000, 0x1_0000));
260 assert!(!pages.contains(0x4000_4000, 0x1000));
261 assert!(!pages.contains(0x4000_0000, 0x1_0000));
Imre Kis703482d2023-11-30 15:51:26 +0100262
Imre Kis42935a22024-10-17 11:30:16 +0200263 // Overflow tests
264 }
Imre Kisf0370e82024-11-18 16:24:55 +0100265
266 #[test]
267 fn test_pages_try_alloc() {
268 let pages = Pages::new(0x4000_1000, 0x10000, false);
269
270 assert_eq!(Some(0x4000_1000), pages.try_alloc(0x1000, None));
271 assert_eq!(Some(0x4000_2000), pages.try_alloc(0x1000, Some(0x2000)));
272 assert_eq!(None, pages.try_alloc(0x1000, Some(0x10_0000)));
273 }
Imre Kis703482d2023-11-30 15:51:26 +0100274}