blob: cc50b30c1c5c49a4b94a1461590987462e812112 [file] [log] [blame]
// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Region pool implementation for allocating pages

6use core::slice;
7
8use alloc::sync::Arc;
9use alloc::vec::Vec;
10use spin::Mutex;
11
12use super::kernel_space::KernelSpace;
13use super::region_pool::{Region, RegionPool, RegionPoolError};
14
/// Single 4kB page definition
pub struct Page {}

impl Page {
    /// Size of one page in bytes (4 KiB).
    pub const SIZE: usize = 0x1000;
}
21
/// Area for allocating pages
///
/// Page-aligned (4096-byte) backing storage from which a [`PagePool`] hands
/// out pages.
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    // Raw backing bytes; presumably AREA_SIZE is a multiple of Page::SIZE —
    // not enforced here, TODO confirm with callers.
    area: [u8; AREA_SIZE],
}
27
28impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
29 pub const fn new() -> Self {
30 Self {
31 area: [0; AREA_SIZE],
32 }
33 }
34}
35
/// Continuous pages
pub struct Pages {
    // Physical address of the first byte of the range
    pa: usize,
    // Length of the range in bytes
    length: usize,
    // Whether the range is currently allocated (used) or free
    used: bool,
}
42
43impl Pages {
44 // Create new instance
45 pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
46 Pages { pa, length, used }
47 }
48
49 /// Copy data to pages
50 pub fn copy_data_to_page(&mut self, data: &[u8]) {
51 assert!(data.len() <= self.length);
52
53 let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
54 page_contents.clone_from_slice(data);
55 }
56
57 /// Zero init pages
58 pub fn zero_init(&mut self) {
59 unsafe {
60 self.get_as_slice::<u8>().fill(0);
61 }
62 }
63
64 /// Get physical address
65 pub fn get_pa(&self) -> usize {
66 self.pa
67 }
68
69 /// Get as mutable slice
70 ///
71 /// **Unsafe**: The returned slice is created from its address and length which is stored in the
72 /// object. The caller has to ensure that no other references are being used of the pages.
73 pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
74 assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
75
76 core::slice::from_raw_parts_mut(
77 KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
78 self.length / core::mem::size_of::<T>(),
79 )
80 }
81
82 /// Set contents from slice
83 ///
84 /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
85 pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
86 Pages {
87 pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
88 length: core::mem::size_of_val(s),
89 used: true,
90 }
91 }
92}
93
94impl Region for Pages {
95 type Resource = ();
96
97 fn base(&self) -> usize {
98 self.pa
99 }
100
101 fn length(&self) -> usize {
102 self.length
103 }
104
105 fn used(&self) -> bool {
106 self.used
107 }
108
109 fn contains(&self, base: usize, length: usize) -> bool {
110 if let (Some(end), Some(self_end)) =
111 (base.checked_add(length), self.pa.checked_add(self.length))
112 {
113 self.pa <= base && end <= self_end
114 } else {
115 false
116 }
117 }
118
119 fn try_append(&mut self, other: &Self) -> bool {
120 if let (Some(self_end), Some(new_length)) = (
121 self.pa.checked_add(self.length),
122 self.length.checked_add(other.length),
123 ) {
124 if self.used == other.used && self_end == other.pa {
125 self.length = new_length;
126 true
127 } else {
128 false
129 }
130 } else {
131 false
132 }
133 }
134
135 fn create_split(
136 &self,
137 base: usize,
138 length: usize,
139 resource: Option<Self::Resource>,
140 ) -> (Self, Vec<Self>) {
141 assert!(self.contains(base, length));
142
143 let used = resource.is_some();
144 let mut res = Vec::new();
145 if self.pa != base {
146 res.push(Pages::new(self.pa, base - self.pa, self.used));
147 }
148 res.push(Pages::new(base, length, used));
149 if self.pa + self.length != base + length {
150 res.push(Pages::new(
151 base + length,
152 (self.pa + self.length) - (base + length),
153 self.used,
154 ));
155 }
156
157 (Pages::new(base, length, used), res)
158 }
159}
160
/// RegionPool implementation for pages
#[derive(Clone)]
pub struct PagePool {
    // Shared, lock-protected pool of page regions; cloning a PagePool only
    // bumps the Arc refcount, so clones operate on the same pool.
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

// Error type returned by page pool operations; aliases the underlying
// region pool error.
type PagePoolError = RegionPoolError;
168
169impl PagePool {
170 /// Create new page pool
171 pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
172 let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
173 let length = page_pool_area.area.len();
174
175 let mut region_pool = RegionPool::new();
176 region_pool.add(Pages::new(pa, length, false)).unwrap();
177 Self {
178 pages: Arc::new(Mutex::new(region_pool)),
179 }
180 }
181
182 /// Allocate pages for given length
183 pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
184 self.pages
185 .lock()
186 .allocate(Self::round_up_to_page_size(length), ())
187 }
188
189 /// Release pages
190 pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
191 self.pages.lock().release(pages_to_release)
192 }
193
194 fn round_up_to_page_size(length: usize) -> usize {
195 (length + Page::SIZE - 1) & !(Page::SIZE - 1)
196 }
197}
198
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pages() {
        // NOTE(review): the pages write through a pointer derived from an
        // immutable binding; this relies on pa_to_kernel being an identity
        // mapping in the test build and is questionable under Rust's
        // aliasing rules — consider an UnsafeCell-backed buffer.
        let area = [0x5au8; 4096];
        let mut pages = Pages::new(area.as_ptr() as usize, area.len(), true);

        // Accessor and Region trait views agree with the raw fields.
        assert_eq!(area.as_ptr() as usize, pages.pa);
        assert_eq!(area.len(), pages.length);
        assert!(pages.used);
        assert_eq!(area.as_ptr() as usize, pages.get_pa());
        assert_eq!(area.as_ptr() as usize, pages.base());
        assert_eq!(area.len(), pages.length());
        assert!(pages.used());

        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        pages.zero_init();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        let s = unsafe { pages.get_as_slice() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        // Round-trip: a Pages rebuilt from the slice matches the original.
        let from_slice = unsafe { Pages::from_slice(s) };
        assert_eq!(area.as_ptr() as usize, from_slice.pa);
        assert_eq!(area.len(), from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        // Ranges fully inside the region are contained.
        assert!(pages.contains(0x4000_0000, 0x4000));
        assert!(pages.contains(0x4000_0000, 0x1000));
        assert!(pages.contains(0x4000_3000, 0x1000));

        // Ranges falling (partially) outside are rejected.
        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests: overflowing end addresses must never be contained.
        assert!(!pages.contains(usize::MAX, 0x1000));
        assert!(!pages.contains(0x4000_0000, usize::MAX));
        // A region whose own end overflows contains nothing, not even itself.
        let wrapping = Pages::new(usize::MAX - 0xfff, 0x1000, true);
        assert!(!wrapping.contains(usize::MAX - 0xfff, 0x1000));
    }
}
Imre Kis703482d2023-11-30 15:51:26 +0100245}