blob: 5161ae7a131f7f0e9a6802d2750377bd3e4eb20d [file] [log] [blame]
Imre Kis703482d2023-11-30 15:51:26 +01001// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Region pool implementation for allocating pages
5
6use core::slice;
7
8use alloc::sync::Arc;
9use alloc::vec::Vec;
10use spin::Mutex;
11
12use super::kernel_space::KernelSpace;
13use super::region_pool::{Region, RegionPool, RegionPoolError};
14
/// Single 4kB page definition
pub struct Page {}

impl Page {
    /// Size of a single page in bytes (4 KiB).
    pub const SIZE: usize = 0x1000;
}
21
/// Area for allocating pages
///
/// The backing storage is page aligned (`align(4096)`) so every page handed
/// out by the pool starts on a 4kB boundary.
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    area: [u8; AREA_SIZE],
}

impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
    /// Create a new, zero-filled page pool area.
    pub const fn new() -> Self {
        Self {
            area: [0; AREA_SIZE],
        }
    }
}

// Types with a no-argument `new` conventionally implement `Default` as well
// (clippy::new_without_default).
impl<const AREA_SIZE: usize> Default for PagePoolArea<AREA_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}
35
/// Continuous pages
///
/// Describes a physically contiguous range of pages and its allocation state.
pub struct Pages {
    // Physical address of the first page of the range
    pa: usize,
    // Length of the range in bytes
    length: usize,
    // True when the range is currently allocated from the pool
    used: bool,
}
42
43impl Pages {
44 // Create new instance
45 pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
46 Pages { pa, length, used }
47 }
48
49 /// Copy data to pages
50 pub fn copy_data_to_page(&mut self, data: &[u8]) {
51 assert!(data.len() <= self.length);
52
53 let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
54 page_contents.clone_from_slice(data);
55 }
56
57 /// Zero init pages
58 pub fn zero_init(&mut self) {
59 unsafe {
60 self.get_as_slice::<u8>().fill(0);
61 }
62 }
63
64 /// Get physical address
65 pub fn get_pa(&self) -> usize {
66 self.pa
67 }
68
69 /// Get as mutable slice
70 ///
71 /// **Unsafe**: The returned slice is created from its address and length which is stored in the
72 /// object. The caller has to ensure that no other references are being used of the pages.
73 pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
74 assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
75
76 core::slice::from_raw_parts_mut(
77 KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
78 self.length / core::mem::size_of::<T>(),
79 )
80 }
81
82 /// Set contents from slice
83 ///
84 /// **Unsafe:** The caller has to ensure that the passed slice is a valid page range.
85 pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
86 Pages {
87 pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
88 length: core::mem::size_of_val(s),
89 used: true,
90 }
91 }
92}
93
94impl Region for Pages {
95 type Resource = ();
Imre Kis589aa052024-09-10 20:19:47 +020096 type Base = usize;
97 type Length = usize;
Imre Kis703482d2023-11-30 15:51:26 +010098
99 fn base(&self) -> usize {
100 self.pa
101 }
102
103 fn length(&self) -> usize {
104 self.length
105 }
106
107 fn used(&self) -> bool {
108 self.used
109 }
110
111 fn contains(&self, base: usize, length: usize) -> bool {
112 if let (Some(end), Some(self_end)) =
113 (base.checked_add(length), self.pa.checked_add(self.length))
114 {
115 self.pa <= base && end <= self_end
116 } else {
117 false
118 }
119 }
120
121 fn try_append(&mut self, other: &Self) -> bool {
122 if let (Some(self_end), Some(new_length)) = (
123 self.pa.checked_add(self.length),
124 self.length.checked_add(other.length),
125 ) {
126 if self.used == other.used && self_end == other.pa {
127 self.length = new_length;
128 true
129 } else {
130 false
131 }
132 } else {
133 false
134 }
135 }
136
137 fn create_split(
138 &self,
139 base: usize,
140 length: usize,
141 resource: Option<Self::Resource>,
142 ) -> (Self, Vec<Self>) {
143 assert!(self.contains(base, length));
144
145 let used = resource.is_some();
146 let mut res = Vec::new();
147 if self.pa != base {
148 res.push(Pages::new(self.pa, base - self.pa, self.used));
149 }
150 res.push(Pages::new(base, length, used));
151 if self.pa + self.length != base + length {
152 res.push(Pages::new(
153 base + length,
154 (self.pa + self.length) - (base + length),
155 self.used,
156 ));
157 }
158
159 (Pages::new(base, length, used), res)
160 }
161}
162
/// RegionPool implementation for pages
///
/// Cloning is cheap: it hands out another handle to the same shared,
/// lock-protected pool.
#[derive(Clone)]
pub struct PagePool {
    // Shared region pool guarded by a spinlock
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

// Page pool errors are the underlying region pool errors, unchanged.
type PagePoolError = RegionPoolError;
170
171impl PagePool {
172 /// Create new page pool
173 pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
174 let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
175 let length = page_pool_area.area.len();
176
177 let mut region_pool = RegionPool::new();
178 region_pool.add(Pages::new(pa, length, false)).unwrap();
179 Self {
180 pages: Arc::new(Mutex::new(region_pool)),
181 }
182 }
183
184 /// Allocate pages for given length
185 pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
186 self.pages
187 .lock()
188 .allocate(Self::round_up_to_page_size(length), ())
189 }
190
191 /// Release pages
192 pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
193 self.pages.lock().release(pages_to_release)
194 }
195
196 fn round_up_to_page_size(length: usize) -> usize {
197 (length + Page::SIZE - 1) & !(Page::SIZE - 1)
198 }
199}
200
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pages() {
        // The backing array must be a mutable binding and its address must be
        // taken via `as_mut_ptr`: the previous version wrote through a pointer
        // derived from an immutable binding's `as_ptr`, which is undefined
        // behavior.
        // NOTE(review): reading `area` while the raw-derived slice `s` is
        // still live below remains aliasing-sketchy under Miri — worth a
        // follow-up.
        let mut area = [0x5au8; 4096];
        let area_base = area.as_mut_ptr() as usize;
        let area_length = area.len();
        let mut pages = Pages::new(area_base, area_length, true);

        assert_eq!(area_base, pages.pa);
        assert_eq!(area_length, pages.length);
        assert!(pages.used);
        assert_eq!(area_base, pages.get_pa());
        assert_eq!(area_base, pages.base());
        assert_eq!(area_length, pages.length());
        assert!(pages.used());

        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        pages.zero_init();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        let s = unsafe { pages.get_as_slice() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        let from_slice = unsafe { Pages::from_slice(s) };
        assert_eq!(area_base, from_slice.pa);
        assert_eq!(area_length, from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        // Ranges fully inside the region are contained.
        assert!(pages.contains(0x4000_0000, 0x4000));
        assert!(pages.contains(0x4000_1000, 0x1000));

        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests: ranges whose end would wrap around the address
        // space must never be reported as contained.
        assert!(!pages.contains(usize::MAX, 0x1000));
        assert!(!pages.contains(0x4000_0000, usize::MAX));

        // A region whose own end would wrap rejects every query.
        let wrapping_region = Pages::new(usize::MAX - 0xfff, 0x1000, true);
        assert!(!wrapping_region.contains(usize::MAX - 0xfff, 0x800));
    }
}