// SPDX-FileCopyrightText: Copyright 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Region pool implementation for allocating pages

use core::slice;

use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::Mutex;

use super::address::PhysicalAddress;
use super::kernel_space::KernelSpace;
use super::region_pool::{Region, RegionPool, RegionPoolError};

/// Single 4 KiB page definition
pub struct Page {}

impl Page {
    pub const SIZE: usize = 4096;
}

/// Area for allocating pages
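///
/// The `align(4096)` attribute ensures that the backing array starts on a page
/// boundary.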
#[repr(C, align(4096))]
pub struct PagePoolArea<const AREA_SIZE: usize> {
    area: [u8; AREA_SIZE],
}

impl<const AREA_SIZE: usize> PagePoolArea<AREA_SIZE> {
    pub const fn new() -> Self {
        Self {
            area: [0; AREA_SIZE],
        }
    }
}

/// Contiguous pages
pub struct Pages {
    pa: usize,
    length: usize,
    used: bool,
}

impl Pages {
    // Create new instance
    pub(crate) fn new(pa: usize, length: usize, used: bool) -> Self {
        Pages { pa, length, used }
    }

    /// Copy data to pages
    pub fn copy_data_to_page(&mut self, data: &[u8]) {
        assert!(data.len() <= self.length);

        // Translate the physical address to a kernel virtual address before
        // writing, matching the address handling of `get_as_slice`.
        let page_contents = unsafe {
            slice::from_raw_parts_mut(
                KernelSpace::pa_to_kernel(self.pa as u64) as *mut u8,
                data.len(),
            )
        };
        page_contents.clone_from_slice(data);
    }

    /// Zero init pages
    pub fn zero_init(&mut self) {
        unsafe {
            self.get_as_slice::<u8>().fill(0);
        }
    }

    /// Get physical address
    pub fn get_pa(&self) -> PhysicalAddress {
        PhysicalAddress(self.pa)
    }

    /// Get as mutable slice
    ///
    /// # Safety
    ///
    /// The returned slice is created from the address and length stored in the
    /// object. The caller has to ensure that no other references to the pages
    /// are in use while the slice is alive.
    pub unsafe fn get_as_slice<T>(&mut self) -> &mut [T] {
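        // `align_of` is always a power of two, so masking the address with
        // `align - 1` checks that the start of the area is suitably aligned for `T`.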
        assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);

        core::slice::from_raw_parts_mut(
            KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
            self.length / core::mem::size_of::<T>(),
        )
    }

    /// Create a `Pages` object from a slice
    ///
    /// # Safety
    ///
    /// The caller has to ensure that the passed slice is a valid page range.
    pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
        Pages {
            pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
            length: core::mem::size_of_val(s),
            used: true,
        }
    }
}

impl Region for Pages {
    type Resource = ();
    type Base = usize;
    type Length = usize;

    fn base(&self) -> usize {
        self.pa
    }

    fn length(&self) -> usize {
        self.length
    }

    fn used(&self) -> bool {
        self.used
    }

    fn contains(&self, base: usize, length: usize) -> bool {
        if let (Some(end), Some(self_end)) =
            (base.checked_add(length), self.pa.checked_add(self.length))
        {
            self.pa <= base && end <= self_end
        } else {
            false
        }
    }

    fn try_append(&mut self, other: &Self) -> bool {
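        // Two regions can only be merged if they are in the same allocation
        // state and `other` starts exactly where `self` ends.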
        if let (Some(self_end), Some(new_length)) = (
            self.pa.checked_add(self.length),
            self.length.checked_add(other.length),
        ) {
            if self.used == other.used && self_end == other.pa {
                self.length = new_length;
                true
            } else {
                false
            }
        } else {
            false
        }
    }

    fn create_split(
        &self,
        base: usize,
        length: usize,
        resource: Option<Self::Resource>,
    ) -> (Self, Vec<Self>) {
        assert!(self.contains(base, length));
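
        // The region is split into up to three parts: an unchanged prefix
        // before `base`, the requested range itself, and an unchanged suffix
        // after `base + length`. The requested range is also returned
        // separately, together with the full list of resulting regions.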
        let used = resource.is_some();
        let mut res = Vec::new();
        if self.pa != base {
            res.push(Pages::new(self.pa, base - self.pa, self.used));
        }
        res.push(Pages::new(base, length, used));
        if self.pa + self.length != base + length {
            res.push(Pages::new(
                base + length,
                (self.pa + self.length) - (base + length),
                self.used,
            ));
        }

        (Pages::new(base, length, used), res)
    }
}

/// RegionPool implementation for pages
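///
/// A minimal usage sketch (illustrative only; `PAGE_POOL_AREA` and its size are
/// placeholders, not part of the original API docs):
///
/// ```ignore
/// static PAGE_POOL_AREA: PagePoolArea<0x8000> = PagePoolArea::new();
///
/// let page_pool = PagePool::new(&PAGE_POOL_AREA);
/// let mut pages = page_pool.allocate_pages(2 * Page::SIZE).unwrap();
/// pages.zero_init();
/// page_pool.release_pages(pages).unwrap();
/// ```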
#[derive(Clone)]
pub struct PagePool {
    pages: Arc<Mutex<RegionPool<Pages>>>,
}

type PagePoolError = RegionPoolError;

impl PagePool {
    /// Create new page pool
    pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
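        // The pool area is linked into the kernel address space, so its start
        // address is converted to a physical address before being added to the
        // region pool.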
        let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
        let length = page_pool_area.area.len();

        let mut region_pool = RegionPool::new();
        region_pool.add(Pages::new(pa, length, false)).unwrap();
        Self {
            pages: Arc::new(Mutex::new(region_pool)),
        }
    }

    /// Allocate pages for the given length, rounded up to a whole multiple of the page size
    pub fn allocate_pages(&self, length: usize) -> Result<Pages, PagePoolError> {
        self.pages
            .lock()
            .allocate(Self::round_up_to_page_size(length), ())
    }

    /// Release pages
    pub fn release_pages(&self, pages_to_release: Pages) -> Result<(), PagePoolError> {
        self.pages.lock().release(pages_to_release)
    }

    fn round_up_to_page_size(length: usize) -> usize {
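        // Round up to the next multiple of the page size: adding `SIZE - 1` and
        // masking off the low bits works because `Page::SIZE` is a power of two,
        // e.g. 1 -> 4096, 4096 -> 4096, 4097 -> 8192.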
        (length + Page::SIZE - 1) & !(Page::SIZE - 1)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pages() {
        let mut area = [0x5au8; 4096];
        let mut pages = Pages::new(area.as_mut_ptr() as usize, area.len(), true);

        assert_eq!(area.as_ptr() as usize, pages.pa);
        assert_eq!(area.len(), pages.length);
        assert!(pages.used);
        assert_eq!(PhysicalAddress(area.as_ptr() as usize), pages.get_pa());
        assert_eq!(area.as_ptr() as usize, pages.base());
        assert_eq!(area.len(), pages.length());
        assert!(pages.used());

        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        pages.zero_init();
        assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);

        let s = unsafe { pages.get_as_slice() };
        for (i, e) in s.iter_mut().enumerate().take(8) {
            *e = i as u8;
        }
        assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);

        let from_slice = unsafe { Pages::from_slice(s) };
        assert_eq!(area.as_ptr() as usize, from_slice.pa);
        assert_eq!(area.len(), from_slice.length);
        assert!(from_slice.used);
    }

    #[test]
    fn test_pages_contains() {
        let pages = Pages::new(0x4000_0000, 0x4000, true);

        assert!(!pages.contains(0x3fff_f000, 0x1000));
        assert!(!pages.contains(0x3fff_f000, 0x1_0000));
        assert!(!pages.contains(0x4000_4000, 0x1000));
        assert!(!pages.contains(0x4000_0000, 0x1_0000));

        // Overflow tests
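        // Illustrative completion (assumed, not from the original source): both
        // range ends are computed with `checked_add`, so a range that would wrap
        // around the address space must be rejected.
        assert!(!pages.contains(usize::MAX, 0x1000));
        assert!(!pages.contains(0x4000_0000, usize::MAX));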
    }
}