Introduce KernelAddressTranslator trait

Add KernelAddressTranslator as a generic parameter of Xlat in order to
decouple the dependency on KernelSpace. The trait translates between
physical addresses and the virtual addresses of the running kernel
context; Xlat uses it to access the translation tables.

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: Iaf4189429f21fced9d40e34fb309388165127124
diff --git a/src/page_pool.rs b/src/page_pool.rs
index c2707c7..d767caf 100644
--- a/src/page_pool.rs
+++ b/src/page_pool.rs
@@ -3,14 +3,14 @@
 
 //! Region pool implementation for allocating pages
 
-use core::slice;
-
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use spin::Mutex;
 
+use crate::address::VirtualAddress;
+use crate::KernelAddressTranslator;
+
 use super::address::PhysicalAddress;
-use super::kernel_space::KernelSpace;
 use super::region_pool::{Region, RegionPool, RegionPoolError};
 
 /// Single 4kB page definition
@@ -48,17 +48,22 @@
     }
 
     /// Copy data to pages
-    pub fn copy_data_to_page(&mut self, data: &[u8]) {
+    pub fn copy_data_to_page<K: KernelAddressTranslator>(&mut self, data: &[u8]) {
         assert!(data.len() <= self.length);
 
-        let page_contents = unsafe { slice::from_raw_parts_mut(self.pa as *mut u8, data.len()) };
+        let page_contents = unsafe {
+            core::slice::from_raw_parts_mut(
+                K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *mut u8,
+                data.len(),
+            )
+        };
         page_contents.clone_from_slice(data);
     }
 
     /// Zero init pages
-    pub fn zero_init(&mut self) {
+    pub fn zero_init<K: KernelAddressTranslator>(&mut self) {
         unsafe {
-            self.get_as_mut_slice::<u8>().fill(0);
+            self.get_as_mut_slice::<K, u8>().fill(0);
         }
     }
 
@@ -72,11 +77,11 @@
     /// # Safety
     /// The returned slice is created from its address and length which is stored in the
     /// object. The caller has to ensure that no other references are being used of the pages.
-    pub unsafe fn get_as_slice<T>(&self) -> &[T] {
+    pub unsafe fn get_as_slice<K: KernelAddressTranslator, T>(&self) -> &[T] {
         assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
 
         core::slice::from_raw_parts(
-            KernelSpace::pa_to_kernel(self.pa as u64) as *const T,
+            K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *const T,
             self.length / core::mem::size_of::<T>(),
         )
     }
@@ -86,11 +91,11 @@
     /// # Safety
     /// The returned slice is created from its address and length which is stored in the
     /// object. The caller has to ensure that no other references are being used of the pages.
-    pub unsafe fn get_as_mut_slice<T>(&mut self) -> &mut [T] {
+    pub unsafe fn get_as_mut_slice<K: KernelAddressTranslator, T>(&mut self) -> &mut [T] {
         assert!((core::mem::align_of::<T>() - 1) & self.pa == 0);
 
         core::slice::from_raw_parts_mut(
-            KernelSpace::pa_to_kernel(self.pa as u64) as *mut T,
+            K::pa_to_kernel(PhysicalAddress(self.pa)).0 as *mut T,
             self.length / core::mem::size_of::<T>(),
         )
     }
@@ -99,9 +104,9 @@
     ///
     /// # Safety
     ///  The caller has to ensure that the passed slice is a valid page range.
-    pub unsafe fn from_slice<T>(s: &mut [T]) -> Pages {
+    pub unsafe fn from_slice<K: KernelAddressTranslator, T>(s: &mut [T]) -> Pages {
         Pages {
-            pa: KernelSpace::kernel_to_pa(s.as_ptr() as u64) as usize,
+            pa: K::kernel_to_pa(VirtualAddress(s.as_ptr() as usize)).0,
             length: core::mem::size_of_val(s),
             used: true,
         }
@@ -204,8 +209,13 @@
 
 impl PagePool {
     /// Create new page pool
-    pub fn new<const AREA_SIZE: usize>(page_pool_area: &'static PagePoolArea<AREA_SIZE>) -> Self {
-        let pa = KernelSpace::kernel_to_pa(&page_pool_area.area[0] as *const u8 as u64) as usize;
+    pub fn new<K: KernelAddressTranslator, const AREA_SIZE: usize>(
+        page_pool_area: &'static PagePoolArea<AREA_SIZE>,
+    ) -> Self {
+        let pa = K::kernel_to_pa(VirtualAddress(
+            &page_pool_area.area[0] as *const u8 as usize,
+        ))
+        .0;
         let length = page_pool_area.area.len();
 
         let mut region_pool = RegionPool::new();
@@ -240,6 +250,18 @@
 mod tests {
     use super::*;
 
+    struct DummyKernelAddressTranslator {}
+
+    impl KernelAddressTranslator for DummyKernelAddressTranslator {
+        fn kernel_to_pa(va: VirtualAddress) -> PhysicalAddress {
+            va.identity_pa()
+        }
+
+        fn pa_to_kernel(pa: PhysicalAddress) -> VirtualAddress {
+            pa.identity_va()
+        }
+    }
+
     #[test]
     fn test_pages() {
         let area = [0x5au8; 4096];
@@ -253,19 +275,19 @@
         assert_eq!(area.len(), pages.length());
         assert!(pages.used());
 
-        pages.copy_data_to_page(&[0, 1, 2, 3, 4, 5, 6, 7]);
+        pages.copy_data_to_page::<DummyKernelAddressTranslator>(&[0, 1, 2, 3, 4, 5, 6, 7]);
         assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
 
-        pages.zero_init();
+        pages.zero_init::<DummyKernelAddressTranslator>();
         assert_eq!([0, 0, 0, 0, 0, 0, 0, 0], area[0..8]);
 
-        let s = unsafe { pages.get_as_mut_slice() };
+        let s = unsafe { pages.get_as_mut_slice::<DummyKernelAddressTranslator, u8>() };
         for (i, e) in s.iter_mut().enumerate().take(8) {
             *e = i as u8;
         }
         assert_eq!([0, 1, 2, 3, 4, 5, 6, 7], area[0..8]);
 
-        let from_slice = unsafe { Pages::from_slice(s) };
+        let from_slice = unsafe { Pages::from_slice::<DummyKernelAddressTranslator, u8>(s) };
         assert_eq!(area.as_ptr() as usize, from_slice.pa);
         assert_eq!(area.len(), from_slice.length);
         assert!(from_slice.used);