Create internal crate::arch module

Gather architecture-specific code into the crate::arch module for easier
separation between actual implementations and stubs. Add stubs for cache
maintenance functions.

Signed-off-by: Imre Kis <imre.kis@arm.com>
Change-Id: I476075e233c33762af0105fe37380ab5bccb3911
diff --git a/src/lib.rs b/src/lib.rs
index 4ffc12a..5ac3a56 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -761,7 +761,7 @@
         if granule.block_size_at_level(level) == block_size {
             // Follow break-before-make sequence
             descriptor.set_block_or_invalid_descriptor_to_invalid(level);
-            Self::invalidate(regime, Some(va));
+            arch::invalidate(regime, Some(va));
             descriptor.set_block_descriptor(granule, level, pa, attributes);
             return Ok(());
         }
@@ -877,7 +877,7 @@
 
                     // Follow break-before-make sequence
                     descriptor.set_block_or_invalid_descriptor_to_invalid(level);
-                    Self::invalidate(regime, Some(current_va));
+                    arch::invalidate(regime, Some(current_va));
 
                     // Set table descriptor if the table is configured properly
                     descriptor.set_table_descriptor(level, next_table_pa, None);
@@ -952,7 +952,7 @@
         // We reached the required level with the matching block size
         if granule.block_size_at_level(level) == block_size {
             descriptor.set_block_descriptor_to_invalid(level);
-            Self::invalidate(regime, Some(va));
+            arch::invalidate(regime, Some(va));
             return;
         }
 
@@ -1091,20 +1091,10 @@
     /// section 'D7.5.9.2 The data cache maintenance instruction (DC)' of 'ARM DDI 0487L'. Since the
     /// cache clean operation might alter the visible contents of the affected memory area for other
     /// cores as well, the code must be designed to account for these changes.
-    #[cfg(target_arch = "aarch64")]
     pub unsafe fn clean_data_cache(va: VirtualAddress, length: usize) {
-        let line_size = Self::dcache_line_size();
-        let address_mask = !(line_size - 1);
-
-        for address in (va.0 & address_mask..va.0 + length).step_by(line_size) {
-            // SAFETY: If the functions safety conditions are met, the 'dc' instruction cannot
-            // violate Rust's safety guarantees.
-            unsafe { core::arch::asm!("dc cvac, {}", in(reg) address) }
-        }
-
-        // SAFETY: Memory barrier.
+        // SAFETY: The called function has the same safety requirements as this function.
         unsafe {
-            core::arch::asm!("dsb ish");
+            arch::clean_data_cache(va, length);
         }
     }
 
@@ -1117,49 +1107,19 @@
     /// 'ARM DDI 0487L'. Since the cache clean operation might alter the visible contents of the
     /// affected memory area for other cores as well, the code must be designed to account for these
     /// changes.
-    #[cfg(target_arch = "aarch64")]
     pub unsafe fn invalidate_instruction_cache(va: VirtualAddress, length: usize) {
-        let line_size = Self::icache_line_size();
-        let address_mask = !(line_size - 1);
-
-        for address in (va.0 & address_mask..va.0 + length).step_by(line_size) {
-            // SAFETY: If the functions safety conditions are met, the 'ic' instruction cannot
-            // violate Rust's safety guarantees.
-            unsafe { core::arch::asm!("ic ivau, {}", in(reg) address) }
-        }
-
-        // SAFETY: Memory barrier.
+        // SAFETY: The called function has the same safety requirements as this function.
         unsafe {
-            core::arch::asm!("dsb ish");
+            arch::invalidate_instruction_cache(va, length);
         }
     }
+}
 
-    /// Returns the data cache line size in bytes.
-    #[cfg(target_arch = "aarch64")]
-    fn dcache_line_size() -> usize {
-        const WORD_SIZE: usize = 4;
-        let ctr_el0: u64;
+#[cfg(target_arch = "aarch64")]
+mod arch {
+    use super::{address::VirtualAddress, TranslationRegime};
 
-        unsafe { core::arch::asm!("mrs {0}, ctr_el0", out(reg) ctr_el0) };
-
-        let dminline = (ctr_el0 >> 16) & 0xf;
-        WORD_SIZE << dminline
-    }
-
-    /// Returns the instruction cache line size in bytes.
-    #[cfg(target_arch = "aarch64")]
-    fn icache_line_size() -> usize {
-        const WORD_SIZE: usize = 4;
-        let ctr_el0: u64;
-
-        unsafe { core::arch::asm!("mrs {0}, ctr_el0", out(reg) ctr_el0) };
-
-        let iminline = ctr_el0 & 0xf;
-        WORD_SIZE << iminline
-    }
-
-    #[cfg(target_arch = "aarch64")]
-    fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
+    pub(super) fn invalidate(regime: &TranslationRegime, va: Option<VirtualAddress>) {
         // SAFETY: The assembly code invalidates the translation table entry of
         // the VA or all entries of the translation regime.
         unsafe {
@@ -1214,8 +1174,84 @@
         }
     }
 
-    #[cfg(not(target_arch = "aarch64"))]
-    fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
+    /// Clean data cache by address to Point of Coherency.
+    ///
+    /// SAFETY: The clean operation is done on a virtual address of the currently active mapping.
+    /// The caller must ensure that the VA to PA translation does not cause a translation fault as
+    /// described in section 'D7.5.9.2 The data cache maintenance instruction (DC)' of
+    /// 'ARM DDI 0487L'. Since the cache clean operation might alter the visible contents of the
+    /// affected memory area for other cores as well, the code must be designed to account for these
+    /// changes.
+    pub(super) unsafe fn clean_data_cache(va: VirtualAddress, length: usize) {
+        let line_size = dcache_line_size();
+        let address_mask = !(line_size - 1);
+
+        for address in (va.0 & address_mask..va.0 + length).step_by(line_size) {
+            // SAFETY: If the function's safety conditions are met, the 'dc' instruction cannot
+            // violate Rust's safety guarantees.
+            unsafe { core::arch::asm!("dc cvac, {}", in(reg) address) }
+        }
+
+        // SAFETY: Memory barrier.
+        unsafe {
+            core::arch::asm!("dsb ish");
+        }
+    }
+
+    /// Invalidate instruction cache by address to Point of Unification.
+    ///
+    /// SAFETY: The invalidate operation is done on a virtual address of the currently active
+    /// mapping. The caller must ensure that the VA to PA translation does not cause a translation
+    /// fault as described in section 'D7.5.9.2 The data cache maintenance instruction (DC)' of
+    /// 'ARM DDI 0487L'. Since the cache invalidate operation might alter the visible contents of
+    /// the affected memory area for other cores as well, the code must be designed to account for
+    /// these changes.
+    pub(super) unsafe fn invalidate_instruction_cache(va: VirtualAddress, length: usize) {
+        let line_size = icache_line_size();
+        let address_mask = !(line_size - 1);
+
+        for address in (va.0 & address_mask..va.0 + length).step_by(line_size) {
+            // SAFETY: If the function's safety conditions are met, the 'ic' instruction cannot
+            // violate Rust's safety guarantees.
+            unsafe { core::arch::asm!("ic ivau, {}", in(reg) address) }
+        }
+
+        // SAFETY: Memory barrier.
+        unsafe {
+            core::arch::asm!("dsb ish");
+        }
+    }
+
+    /// Returns the data cache line size in bytes.
+    fn dcache_line_size() -> usize {
+        const WORD_SIZE: usize = 4;
+        let ctr_el0: u64;
+
+        unsafe { core::arch::asm!("mrs {0}, ctr_el0", out(reg) ctr_el0) };
+
+        let dminline = (ctr_el0 >> 16) & 0xf;
+        WORD_SIZE << dminline
+    }
+
+    /// Returns the instruction cache line size in bytes.
+    fn icache_line_size() -> usize {
+        const WORD_SIZE: usize = 4;
+        let ctr_el0: u64;
+
+        unsafe { core::arch::asm!("mrs {0}, ctr_el0", out(reg) ctr_el0) };
+
+        let iminline = ctr_el0 & 0xf;
+        WORD_SIZE << iminline
+    }
+}
+
+#[cfg(not(target_arch = "aarch64"))]
+mod arch {
+    use super::{address::VirtualAddress, TranslationRegime};
+
+    pub(super) fn invalidate(_regime: &TranslationRegime, _va: Option<VirtualAddress>) {}
+    pub(super) unsafe fn clean_data_cache(_va: VirtualAddress, _length: usize) {}
+    pub(super) unsafe fn invalidate_instruction_cache(_va: VirtualAddress, _length: usize) {}
 }
 
 impl<K: KernelAddressTranslator, const VA_BITS: usize> fmt::Debug for Xlat<K, VA_BITS> {