refactor(mm): use typedefs

* Add typedefs for memory modes and attrs
* Add typedef for page table levels
* Add typedef for the ptable ASID
* Rewrite `MM_MODE_` macros to use shifts instead of writing the
  values manually (see the example below).
* Rename `arch_mm_extra_attributes_from_vm` to
  `arch_mm_extra_mode_from_vm`, since it returns a mode rather than
  attributes.
* Use `ffa_id_t` for the ASID/VMID arguments of the TLB invalidation
  helpers.
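
For example, the new typedefs and the rewritten mode macros (taken from
the `inc/hf/mm.h` hunks below):

    typedef uint32_t mm_mode_t;
    typedef uint64_t mm_attr_t;
    typedef uint8_t mm_level_t;
    typedef uint16_t mm_asid_t;

    /* Before */
    #define MM_MODE_R UINT32_C(0x0001) /* read */
    /* After */
    #define MM_MODE_R (1U << 0) /* read */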

Change-Id: I783825777b4897692d48287fc689026a04ecba50
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
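---

A minimal sketch of how the typedefs read at a call site (illustrative
only, not part of the patch; the local variable names are hypothetical,
but both conversion functions appear with these signatures in the diff
below):

    mm_mode_t mode = MM_MODE_R | MM_MODE_W;
    mm_attr_t attrs = arch_mm_mode_to_stage2_attrs(mode);
    mm_mode_t roundtrip = arch_mm_stage2_attrs_to_mode(attrs);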
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index e126320..aff344a 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -12,6 +12,7 @@
 #include <stddef.h>
 
 #include "hf/addr.h"
+#include "hf/mm.h"
 
 #include "vmapi/hf/ffa.h"
 
@@ -27,46 +28,46 @@
 /**
  * Creates an absent PTE.
  */
-pte_t arch_mm_absent_pte(uint8_t level);
+pte_t arch_mm_absent_pte(mm_level_t level);
 
 /**
  * Creates a table PTE.
  */
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa);
 
 /**
  * Creates a block PTE.
  */
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs);
 
 /**
  * Checks whether a block is allowed at the given level of the page table.
  */
-bool arch_mm_is_block_allowed(uint8_t level);
+bool arch_mm_is_block_allowed(mm_level_t level);
 
 /**
  * Determines if a PTE is present i.e. it contains information and therefore
  * needs to exist in the page table. Any non-absent PTE is present.
  */
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level);
 
 /**
  * Determines if a PTE is valid i.e. it can affect the address space. Tables and
  * valid blocks fall into this category. Invalid blocks do not as they hold
  * information about blocks that are not in the address space.
  */
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level);
 
 /**
  * Determines if a PTE is a block and represents an address range, valid or
  * invalid.
  */
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level);
 
 /**
  * Determines if a PTE represents a reference to a table of PTEs.
  */
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level);
 
 /**
  * Clears the bits of an address that are ignored by the page table. In effect,
@@ -77,34 +78,34 @@
 /**
  * Extracts the start address of the PTE range.
  */
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level);
 
 /**
  * Extracts the address of the table referenced by the PTE.
  */
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level);
 
 /**
  * Extracts the attributes of the PTE.
  */
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level);
 
 /**
  * Merges the attributes of a block into those of its containing table.
  */
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
-					   uint64_t block_attrs);
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+					    mm_attr_t block_attrs);
 
 /**
  * Invalidates the given range of stage-1 TLB.
  */
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
 				     vaddr_t va_end);
 
 /**
  * Invalidates the given range of stage-2 TLB.
  */
-void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
+void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
 				     ipaddr_t va_end, bool non_secure);
 
 /**
@@ -122,12 +123,12 @@
 /**
  * Gets the maximum level allowed in the page table for stage-1.
  */
-uint8_t arch_mm_stage1_max_level(void);
+mm_level_t arch_mm_stage1_max_level(void);
 
 /**
  * Gets the maximum level allowed in the page table for stage-2.
  */
-uint8_t arch_mm_stage2_max_level(void);
+mm_level_t arch_mm_stage2_max_level(void);
 
 /**
  * Gets the number of concatenated page tables used at the root for stage-1.
@@ -147,22 +148,22 @@
 /**
  * Converts the mode into stage-1 attributes for a block PTE.
  */
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode);
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode);
 
 /**
  * Converts the mode into stage-2 attributes for a block PTE.
  */
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode);
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode);
 
 /**
  * Converts the stage-2 block attributes back to the corresponding mode.
  */
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs);
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs);
 
 /**
  * Converts the stage-1 block attributes back to the corresponding mode.
  */
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs);
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs);
 
 /**
  * Initializes the arch specific memory management.
@@ -172,7 +173,7 @@
 /**
  * Return the arch-specific mm mode for the send/recv pages of the given VM ID.
  */
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id);
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id);
 
 /**
  * Execute any barriers or synchronization that is required
diff --git a/inc/hf/arch/vm.h b/inc/hf/arch/vm.h
index 432f950..c4ee916 100644
--- a/inc/hf/arch/vm.h
+++ b/inc/hf/arch/vm.h
@@ -17,16 +17,16 @@
 bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool);
 bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool);
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
-			      paddr_t end, uint32_t mode, struct mpool *ppool);
+			      paddr_t end, mm_mode_t mode, struct mpool *ppool);
 void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode, struct mpool *ppool,
+			     paddr_t end, mm_mode_t mode, struct mpool *ppool,
 			     ipaddr_t *ipa);
 bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
 		   struct mpool *ppool);
 void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
 bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
-			  ipaddr_t end, uint32_t *mode);
+			  ipaddr_t end, mm_mode_t *mode);
 bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
-				   paddr_t end, uint32_t mode,
+				   paddr_t end, mm_mode_t mode,
 				   struct mpool *ppool, ipaddr_t *ipa,
 				   uint8_t dma_device_id);
diff --git a/inc/hf/ffa/ffa_memory.h b/inc/hf/ffa/ffa_memory.h
index ee00e14..ad0e9ad 100644
--- a/inc/hf/ffa/ffa_memory.h
+++ b/inc/hf/ffa/ffa_memory.h
@@ -9,6 +9,7 @@
 #pragma once
 
 #include "hf/ffa.h"
+#include "hf/mm.h"
 #include "hf/vm.h"
 
 /** Check validity of the FF-A memory send function attempt. */
@@ -41,7 +42,7 @@
  * it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals
  * with NS accesses by default.
  */
-uint32_t ffa_memory_get_other_world_mode(void);
+mm_mode_t ffa_memory_get_other_world_mode(void);
 
 bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current);
 bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current);
@@ -74,4 +75,4 @@
  * Set the security bit in `attributes` if specified by `mode`.
  */
 ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
-	ffa_memory_attributes_t attributes, uint32_t mode);
+	ffa_memory_attributes_t attributes, mm_mode_t mode);
diff --git a/inc/hf/ffa_memory_internal.h b/inc/hf/ffa_memory_internal.h
index f08c425..c373ad7 100644
--- a/inc/hf/ffa_memory_internal.h
+++ b/inc/hf/ffa_memory_internal.h
@@ -13,6 +13,7 @@
  */
 #pragma once
 
+#include "hf/mm.h"
 #define MAX_MEM_SHARES 100
 
 #include <stdbool.h>
@@ -92,7 +93,7 @@
 	 * This is used to reset the original configuration when sender invokes
 	 * FFA_MEM_RECLAIM_32.
 	 */
-	uint32_t sender_orig_mode;
+	mm_mode_t sender_orig_mode;
 
 	/**
 	 * True if all the fragments of this sharing request have been sent and
@@ -172,7 +173,7 @@
 struct ffa_value ffa_memory_send_complete(
 	struct vm_locked from_locked, struct share_states_locked share_states,
 	struct ffa_memory_share_state *share_state, struct mpool *page_pool,
-	uint32_t *orig_from_mode_ret);
+	mm_mode_t *orig_from_mode_ret);
 struct ffa_value ffa_memory_send_continue_validate(
 	struct share_states_locked share_states, ffa_memory_handle_t handle,
 	struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
@@ -182,14 +183,14 @@
 	struct vm_locked to_locked,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t sender_orig_mode, uint32_t share_func, bool clear,
-	struct mpool *page_pool, uint32_t *response_mode,
+	mm_mode_t sender_orig_mode, uint32_t share_func, bool clear,
+	struct mpool *page_pool, mm_mode_t *response_mode,
 	bool memory_protected);
 struct ffa_value ffa_region_group_identity_map(
 	struct vm_locked vm_locked,
 	struct ffa_memory_region_constituent **fragments,
 	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
+	mm_mode_t mode, struct mpool *ppool, enum ffa_map_action action,
 	bool *memory_protected);
 bool memory_region_receivers_from_other_world(
 	struct ffa_memory_region *memory_region);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 8f2d5d8..6023817 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -13,12 +13,15 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "hf/arch/mm.h"
-
 #include "hf/addr.h"
 #include "hf/mpool.h"
 #include "hf/static_assert.h"
 
+typedef uint32_t mm_mode_t; /* bitmask of the MM_MODE_* flags */
+typedef uint64_t mm_attr_t; /* arch-specific PTE attributes */
+typedef uint8_t mm_level_t; /* page table level */
+typedef uint16_t mm_asid_t; /* VMID/ASID of a page table */
+
 /* Keep macro alignment */
 /* clang-format off */
 
@@ -26,10 +29,10 @@
 #define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
 
 /* The following are arch-independent page mapping modes. */
-#define MM_MODE_R UINT32_C(0x0001) /* read */
-#define MM_MODE_W UINT32_C(0x0002) /* write */
-#define MM_MODE_X UINT32_C(0x0004) /* execute */
-#define MM_MODE_D UINT32_C(0x0008) /* device */
+#define MM_MODE_R (1U << 0) /* read */
+#define MM_MODE_W (1U << 1) /* write */
+#define MM_MODE_X (1U << 2) /* execute */
+#define MM_MODE_D (1U << 3) /* device */
 
 /*
  * Memory in stage-1 is either valid (present) or invalid (absent).
@@ -59,15 +62,15 @@
  *
  *  Modes are selected so that owner of exclusive memory is the default.
  */
-#define MM_MODE_INVALID UINT32_C(0x0010)
-#define MM_MODE_UNOWNED UINT32_C(0x0020)
-#define MM_MODE_SHARED  UINT32_C(0x0040)
-
-/* Specifies if a mapping will be a user mapping(EL0). */
-#define MM_MODE_USER    UINT32_C(0x0200)
+#define MM_MODE_INVALID (1U << 4)
+#define MM_MODE_UNOWNED (1U << 5)
+#define MM_MODE_SHARED  (1U << 6)
 
 /* Map page as non-global. */
-#define MM_MODE_NG UINT32_C(0x0100) /* non-global */
+#define MM_MODE_NG (1U << 8)
+
+/* Specifies if a mapping will be a user mapping (EL0). */
+#define MM_MODE_USER    (1U << 9)
 
 /* The mask for a mode that is considered unmapped. */
 #define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
@@ -95,7 +98,7 @@
 	 * VMID/ASID associated with a page table. ASID 0 is reserved for use by
 	 * the hypervisor.
 	 */
-	uint16_t id;
+	mm_asid_t id;
 	/** Address of the root of the page table. */
 	paddr_t root;
 };
@@ -110,24 +113,24 @@
 
 void mm_vm_enable_invalidation(void);
 
-bool mm_ptable_init(struct mm_ptable *ptable, uint16_t id,
+bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
 		    struct mm_flags flags, struct mpool *ppool);
 ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags);
 
-bool mm_vm_init(struct mm_ptable *ptable, uint16_t id, struct mpool *ppool);
+bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool);
 void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);
 
 bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool);
+			 mm_mode_t mode, struct mpool *ppool);
 void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool);
+			 mm_mode_t mode, struct mpool *ppool);
 
 bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
-			    paddr_t end, uint32_t mode, struct mpool *ppool);
+			    paddr_t end, mm_mode_t mode, struct mpool *ppool);
 void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+			   mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
 		 struct mpool *ppool);
 void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
@@ -135,15 +138,15 @@
 		  bool non_secure);
 void mm_vm_dump(const struct mm_ptable *ptable);
 bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
-		    ipaddr_t end, uint32_t *mode);
+		    ipaddr_t end, mm_mode_t *mode);
 bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
-		 uint32_t *mode);
+		 mm_mode_t *mode);
 
 struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
 struct mm_stage1_locked mm_lock_stage1(void);
 void mm_unlock_stage1(struct mm_stage1_locked *lock);
 void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
-		      paddr_t end, uint32_t mode, struct mpool *ppool);
+		      paddr_t end, mm_mode_t mode, struct mpool *ppool);
 bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
 	      struct mpool *ppool);
 void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
diff --git a/inc/hf/plat/iommu.h b/inc/hf/plat/iommu.h
index a144622..bb33871 100644
--- a/inc/hf/plat/iommu.h
+++ b/inc/hf/plat/iommu.h
@@ -44,7 +44,7 @@
  * that read and write modes are enforced by the IOMMU driver.
  */
 void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode);
+			     paddr_t end, mm_mode_t mode);
 
 /**
  * Configure IOMMU to perform address translation of memory transactions on the
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 7d16b6a..ffb1ec8 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -13,6 +13,7 @@
 #include "hf/addr.h"
 #include "hf/interrupt_desc.h"
 #include "hf/list.h"
+#include "hf/mm.h"
 #include "hf/spinlock.h"
 
 #include "vmapi/hf/ffa.h"
@@ -110,7 +111,7 @@
 	ipaddr_t ipaddr;
 	vaddr_t vaddr;
 	vaddr_t pc;
-	uint32_t mode;
+	mm_mode_t mode;
 };
 
 struct call_chain {
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index bb1a277..f161077 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -300,20 +300,20 @@
 bool vm_is_mailbox_busy(struct vm_locked to);
 bool vm_is_mailbox_other_world_owned(struct vm_locked to);
 bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+		     mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool);
+			 mm_mode_t mode, struct mpool *ppool);
 void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
 bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
 	      struct mpool *ppool);
 void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
 bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
 
 bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
-		     uint32_t *mode);
+		     mm_mode_t *mode);
 bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
-			      paddr_t end, uint32_t mode, struct mpool *ppool,
+			      paddr_t end, mm_mode_t mode, struct mpool *ppool,
 			      ipaddr_t *ipa, uint8_t dma_device_id);
 
 void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
diff --git a/src/api.c b/src/api.c
index 7f318ba..381cc42 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1435,7 +1435,7 @@
 /**
  * Check that the mode indicates memory that is valid, owned and exclusive.
  */
-static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
+static bool api_mode_valid_owned_and_exclusive(mm_mode_t mode)
 {
 	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
 			MM_MODE_SHARED)) == 0;
@@ -1447,7 +1447,7 @@
 static struct ffa_value api_vm_configure_stage1(
 	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
 	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
-	paddr_t pa_recv_end, uint32_t extra_attributes,
+	paddr_t pa_recv_end, mm_mode_t extra_mode,
 	struct mpool *local_page_pool)
 {
 	struct ffa_value ret;
@@ -1457,7 +1457,7 @@
 	 */
 	vm_locked.vm->mailbox.send =
 		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
-				MM_MODE_R | extra_attributes, local_page_pool);
+				MM_MODE_R | extra_mode, local_page_pool);
 	if (!vm_locked.vm->mailbox.send) {
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
@@ -1469,7 +1469,7 @@
 	 */
 	vm_locked.vm->mailbox.recv =
 		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
-				MM_MODE_W | extra_attributes, local_page_pool);
+				MM_MODE_W | extra_mode, local_page_pool);
 	if (!vm_locked.vm->mailbox.recv) {
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto fail_undo_send;
@@ -1514,9 +1514,9 @@
 	paddr_t pa_send_end;
 	paddr_t pa_recv_begin;
 	paddr_t pa_recv_end;
-	uint32_t orig_send_mode = 0;
-	uint32_t orig_recv_mode = 0;
-	uint32_t extra_attributes;
+	mm_mode_t orig_send_mode = 0;
+	mm_mode_t orig_recv_mode = 0;
+	mm_mode_t extra_mode;
 
 	/* We only allow these to be setup once. */
 	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
@@ -1585,8 +1585,8 @@
 		}
 
 		/* Take memory ownership away from the VM and mark as shared. */
-		uint32_t mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R |
-				MM_MODE_W;
+		mm_mode_t mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R |
+				 MM_MODE_W;
 		if (vm_locked.vm->el0_partition) {
 			mode |= MM_MODE_USER | MM_MODE_NG;
 		}
@@ -1623,8 +1623,8 @@
 		}
 	}
 
-	/* Get extra send/recv pages mapping attributes for the given VM ID. */
-	extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);
+	/* Get extra send/recv pages mapping mode for the given VM ID. */
+	extra_mode = arch_mm_extra_mode_from_vm(vm_locked.vm->id);
 
 	/*
 	 * For EL0 partitions, since both the partition and the hypervisor code
@@ -1638,12 +1638,12 @@
 	 * other partitions' buffers through cached translations.
 	 */
 	if (vm_locked.vm->el0_partition) {
-		extra_attributes |= MM_MODE_NG;
+		extra_mode |= MM_MODE_NG;
 	}
 
-	ret = api_vm_configure_stage1(
-		mm_stage1_locked, vm_locked, pa_send_begin, pa_send_end,
-		pa_recv_begin, pa_recv_end, extra_attributes, local_page_pool);
+	ret = api_vm_configure_stage1(mm_stage1_locked, vm_locked,
+				      pa_send_begin, pa_send_end, pa_recv_begin,
+				      pa_recv_end, extra_mode, local_page_pool);
 	if (ret.func != FFA_SUCCESS_32) {
 		goto fail_undo_send_and_recv;
 	}
@@ -4649,8 +4649,8 @@
 	struct vm_locked vm_locked;
 	struct ffa_value ret;
 	bool mode_ret;
-	uint32_t original_mode;
-	uint32_t new_mode;
+	mm_mode_t original_mode;
+	mm_mode_t new_mode;
 	struct mpool local_page_pool;
 	vaddr_t end_addr = va_add(base_addr, page_count * PAGE_SIZE);
 
diff --git a/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c b/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
index 32e622c..093e885 100644
--- a/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
+++ b/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
@@ -1390,7 +1390,7 @@
 }
 
 void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode)
+			     paddr_t end, mm_mode_t mode)
 {
 	(void)vm_locked;
 	(void)begin;
diff --git a/src/arch/aarch64/hftest/el0/mm.c b/src/arch/aarch64/hftest/el0/mm.c
index c0c1fcc..034ec29 100644
--- a/src/arch/aarch64/hftest/el0/mm.c
+++ b/src/arch/aarch64/hftest/el0/mm.c
@@ -8,6 +8,8 @@
 
 #include "hf/arch/vm/mm.h"
 
+#include "hf/arch/mm.h"
+
 /**
  * MM support is not done at EL0.
  * Define dummy functions for EL0 targets.
@@ -26,7 +28,7 @@
 {
 }
 
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
 {
 	(void)id;
 	return 0;
diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c
index c419820..483dc4d 100644
--- a/src/arch/aarch64/hftest/mm.c
+++ b/src/arch/aarch64/hftest/mm.c
@@ -9,6 +9,7 @@
 #include "hf/mm.h"
 
 #include "hf/arch/barriers.h"
+#include "hf/arch/mm.h"
 #include "hf/arch/vm/mm.h"
 
 #include "hf/dlog.h"
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 852d397..98d2e5a 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -1212,7 +1212,7 @@
  */
 static struct vcpu_fault_info fault_info_init(uintreg_t esr,
 					      const struct vcpu *vcpu,
-					      uint32_t mode)
+					      mm_mode_t mode)
 {
 	uint32_t fsc = esr & 0x3f;
 	struct vcpu_fault_info r;
diff --git a/src/arch/aarch64/hypervisor/other_world.c b/src/arch/aarch64/hypervisor/other_world.c
index 3f846b5..8078cb3 100644
--- a/src/arch/aarch64/hypervisor/other_world.c
+++ b/src/arch/aarch64/hypervisor/other_world.c
@@ -14,6 +14,7 @@
 #include "hf/dlog.h"
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
+#include "hf/mm.h"
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
@@ -148,10 +149,10 @@
 	paddr_t pa_recv_end)
 {
 	struct ffa_value ret;
-	uint32_t send_mode;
-	uint32_t recv_mode;
+	mm_mode_t send_mode;
+	mm_mode_t recv_mode;
 	struct vm_locked other_world_locked;
-	const uint32_t expected_mode =
+	const mm_mode_t expected_mode =
 		MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS;
 
 	other_world_locked = lock_other_world(vm_locked);
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 59ea714..e3496ff 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -130,7 +130,7 @@
 }
 
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
-			      paddr_t end, uint32_t mode, struct mpool *ppool)
+			      paddr_t end, mm_mode_t mode, struct mpool *ppool)
 {
 	struct mm_ptable *ptable = &vm_locked.vm->ptable;
 
@@ -148,7 +148,7 @@
 }
 
 void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode, struct mpool *ppool,
+			     paddr_t end, mm_mode_t mode, struct mpool *ppool,
 			     ipaddr_t *ipa)
 {
 	struct mm_ptable *ptable = &vm_locked.vm->ptable;
@@ -180,7 +180,7 @@
 		   struct mpool *ppool)
 {
 	bool ret;
-	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
 
 	ret = vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
 
@@ -209,7 +209,7 @@
 }
 
 bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
-			  ipaddr_t end, uint32_t *mode)
+			  ipaddr_t end, mm_mode_t *mode)
 {
 	bool ret;
 
@@ -222,8 +222,8 @@
 	ret = mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
 
 #if SECURE_WORLD == 1
-	uint32_t mode2;
-	const uint32_t mask =
+	mm_mode_t mode2;
+	const mm_mode_t mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 
 	/* If the region is fully unmapped in the secure IPA space. */
@@ -243,7 +243,7 @@
 }
 
 static bool arch_vm_iommu_mm_prepare(struct vm_locked vm_locked, paddr_t begin,
-				     paddr_t end, uint32_t mode,
+				     paddr_t end, mm_mode_t mode,
 				     struct mpool *ppool, uint8_t dma_device_id)
 {
 	struct mm_ptable *ptable = &vm_locked.vm->iommu_ptables[dma_device_id];
@@ -258,7 +258,7 @@
 }
 
 static void arch_vm_iommu_mm_commit(struct vm_locked vm_locked, paddr_t begin,
-				    paddr_t end, uint32_t mode,
+				    paddr_t end, mm_mode_t mode,
 				    struct mpool *ppool, ipaddr_t *ipa,
 				    uint8_t dma_device_id)
 {
@@ -274,7 +274,7 @@
 }
 
 bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
-				   paddr_t end, uint32_t mode,
+				   paddr_t end, mm_mode_t mode,
 				   struct mpool *ppool, ipaddr_t *ipa,
 				   uint8_t dma_device_id)
 {
diff --git a/src/arch/aarch64/inc/hf/arch/mmu.h b/src/arch/aarch64/inc/hf/arch/mmu.h
index d6eb366..670968b 100644
--- a/src/arch/aarch64/inc/hf/arch/mmu.h
+++ b/src/arch/aarch64/inc/hf/arch/mmu.h
@@ -11,10 +11,10 @@
 /** AArch64-specific mapping modes */
 
 /** Mapping mode defining MMU Stage-1 block/page non-secure bit */
-#define MM_MODE_NS UINT32_C(0x0080)
+#define MM_MODE_NS (1U << 7)
 
 /** Page mapping mode for tagged normal memory. */
-#define MM_MODE_T UINT32_C(0x0400)
+#define MM_MODE_T (1U << 10)
 
 #define tlbi(op)                               \
 	do {                                   \
diff --git a/src/arch/aarch64/inc/hf/arch/vm/vm.h b/src/arch/aarch64/inc/hf/arch/vm/vm.h
index 309f1e0..a8749b0 100644
--- a/src/arch/aarch64/inc/hf/arch/vm/vm.h
+++ b/src/arch/aarch64/inc/hf/arch/vm/vm.h
@@ -8,6 +8,8 @@
 
 #pragma once
 
+#include "hf/arch/mm.h"
+
 #include "hf/mm.h"
 
 /** Arch-specific information about a VM. */
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 1a37562..5a49d37 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -10,6 +10,7 @@
 
 #include "hf/arch/barriers.h"
 #include "hf/arch/cpu.h"
+#include "hf/arch/mm.h"
 #include "hf/arch/mmu.h"
 #include "hf/arch/std.h"
 
@@ -136,14 +137,14 @@
 	uintreg_t vstcr_el2;
 } arch_mm_config;
 
-static uint8_t mm_s1_max_level;
-static uint8_t mm_s2_max_level;
+static mm_level_t mm_s1_max_level;
+static mm_level_t mm_s2_max_level;
 static uint8_t mm_s2_root_table_count;
 
 /**
  * Returns the encoding of a page table entry that isn't present.
  */
-pte_t arch_mm_absent_pte(uint8_t level)
+pte_t arch_mm_absent_pte(mm_level_t level)
 {
 	(void)level;
 	return 0;
@@ -155,7 +156,7 @@
  * The spec says that 'Table descriptors for stage 2 translations do not
  * include any attribute field', so we don't take any attributes as arguments.
  */
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa)
 {
 	/* This is the same for all levels on aarch64. */
 	(void)level;
@@ -167,7 +168,7 @@
  *
  * The level must allow block entries.
  */
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs)
 {
 	pte_t pte = pa_addr(pa) | attrs;
 
@@ -183,7 +184,7 @@
  *
  * Level 0 must allow block entries.
  */
-bool arch_mm_is_block_allowed(uint8_t level)
+bool arch_mm_is_block_allowed(mm_level_t level)
 {
 	return level <= 2;
 }
@@ -192,7 +193,7 @@
  * Determines if the given pte is present, i.e., if it is valid or it is invalid
  * but still holds state about the memory so needs to be present in the table.
  */
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level)
 {
 	return arch_mm_pte_is_valid(pte, level) || (pte & STAGE2_SW_OWNED) != 0;
 }
@@ -201,7 +202,7 @@
  * Determines if the given pte is valid, i.e., if it points to another table,
  * to a page, or a block of pages that can be accessed.
  */
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level)
 {
 	(void)level;
 	return (pte & PTE_VALID) != 0;
@@ -210,7 +211,7 @@
 /**
  * Determines if the given pte references a block of pages.
  */
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level)
 {
 	/* We count pages at level 0 as blocks. */
 	return arch_mm_is_block_allowed(level) &&
@@ -222,7 +223,7 @@
 /**
  * Determines if the given pte references another table.
  */
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level)
 {
 	return level != 0 && arch_mm_pte_is_valid(pte, level) &&
 	       (pte & PTE_TABLE) != 0;
@@ -246,7 +247,7 @@
  * Extracts the physical address of the block referred to by the given page
  * table entry.
  */
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level)
 {
 	(void)level;
 	return pa_init(pte_addr(pte));
@@ -256,7 +257,7 @@
  * Extracts the physical address of the page table referred to by the given page
  * table entry.
  */
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level)
 {
 	(void)level;
 	return pa_init(pte_addr(pte));
@@ -266,7 +267,7 @@
  * Extracts the architecture-specific attributes applied to the given page table
  * entry.
  */
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level)
 {
 	(void)level;
 	return pte & PTE_ATTR_MASK;
@@ -287,7 +288,7 @@
 /**
  * Invalidates stage-1 TLB entries referring to the given virtual address range.
  */
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
 				     vaddr_t va_end)
 {
 	uintvaddr_t begin = va_addr(va_begin);
@@ -444,9 +445,9 @@
 	dsb(sy);
 }
 
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode)
 {
-	uint64_t attrs = 0;
+	mm_attr_t attrs = 0;
 
 	attrs |= STAGE1_AF | STAGE1_SH(INNER_SHAREABLE);
 
@@ -530,9 +531,9 @@
 	return attrs;
 }
 
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs)
 {
-	uint32_t mode = 0;
+	mm_mode_t mode = 0;
 
 #if SECURE_WORLD == 1
 	if (attrs & STAGE1_NS) {
@@ -584,10 +585,10 @@
 	return mode;
 }
 
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode)
 {
-	uint64_t attrs = 0;
-	uint64_t access = 0;
+	mm_attr_t attrs = 0;
+	mm_attr_t access = 0;
 
 	/*
 	 * Default shareability is inner shareable in stage 2 tables. Per
@@ -660,9 +661,9 @@
 	return attrs;
 }
 
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs)
 {
-	uint32_t mode = 0;
+	mm_mode_t mode = 0;
 
 	if (attrs & STAGE2_S2AP(STAGE2_ACCESS_READ)) {
 		mode |= MM_MODE_R;
@@ -713,12 +714,12 @@
 	}
 }
 
-uint8_t arch_mm_stage1_max_level(void)
+mm_level_t arch_mm_stage1_max_level(void)
 {
 	return mm_s1_max_level;
 }
 
-uint8_t arch_mm_stage2_max_level(void)
+mm_level_t arch_mm_stage2_max_level(void)
 {
 	return mm_s2_max_level;
 }
@@ -739,8 +740,8 @@
  * in that table, returns equivalent attrs to use for a block which will replace
  * the entire table.
  */
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
-					   uint64_t block_attrs)
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+					    mm_attr_t block_attrs)
 {
 	/*
 	 * Only stage 1 table descriptors have attributes, but the bits are res0
@@ -980,7 +981,7 @@
 /**
  * Return the arch-specific mm mode for the send/recv pages of the given VM ID.
  */
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
 {
 	return ((id & HF_VM_ID_WORLD_MASK) == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS
 								   : 0;
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 567d73a..5f3fbc9 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -543,7 +543,7 @@
 }
 
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
-			      paddr_t end, uint32_t mode, struct mpool *ppool)
+			      paddr_t end, mm_mode_t mode, struct mpool *ppool)
 {
 	(void)vm_locked;
 	(void)begin;
@@ -555,7 +555,7 @@
 }
 
 void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode, struct mpool *ppool,
+			     paddr_t end, mm_mode_t mode, struct mpool *ppool,
 			     ipaddr_t *ipa)
 {
 	(void)vm_locked;
@@ -584,7 +584,7 @@
 }
 
 bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
-			  ipaddr_t end, uint32_t *mode)	 // NOLINT
+			  ipaddr_t end, mm_mode_t *mode)  // NOLINT
 {
 	(void)vm_locked;
 	(void)begin;
@@ -595,7 +595,7 @@
 }
 
 ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
-	ffa_memory_attributes_t attributes, uint32_t mode)
+	ffa_memory_attributes_t attributes, mm_mode_t mode)
 {
 	(void)mode;
 
@@ -624,7 +624,7 @@
 }
 
 bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
-				   paddr_t end, uint32_t mode,
+				   paddr_t end, mm_mode_t mode,
 				   struct mpool *ppool, ipaddr_t *ipa,
 				   struct dma_device_properties *dma_prop)
 {
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 742bd6b..8d46c13 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -15,9 +15,9 @@
  * to memory. The flags are shifted to avoid equality of modes and attributes.
  */
 #define PTE_ATTR_MODE_SHIFT 48
-#define PTE_ATTR_MODE_MASK                                              \
-	((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D |     \
-		    MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
+#define PTE_ATTR_MODE_MASK                                               \
+	((mm_attr_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D |     \
+		     MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
 	 << PTE_ATTR_MODE_SHIFT)
 
 /* The bit to distinguish a table from a block is the highest of the page bits.
@@ -30,49 +30,49 @@
 /* Offset the bits of each level so they can't be misused. */
 #define PTE_LEVEL_SHIFT(lvl) ((lvl) * 2)
 
-pte_t arch_mm_absent_pte(uint8_t level)
+pte_t arch_mm_absent_pte(mm_level_t level)
 {
-	return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
+	return ((mm_attr_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
 		<< PTE_ATTR_MODE_SHIFT) >>
 	       PTE_LEVEL_SHIFT(level);
 }
 
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa)
 {
 	return (pa_addr(pa) | PTE_TABLE) >> PTE_LEVEL_SHIFT(level);
 }
 
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs)
 {
 	return (pa_addr(pa) | attrs) >> PTE_LEVEL_SHIFT(level);
 }
 
-bool arch_mm_is_block_allowed(uint8_t level)
+bool arch_mm_is_block_allowed(mm_level_t level)
 {
 	(void)level;
 	return true;
 }
 
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level)
 {
 	return arch_mm_pte_is_valid(pte, level) ||
 	       !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
 		 MM_MODE_UNOWNED);
 }
 
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level)
 {
 	return !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
 		 MM_MODE_INVALID);
 }
 
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level)
 {
 	return arch_mm_pte_is_present(pte, level) &&
 	       !arch_mm_pte_is_table(pte, level);
 }
 
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level)
 {
 	return (pte << PTE_LEVEL_SHIFT(level)) & PTE_TABLE;
 }
@@ -82,28 +82,28 @@
 	return pa_init(pa_addr(pa) & PTE_ADDR_MASK);
 }
 
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level)
 {
 	return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
 }
 
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level)
 {
 	return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
 }
 
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level)
 {
 	return (pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK;
 }
 
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
-					   uint64_t block_attrs)
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+					    mm_attr_t block_attrs)
 {
 	return table_attrs | block_attrs;
 }
 
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
 				     vaddr_t va_end)
 {
 	(void)asid;
@@ -112,7 +112,7 @@
 	/* There's no modelling of the stage-1 TLB. */
 }
 
-void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
+void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
 				     ipaddr_t va_end, bool non_secure)
 {
 	(void)vmid;
@@ -135,12 +135,12 @@
 	(void)pa_bits;
 }
 
-uint8_t arch_mm_stage1_max_level(void)
+mm_level_t arch_mm_stage1_max_level(void)
 {
 	return 2;
 }
 
-uint8_t arch_mm_stage2_max_level(void)
+mm_level_t arch_mm_stage2_max_level(void)
 {
 	return 2;
 }
@@ -156,22 +156,22 @@
 	return 4;
 }
 
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode)
 {
-	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+	return ((mm_attr_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
 
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode)
 {
-	return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+	return ((mm_attr_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
 }
 
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs)
 {
 	return attrs >> PTE_ATTR_MODE_SHIFT;
 }
 
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs)
 {
 	return attrs >> PTE_ATTR_MODE_SHIFT;
 }
@@ -191,7 +191,7 @@
 	return true;
 }
 
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
 {
 	(void)id;
 
diff --git a/src/boot_info.c b/src/boot_info.c
index 7d9f013..c229e0a 100644
--- a/src/boot_info.c
+++ b/src/boot_info.c
@@ -8,10 +8,13 @@
 
 #include "hf/boot_info.h"
 
+#include "hf/arch/mm.h"
+
 #include "hf/assert.h"
 #include "hf/dlog.h"
 #include "hf/ffa.h"
 #include "hf/memiter.h"
+#include "hf/mm.h"
 #include "hf/std.h"
 
 #include "vmapi/hf/ffa.h"
diff --git a/src/ffa/hypervisor/ffa_memory.c b/src/ffa/hypervisor/ffa_memory.c
index e582ac8..edb3277 100644
--- a/src/ffa/hypervisor/ffa_memory.c
+++ b/src/ffa/hypervisor/ffa_memory.c
@@ -13,6 +13,7 @@
 #include "hf/ffa/init.h"
 #include "hf/ffa_internal.h"
 #include "hf/ffa_memory_internal.h"
+#include "hf/mm.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -54,7 +55,7 @@
 	return true;
 }
 
-uint32_t ffa_memory_get_other_world_mode(void)
+mm_mode_t ffa_memory_get_other_world_mode(void)
 {
 	return 0U;
 }
@@ -690,7 +691,7 @@
 }
 
 ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
-	ffa_memory_attributes_t attributes, uint32_t mode)
+	ffa_memory_attributes_t attributes, mm_mode_t mode)
 {
 	(void)mode;
 
diff --git a/src/ffa/spmc/ffa_memory.c b/src/ffa/spmc/ffa_memory.c
index 9c70605..bf93593 100644
--- a/src/ffa/spmc/ffa_memory.c
+++ b/src/ffa/spmc/ffa_memory.c
@@ -12,6 +12,7 @@
 
 #include "hf/ffa/ffa_memory.h"
 #include "hf/ffa_internal.h"
+#include "hf/mm.h"
 #include "hf/vm.h"
 
 #include "sysregs.h"
@@ -62,7 +63,7 @@
 	}
 }
 
-uint32_t ffa_memory_get_other_world_mode(void)
+mm_mode_t ffa_memory_get_other_world_mode(void)
 {
 	return MM_MODE_NS;
 }
@@ -141,7 +142,7 @@
  * supplied mode.
  */
 ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
-	ffa_memory_attributes_t attributes, uint32_t mode)
+	ffa_memory_attributes_t attributes, mm_mode_t mode)
 {
 	ffa_memory_attributes_t ret = attributes;
 
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 0f0f147..52969bd 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -353,7 +353,7 @@
 static inline uint32_t ffa_memory_permissions_to_mode(
 	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
 {
-	uint32_t mode = 0;
+	mm_mode_t mode = 0;
 
 	switch (permissions.data_access) {
 	case FFA_DATA_ACCESS_RO:
@@ -403,7 +403,7 @@
  * an appropriate FF-A error if not.
  */
 static struct ffa_value constituents_get_mode(
-	struct vm_locked vm, uint32_t *orig_mode,
+	struct vm_locked vm, mm_mode_t *orig_mode,
 	struct ffa_memory_region_constituent **fragments,
 	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
 {
@@ -741,12 +741,12 @@
  */
 static struct ffa_value ffa_send_check_transition(
 	struct vm_locked from, uint32_t share_func,
-	struct ffa_memory_region *memory_region, uint32_t *orig_from_mode,
+	struct ffa_memory_region *memory_region, mm_mode_t *orig_from_mode,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t *from_mode, enum ffa_map_action *map_action, bool zero)
+	mm_mode_t *from_mode, enum ffa_map_action *map_action, bool zero)
 {
-	const uint32_t state_mask =
+	const mm_mode_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
 	struct ffa_value ret;
 	bool all_receivers_from_current_world = true;
@@ -870,10 +870,10 @@
 }
 
 static struct ffa_value ffa_relinquish_check_transition(
-	struct vm_locked from, uint32_t *orig_from_mode,
+	struct vm_locked from, mm_mode_t *orig_from_mode,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t *from_mode, enum ffa_map_action *map_action)
+	mm_mode_t *from_mode, enum ffa_map_action *map_action)
 {
 	const uint32_t state_mask =
 		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
@@ -941,10 +941,10 @@
 	struct vm_locked to, uint32_t share_func,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t sender_orig_mode, uint32_t *to_mode, bool memory_protected,
+	mm_mode_t sender_orig_mode, mm_mode_t *to_mode, bool memory_protected,
 	enum ffa_map_action *map_action)
 {
-	uint32_t orig_to_mode;
+	mm_mode_t orig_to_mode;
 	struct ffa_value ret;
 
 	ret = constituents_get_mode(to, &orig_to_mode, fragments,
@@ -1052,7 +1052,7 @@
  */
 static struct ffa_value ffa_region_group_check_actions(
 	struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
-	struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
+	struct mpool *ppool, mm_mode_t mode, enum ffa_map_action action,
 	bool *memory_protected)
 {
 	struct ffa_value ret;
@@ -1120,7 +1120,7 @@
 
 static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
 					    paddr_t pa_begin, paddr_t pa_end,
-					    struct mpool *ppool, uint32_t mode,
+					    struct mpool *ppool, mm_mode_t mode,
 					    enum ffa_map_action action)
 {
 	switch (action) {
@@ -1215,7 +1215,7 @@
 	struct vm_locked vm_locked,
 	struct ffa_memory_region_constituent **fragments,
 	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
+	mm_mode_t mode, struct mpool *ppool, enum ffa_map_action action,
 	bool *memory_protected)
 {
 	uint32_t i;
@@ -1287,7 +1287,7 @@
  * flushed from the cache so the memory has been cleared across the system.
  */
 static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
-			 uint32_t extra_mode_attributes)
+			 mm_mode_t extra_mode)
 {
 	/*
 	 * TODO: change this to a CPU local single page window rather than a
@@ -1297,11 +1297,10 @@
 	 */
 	bool ret;
 	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
-	void *ptr =
-		mm_identity_map(stage1_locked, begin, end,
-				MM_MODE_W | (extra_mode_attributes &
-					     ffa_memory_get_other_world_mode()),
-				ppool);
+	void *ptr = mm_identity_map(
+		stage1_locked, begin, end,
+		MM_MODE_W | (extra_mode & ffa_memory_get_other_world_mode()),
+		ppool);
 	size_t size = pa_difference(begin, end);
 
 	if (!ptr) {
@@ -1329,7 +1328,7 @@
  * flushed from the cache so the memory has been cleared across the system.
  */
 static bool ffa_clear_memory_constituents(
-	uint32_t security_state_mode,
+	mm_mode_t security_state_mode,
 	struct ffa_memory_region_constituent **fragments,
 	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
 	struct mpool *page_pool)
@@ -1470,13 +1469,13 @@
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
 	uint32_t composite_total_page_count, uint32_t share_func,
 	struct ffa_memory_region *memory_region, struct mpool *page_pool,
-	uint32_t *orig_from_mode_ret, bool *memory_protected)
+	mm_mode_t *orig_from_mode_ret, bool *memory_protected)
 {
 	uint32_t i;
 	uint32_t j;
-	uint32_t orig_from_mode;
-	uint32_t clean_mode;
-	uint32_t from_mode;
+	mm_mode_t orig_from_mode;
+	mm_mode_t clean_mode;
+	mm_mode_t from_mode;
 	struct mpool local_page_pool;
 	struct ffa_value ret;
 	uint32_t constituents_total_page_count = 0;
@@ -1632,11 +1631,12 @@
 	struct vm_locked to_locked,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t sender_orig_mode, uint32_t share_func, bool clear,
-	struct mpool *page_pool, uint32_t *response_mode, bool memory_protected)
+	mm_mode_t sender_orig_mode, uint32_t share_func, bool clear,
+	struct mpool *page_pool, mm_mode_t *response_mode,
+	bool memory_protected)
 {
 	uint32_t i;
-	uint32_t to_mode;
+	mm_mode_t to_mode;
 	struct mpool local_page_pool;
 	struct ffa_value ret;
 	enum ffa_map_action map_action = MAP_ACTION_COMMIT;
@@ -1755,11 +1755,11 @@
 	struct vm_locked from_locked,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
-	uint32_t sender_orig_mode, struct mpool *page_pool, bool clear)
+	mm_mode_t sender_orig_mode, struct mpool *page_pool, bool clear)
 {
-	uint32_t orig_from_mode;
-	uint32_t clearing_mode;
-	uint32_t from_mode;
+	mm_mode_t orig_from_mode;
+	mm_mode_t clearing_mode;
+	mm_mode_t from_mode;
 	struct mpool local_page_pool;
 	struct ffa_value ret;
 	enum ffa_map_action map_action;
@@ -1866,7 +1866,7 @@
 struct ffa_value ffa_memory_send_complete(
 	struct vm_locked from_locked, struct share_states_locked share_states,
 	struct ffa_memory_share_state *share_state, struct mpool *page_pool,
-	uint32_t *orig_from_mode_ret)
+	mm_mode_t *orig_from_mode_ret)
 {
 	struct ffa_memory_region *memory_region = share_state->memory_region;
 	struct ffa_composite_memory_region *composite;
@@ -3304,7 +3304,7 @@
 	uint32_t retrieve_request_length, struct mpool *page_pool)
 {
 	ffa_memory_access_permissions_t permissions = {0};
-	uint32_t memory_to_mode;
+	mm_mode_t memory_to_mode;
 	struct ffa_value ret;
 	struct ffa_composite_memory_region *composite;
 	uint32_t total_length;
@@ -3317,7 +3317,7 @@
 	struct ffa_memory_access *receiver;
 	ffa_memory_handle_t handle = retrieve_request->handle;
 	ffa_memory_attributes_t attributes = {0};
-	uint32_t retrieve_mode = 0;
+	mm_mode_t retrieve_mode = 0;
 	struct ffa_memory_region *memory_region = share_state->memory_region;
 
 	if (!share_state->sending_complete) {
diff --git a/src/iommu/absent.c b/src/iommu/absent.c
index 2532f93..781ed24 100644
--- a/src/iommu/absent.c
+++ b/src/iommu/absent.c
@@ -27,7 +27,7 @@
 }
 
 void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
-			     paddr_t end, uint32_t mode)
+			     paddr_t end, mm_mode_t mode)
 {
 	(void)vm_locked;
 	(void)begin;
diff --git a/src/ipi_test.cc b/src/ipi_test.cc
index 984c28b..91cc229 100644
--- a/src/ipi_test.cc
+++ b/src/ipi_test.cc
@@ -9,8 +9,11 @@
 #include <gmock/gmock.h>
 
 extern "C" {
+#include "hf/arch/mm.h"
+
 #include "hf/check.h"
 #include "hf/hf_ipi.h"
+#include "hf/mm.h"
 }
 
 #include <map>
@@ -32,7 +35,7 @@
  */
 
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
-const int TOP_LEVEL = arch_mm_stage2_max_level();
+const mm_level_t TOP_LEVEL = arch_mm_stage2_max_level();
 class ipi : public ::testing::Test
 {
        protected:
diff --git a/src/load.c b/src/load.c
index b2dc996..4f4c410 100644
--- a/src/load.c
+++ b/src/load.c
@@ -479,9 +479,9 @@
 /**
  * Convert the manifest memory region attributes to the mode consumed by the mm layer.
  */
-static uint32_t memory_region_attributes_to_mode(uint32_t attributes)
+static mm_mode_t memory_region_attributes_to_mode(uint32_t attributes)
 {
-	uint32_t mode = 0U;
+	mm_mode_t mode = 0U;
 
 	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
 		mode |= MM_MODE_R;
@@ -499,7 +499,7 @@
 	       (mode == (MM_MODE_R | MM_MODE_X)));
 
 	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
-		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
+		mode |= arch_mm_extra_mode_from_vm(HF_HYPERVISOR_VM_ID);
 	}
 
 	return mode;
@@ -508,9 +508,9 @@
 /**
  * Convert the manifest device region attributes to the mode consumed by the mm layer.
  */
-static uint32_t device_region_attributes_to_mode(uint32_t attributes)
+static mm_mode_t device_region_attributes_to_mode(uint32_t attributes)
 {
-	uint32_t mode = 0U;
+	mm_mode_t mode = 0U;
 
 	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
 		mode |= MM_MODE_R;
@@ -523,7 +523,7 @@
 	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R));
 
 	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
-		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
+		mode |= arch_mm_extra_mode_from_vm(HF_HYPERVISOR_VM_ID);
 	}
 
 	return mode | MM_MODE_D;
@@ -541,7 +541,7 @@
 	paddr_t region_begin;
 	paddr_t region_end;
 	size_t size;
-	uint32_t map_mode;
+	mm_mode_t map_mode;
 	uint32_t attributes;
 
 	/* Map memory-regions */
@@ -673,7 +673,7 @@
 	bool has_fdt;
 	size_t kernel_size = 0;
 	const size_t mem_size = pa_difference(mem_begin, mem_end);
-	uint32_t map_mode;
+	mm_mode_t map_mode;
 	bool is_el0_partition = manifest_vm->partition.run_time_el == S_EL0 ||
 				manifest_vm->partition.run_time_el == EL0;
 	size_t n;
diff --git a/src/mm.c b/src/mm.c
index 78ff880..de1b320 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -12,10 +12,11 @@
 #include <stdint.h>
 
 #include "hf/arch/init.h"
+#include "hf/arch/mm.h"
 
-#include "hf/assert.h"
 #include "hf/check.h"
 #include "hf/dlog.h"
+#include "hf/ffa.h"
 #include "hf/layout.h"
 #include "hf/plat/console.h"
 #include "hf/static_assert.h"
@@ -80,7 +81,7 @@
  * Calculates the size of the address space represented by a page table entry at
  * the given level.
  */
-static size_t mm_entry_size(uint8_t level)
+static size_t mm_entry_size(mm_level_t level)
 {
 	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
 }
@@ -108,7 +109,7 @@
  * For a given address, calculates the maximum (plus one) address that can be
  * represented by the same table at the given level.
  */
-static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
+static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
 {
 	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
 
@@ -119,7 +120,7 @@
  * For a given address, calculates the index at which its entry is stored in a
  * table at the given level.
  */
-static size_t mm_index(ptable_addr_t addr, uint8_t level)
+static size_t mm_index(ptable_addr_t addr, mm_level_t level)
 {
 	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
 
@@ -142,7 +143,7 @@
 /**
  * Returns the maximum level in the page table given the flags.
  */
-static uint8_t mm_max_level(struct mm_flags flags)
+static mm_level_t mm_max_level(struct mm_flags flags)
 {
 	return flags.stage1 ? arch_mm_stage1_max_level()
 			    : arch_mm_stage2_max_level();
@@ -162,7 +163,7 @@
  */
 static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
 			      struct mm_flags flags, bool non_secure,
-			      uint16_t id)
+			      mm_asid_t id)
 {
 	if (flags.stage1) {
 		arch_mm_invalidate_stage1_range(id, va_init(begin),
@@ -178,7 +179,7 @@
  * given level, including any subtables recursively.
  */
 // NOLINTNEXTLINE(misc-no-recursion)
-static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
+static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
 {
 	struct mm_page_table *table;
 
@@ -209,7 +210,7 @@
 /**
  * Initialises the given page table.
  */
-bool mm_ptable_init(struct mm_ptable *ptable, uint16_t id,
+bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
 		    struct mm_flags flags, struct mpool *ppool)
 {
 	struct mm_page_table *tables;
@@ -243,7 +244,7 @@
 			   struct mm_flags flags, struct mpool *ppool)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
-	uint8_t level = mm_max_level(flags);
+	mm_level_t level = mm_max_level(flags);
 	uint8_t root_table_count = mm_root_table_count(flags);
 
 	for (size_t i = 0; i < root_table_count; ++i) {
@@ -264,8 +265,8 @@
  * TLBs, which may result in issues for example in cache coherency.
  */
 static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
-			     uint8_t level, struct mm_flags flags,
-			     bool non_secure, struct mpool *ppool, uint16_t id)
+			     mm_level_t level, struct mm_flags flags,
+			     bool non_secure, struct mpool *ppool, mm_asid_t id)
 {
 	pte_t v = *pte;
 
@@ -293,15 +294,18 @@
  *
  * Returns a pointer to the table the entry now points to.
  */
-static struct mm_page_table *mm_populate_table_pte(
-	ptable_addr_t begin, pte_t *pte, uint8_t level, struct mm_flags flags,
-	bool non_secure, struct mpool *ppool, uint16_t id)
+static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
+						   pte_t *pte, mm_level_t level,
+						   struct mm_flags flags,
+						   bool non_secure,
+						   struct mpool *ppool,
+						   mm_asid_t id)
 {
 	struct mm_page_table *ntable;
 	pte_t v = *pte;
 	pte_t new_pte;
 	size_t inc;
-	uint8_t level_below = level - 1;
+	mm_level_t level_below = level - 1;
 
 	/* Just return pointer to table if it's already populated. */
 	if (arch_mm_pte_is_table(v, level)) {
@@ -354,9 +358,9 @@
  */
 // NOLINTNEXTLINE(misc-no-recursion)
 static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
-			 uint64_t attrs, struct mm_page_table *table,
-			 uint8_t level, struct mm_flags flags,
-			 struct mpool *ppool, uint16_t id)
+			 mm_attr_t attrs, struct mm_page_table *table,
+			 mm_level_t level, struct mm_flags flags,
+			 struct mpool *ppool, mm_asid_t id)
 {
 	pte_t *pte = &table->entries[mm_index(begin, level)];
 	ptable_addr_t level_end = mm_level_end(begin, level);
@@ -432,8 +436,9 @@
  * `flags.unmap` is set, unmap the given range instead.
  */
 static bool mm_map_root(struct mm_ptable *ptable, ptable_addr_t begin,
-			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
-			struct mm_flags flags, struct mpool *ppool)
+			ptable_addr_t end, mm_attr_t attrs,
+			mm_level_t root_level, struct mm_flags flags,
+			struct mpool *ppool)
 {
 	size_t root_table_size = mm_entry_size(root_level);
 	struct mm_page_table *table = &mm_page_table_from_pa(
@@ -457,10 +462,10 @@
  * provided. Only commits the change if `flags.commit` is set.
  */
 static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
-				   paddr_t pa_end, uint64_t attrs,
+				   paddr_t pa_end, mm_attr_t attrs,
 				   struct mm_flags flags, struct mpool *ppool)
 {
-	uint8_t root_level = mm_max_level(flags) + 1;
+	mm_level_t root_level = mm_max_level(flags) + 1;
 	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
 	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
 	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));
@@ -501,7 +506,7 @@
  */
 static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
 				       paddr_t pa_begin, paddr_t pa_end,
-				       uint64_t attrs, struct mm_flags flags,
+				       mm_attr_t attrs, struct mm_flags flags,
 				       struct mpool *ppool)
 {
 	flags.commit = false;
@@ -525,7 +530,7 @@
  */
 static void mm_ptable_identity_commit(struct mm_ptable *ptable,
 				      paddr_t pa_begin, paddr_t pa_end,
-				      uint64_t attrs, struct mm_flags flags,
+				      mm_attr_t attrs, struct mm_flags flags,
 				      struct mpool *ppool)
 {
 	flags.commit = true;
@@ -545,7 +550,7 @@
  */
 static bool mm_ptable_identity_update(struct mm_ptable *ptable,
 				      paddr_t pa_begin, paddr_t pa_end,
-				      uint64_t attrs, struct mm_flags flags,
+				      mm_attr_t attrs, struct mm_flags flags,
 				      struct mpool *ppool)
 {
 	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
@@ -565,7 +570,7 @@
  */
 // NOLINTNEXTLINE(misc-no-recursion)
 static void mm_dump_table_recursive(const struct mm_page_table *ptable,
-				    uint8_t level, int max_level)
+				    mm_level_t level, mm_level_t max_level)
 {
 	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
 		if (!arch_mm_pte_is_present(ptable->entries[i], level)) {
@@ -591,7 +596,7 @@
 			   struct mm_flags flags)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
-	uint8_t max_level = mm_max_level(flags);
+	mm_level_t max_level = mm_max_level(flags);
 	uint8_t root_table_count = mm_root_table_count(flags);
 
 	for (size_t i = 0; i < root_table_count; ++i) {
@@ -603,12 +608,12 @@
  * Given that the table PTE's entries all have identical attributes, returns
  * the single block entry with which the whole table can be replaced.
  */
-static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
+static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
 {
 	struct mm_page_table *table;
-	uint64_t block_attrs;
-	uint64_t table_attrs;
-	uint64_t combined_attrs;
+	mm_attr_t block_attrs;
+	mm_attr_t table_attrs;
+	mm_attr_t combined_attrs;
 	paddr_t block_address;
 
 	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));
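	/*
	 * (A sketch of how the merge presumably completes beyond this hunk's
	 * context, using the arch_mm_* accessors: read the attributes of the
	 * table entry and of its first sub-entry, combine them, and build the
	 * replacement block. Illustrative, not the verbatim body.)
	 */
	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	table_attrs = arch_mm_pte_attrs(table_pte, level);
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);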
@@ -638,14 +643,14 @@
  */
 // NOLINTNEXTLINE(misc-no-recursion)
 static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
-				   uint8_t level, struct mm_flags flags,
+				   mm_level_t level, struct mm_flags flags,
 				   bool non_secure, struct mpool *ppool,
-				   uint16_t id)
+				   mm_asid_t id)
 {
 	struct mm_page_table *table;
 	bool mergeable;
 	bool base_present;
-	uint64_t base_attrs;
+	mm_attr_t base_attrs;
 	pte_t new_entry;
 
 	if (!arch_mm_pte_is_table(*entry, level)) {
@@ -720,7 +725,7 @@
 			     bool non_secure, struct mpool *ppool)
 {
 	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
-	uint8_t level = mm_max_level(flags);
+	mm_level_t level = mm_max_level(flags);
 	uint8_t root_table_count = mm_root_table_count(flags);
 	ptable_addr_t block_addr = 0;
 
@@ -755,8 +760,8 @@
 // NOLINTNEXTLINE(misc-no-recursion)
 static bool mm_ptable_get_attrs_level(const struct mm_page_table *table,
 				      ptable_addr_t begin, ptable_addr_t end,
-				      uint8_t level, bool got_attrs,
-				      uint64_t *attrs)
+				      mm_level_t level, bool got_attrs,
+				      mm_attr_t *attrs)
 {
 	const pte_t *pte = &table->entries[mm_index(begin, level)];
 	ptable_addr_t level_end = mm_level_end(begin, level);
@@ -804,11 +809,11 @@
  * Returns true if the whole range has the same attributes and false otherwise.
  */
 static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin,
-			 ptable_addr_t end, uint64_t *attrs,
+			 ptable_addr_t end, mm_attr_t *attrs,
 			 struct mm_flags flags)
 {
-	uint8_t max_level = mm_max_level(flags);
-	uint8_t root_level = max_level + 1;
+	mm_level_t max_level = mm_max_level(flags);
+	mm_level_t root_level = max_level + 1;
 	size_t root_table_size = mm_entry_size(root_level);
 	ptable_addr_t ptable_end =
 		mm_root_table_count(flags) * mm_entry_size(root_level);
@@ -839,7 +844,7 @@
 	return got_attrs;
 }
 
-bool mm_vm_init(struct mm_ptable *ptable, uint16_t id, struct mpool *ppool)
+bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool)
 {
 	return mm_ptable_init(ptable, id, (struct mm_flags){0}, ppool);
 }
@@ -853,7 +858,7 @@
  * Selects flags to pass to the page table manipulation operation based on the
  * mapping mode.
  */
-static struct mm_flags mm_mode_to_flags(uint32_t mode)
+static struct mm_flags mm_mode_to_flags(mm_mode_t mode)
 {
 	struct mm_flags flags = {0};
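	/*
	 * (Sketch of the rest of the body, beyond this hunk's context: the
	 * unmap flag is presumably derived from the mode bits, roughly as
	 * follows.)
	 */
	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		flags.unmap = true;
	}

	return flags;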
 
@@ -872,9 +877,10 @@
  * Returns true on success, or false if the update would fail.
  */
 bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool)
+			 mm_mode_t mode, struct mpool *ppool)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
+
 	flags.stage1 = true;
 
 	return mm_ptable_identity_prepare(ptable, begin, end,
@@ -888,9 +894,10 @@
  * `mm_identity_prepare` must be called before this for the same mapping.
  */
 void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool)
+			 mm_mode_t mode, struct mpool *ppool)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
+
 	flags.stage1 = true;
 
 	mm_ptable_identity_commit(ptable, begin, end,
@@ -907,7 +914,7 @@
  * Returns true on success, or false if the update would fail.
  */
 bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
-			    paddr_t end, uint32_t mode, struct mpool *ppool)
+			    paddr_t end, mm_mode_t mode, struct mpool *ppool)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
 
@@ -922,7 +929,7 @@
  * `mm_vm_identity_prepare` must be called before this for the same mapping.
  */
 void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+			   mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
 
@@ -949,7 +956,7 @@
  * made.
  */
 bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
-			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
 	bool success = mm_ptable_identity_update(
@@ -970,7 +977,7 @@
 bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
 		 struct mpool *ppool)
 {
-	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
 
 	return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
 }
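(MM_MODE_UNMAPPED_MASK itself is not visible in this diff. Judging by the test
in src/mm_test.cc below, which builds the mode of unmapped memory from
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED, the mask is presumably:)

	#define MM_MODE_UNMAPPED_MASK \
		(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)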
@@ -1008,9 +1015,9 @@
  * Returns true if the range is mapped with the same mode and false otherwise.
  */
 bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
-		    ipaddr_t end, uint32_t *mode)
+		    ipaddr_t end, mm_mode_t *mode)
 {
-	uint64_t attrs;
+	mm_attr_t attrs;
 	bool ret;
 
 	ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs,
@@ -1029,9 +1036,9 @@
  * Returns true if the range is mapped with the same mode and false otherwise.
  */
 bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
-		 uint32_t *mode)
+		 mm_mode_t *mode)
 {
-	uint64_t attrs;
+	mm_attr_t attrs;
 	bool ret;
 
 	ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs,
@@ -1072,9 +1079,10 @@
  * architecture-agnostic mode provided.
  */
 void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
-		      paddr_t end, uint32_t mode, struct mpool *ppool)
+		      paddr_t end, mm_mode_t mode, struct mpool *ppool)
 {
 	struct mm_flags flags = mm_mode_to_flags(mode);
+
 	flags.stage1 = true;
 
 	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
@@ -1093,7 +1101,7 @@
 bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
 	      struct mpool *ppool)
 {
-	uint32_t mode = MM_MODE_UNMAPPED_MASK;
+	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
 
 	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
 }
@@ -1114,6 +1122,7 @@
 {
 	/* Locking is not enabled yet so fake it. */
 	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
+	struct mm_flags flags = {.stage1 = true};
 
 	dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
 		  pa_addr(layout_text_end()));
@@ -1125,8 +1134,7 @@
 		  pa_addr(layout_stacks_end()));
 
 	/* ASID 0 is reserved for use by the hypervisor. */
-	if (!mm_ptable_init(&ptable, 0, (struct mm_flags){.stage1 = true},
-			    ppool)) {
+	if (!mm_ptable_init(&ptable, 0, flags, ppool)) {
 		dlog_error("Unable to allocate memory for page table.\n");
 		return false;
 	}
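(The struct mm_flags passed around above is not defined in this diff. Inferred
from the fields these hunks touch, it presumably carries at least the
following bits; a hypothetical sketch:)

	struct mm_flags {
		/** Commit the change rather than only checking it would succeed. */
		bool commit : 1;
		/** Unmap the given range instead of mapping it. */
		bool unmap : 1;
		/** Operate on a stage-1 page table rather than stage-2. */
		bool stage1 : 1;
	};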
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 34c72a4..5e4b31b 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -37,7 +37,7 @@
 using ::mm_test::get_ptable;
 
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
-const int TOP_LEVEL = arch_mm_stage2_max_level();
+const mm_level_t TOP_LEVEL = arch_mm_stage2_max_level();
 const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
 
 /**
@@ -54,7 +54,7 @@
  */
 bool mm_vm_is_mapped(struct mm_ptable *ptable, ipaddr_t ipa)
 {
-	uint32_t mode;
+	mm_mode_t mode;
 	return mm_vm_get_mode(ptable, ipa, ipa_add(ipa, 1), &mode) &&
 	       (mode & MM_MODE_INVALID) == 0;
 }
@@ -121,7 +121,7 @@
  */
 TEST_F(mm, map_first_page)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(0);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
@@ -160,7 +160,7 @@
  */
 TEST_F(mm, map_round_to_page)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
 	const paddr_t map_end = pa_add(map_begin, 268);
 	ipaddr_t ipa = ipa_init(-1);
@@ -202,7 +202,7 @@
  */
 TEST_F(mm, map_across_tables)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
@@ -261,7 +261,7 @@
  */
 TEST_F(mm, map_all_at_top_level)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	auto tables = get_ptable(ptable);
@@ -286,7 +286,7 @@
  */
 TEST_F(mm, map_already_mapped)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
@@ -305,7 +305,7 @@
  */
 TEST_F(mm, map_reverse_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
 				       pa_init(0x5000), mode, &ppool, &ipa));
@@ -324,7 +324,7 @@
  */
 TEST_F(mm, map_reverse_range_quirk)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
 				       &ppool, &ipa));
@@ -342,7 +342,7 @@
  */
 TEST_F(mm, map_last_address_quirk)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	ASSERT_TRUE(mm_vm_identity_map(
 		&ptable, pa_init(0),
@@ -360,7 +360,7 @@
  */
 TEST_F(mm, map_clamp_to_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
 				       pa_init(0xf32'0000'0000'0000), mode,
 				       &ppool, nullptr));
@@ -376,7 +376,7 @@
  */
 TEST_F(mm, map_ignore_out_of_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ipaddr_t ipa = ipa_init(-1);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
 				       pa_init(0xf0'0000'0000'0000), mode,
@@ -393,7 +393,7 @@
  */
 TEST_F(mm, map_block_replaces_table)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
@@ -412,7 +412,7 @@
  */
 TEST_F(mm, map_does_not_defrag)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -436,7 +436,7 @@
  */
 TEST_F(mm, map_to_unmap)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
 	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
@@ -457,7 +457,7 @@
  */
 TEST_F(mm, prepare_and_commit_first_page)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(0);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
@@ -497,7 +497,7 @@
  */
 TEST_F(mm, prepare_and_commit_disjoint_regions)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t first_begin = pa_init(0);
 	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
 	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
@@ -566,7 +566,7 @@
  */
 TEST_F(mm, prepare_and_commit_overlapping_regions)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
 	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
 	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
@@ -644,7 +644,7 @@
  */
 TEST_F(mm, unmap_all)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
 	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
@@ -664,7 +664,7 @@
  */
 TEST_F(mm, unmap_round_to_page)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
 
@@ -707,7 +707,7 @@
  */
 TEST_F(mm, unmap_across_tables)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
 
@@ -758,7 +758,7 @@
  */
 TEST_F(mm, unmap_out_of_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
@@ -775,7 +775,7 @@
  */
 TEST_F(mm, unmap_reverse_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
@@ -795,7 +795,7 @@
  */
 TEST_F(mm, unmap_reverse_range_quirk)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(0x180'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
@@ -835,7 +835,7 @@
  */
 TEST_F(mm, unmap_last_address_quirk)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	ASSERT_TRUE(mm_vm_unmap(
@@ -852,7 +852,7 @@
  */
 TEST_F(mm, unmap_does_not_defrag)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
 	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
 	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
@@ -883,7 +883,7 @@
  */
 TEST_F(mm, is_mapped_all)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
@@ -896,7 +896,7 @@
  */
 TEST_F(mm, is_mapped_page)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t page_begin = pa_init(0x100'0000'0000);
 	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
@@ -912,7 +912,7 @@
  */
 TEST_F(mm, is_mapped_out_of_range)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
@@ -929,7 +929,7 @@
 {
-	constexpr int default_mode =
-		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
+	constexpr mm_mode_t default_mode =
+		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
-	uint32_t read_mode;
+	mm_mode_t read_mode;
 
 	read_mode = 0;
 	EXPECT_TRUE(
@@ -953,10 +953,10 @@
  */
 TEST_F(mm, get_mode_pages_across_tables)
 {
-	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
+	constexpr mm_mode_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
 	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
 	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
-	uint32_t read_mode;
+	mm_mode_t read_mode;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
 				       &ppool, nullptr));
 
@@ -981,8 +981,8 @@
  */
 TEST_F(mm, get_mode_out_of_range)
 {
-	constexpr uint32_t mode = MM_MODE_UNOWNED;
-	uint32_t read_mode;
+	constexpr mm_mode_t mode = MM_MODE_UNOWNED;
+	mm_mode_t read_mode;
 	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
 				       &ppool, nullptr));
 	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
@@ -1012,7 +1012,7 @@
  */
 TEST_F(mm, defrag_empty_subtables)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
 	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
 	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
@@ -1035,7 +1035,7 @@
  */
 TEST_F(mm, defrag_block_subtables)
 {
-	constexpr uint32_t mode = 0;
+	constexpr mm_mode_t mode = 0;
 	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
 	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
 	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
diff --git a/src/vcpu.c b/src/vcpu.c
index 4fa3765..114048b 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -141,7 +141,7 @@
 			    struct vcpu_fault_info *f)
 {
 	struct vm *vm = current->vm;
-	uint32_t mode;
-	uint32_t mask = f->mode | MM_MODE_INVALID;
+	mm_mode_t mode;
+	mm_mode_t mask = f->mode | MM_MODE_INVALID;
 	bool resume;
 	struct vm_locked locked_vm;
diff --git a/src/vcpu_test.cc b/src/vcpu_test.cc
index 7cf76d7..987bf98 100644
--- a/src/vcpu_test.cc
+++ b/src/vcpu_test.cc
@@ -9,6 +9,8 @@
 #include <gmock/gmock.h>
 
 extern "C" {
+#include "hf/arch/mm.h"
+
 #include "hf/check.h"
 #include "hf/vcpu.h"
 #include "hf/vm.h"
diff --git a/src/vm.c b/src/vm.c
index ae56eac..8c86c8b 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -297,7 +297,7 @@
  *
  */
 bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+		     mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
 		return false;
@@ -319,7 +319,7 @@
  * made.
  */
 bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-			 uint32_t mode, struct mpool *ppool)
+			 mm_mode_t mode, struct mpool *ppool)
 {
 	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
 }
@@ -330,7 +330,7 @@
  * this condition.
  */
 void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
-			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
+			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
 {
 	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
 }
@@ -380,13 +380,13 @@
  * is a VM or an EL0 partition.
  */
 bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
-		     uint32_t *mode)
+		     mm_mode_t *mode)
 {
 	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
 }
 
 bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
-			      paddr_t end, uint32_t mode, struct mpool *ppool,
+			      paddr_t end, mm_mode_t mode, struct mpool *ppool,
 			      ipaddr_t *ipa, uint8_t dma_device_id)
 {
 	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
diff --git a/src/vm_test.cc b/src/vm_test.cc
index cfe4686..08dd3ca 100644
--- a/src/vm_test.cc
+++ b/src/vm_test.cc
@@ -9,8 +9,11 @@
 #include <gmock/gmock.h>
 
 extern "C" {
+#include "hf/arch/mm.h"
+
 #include "hf/check.h"
 #include "hf/list.h"
+#include "hf/mm.h"
 #include "hf/mpool.h"
 #include "hf/timer_mgmt.h"
 #include "hf/vm.h"
@@ -36,7 +39,7 @@
 using struct_vm_locked = struct vm_locked;
 
 constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
-const int TOP_LEVEL = arch_mm_stage2_max_level();
+const mm_level_t TOP_LEVEL = arch_mm_stage2_max_level();
 
 class vm : public ::testing::Test
 {
diff --git a/test/arch/mm_test.c b/test/arch/mm_test.c
index bff2eb6..4251367 100644
--- a/test/arch/mm_test.c
+++ b/test/arch/mm_test.c
@@ -38,7 +38,7 @@
 TEST(arch_mm, max_level_stage1)
 {
 	uint32_t pa_bits = arch_mm_get_pa_bits(arch_mm_get_pa_range());
-	uint8_t max_level;
+	mm_level_t max_level;
 
 	arch_mm_stage1_max_level_set(pa_bits);
 	max_level = arch_mm_stage1_max_level();
@@ -54,7 +54,7 @@
  */
 TEST(arch_mm, absent_properties)
 {
-	for (uint8_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
+	for (mm_level_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
 		pte_t absent_pte;
 
 		absent_pte = arch_mm_absent_pte(level);
@@ -71,8 +71,8 @@
  */
 TEST(arch_mm, invalid_block_properties)
 {
-	for (uint8_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
-		uint64_t attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);
+	for (mm_level_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
+		mm_attr_t attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_INVALID);
 		pte_t block_pte;
 
 		/* Test doesn't apply if a block is not allowed. */
@@ -95,8 +95,8 @@
  */
 TEST(arch_mm, valid_block_properties)
 {
-	for (uint8_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
-		uint64_t attrs = arch_mm_mode_to_stage2_attrs(0);
+	for (mm_level_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
+		mm_attr_t attrs = arch_mm_mode_to_stage2_attrs(0);
 		pte_t block_pte;
 
 		/* Test doesn't apply if a block is not allowed. */
@@ -119,7 +119,7 @@
  */
 TEST(arch_mm, table_properties)
 {
-	for (uint8_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
+	for (mm_level_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
 		pte_t table_pte;
 
 		/* Test doesn't apply to level 0 as there can't be a table. */
@@ -143,9 +143,9 @@
  */
 TEST(arch_mm, block_addr_and_attrs_preserved)
 {
-	for (uint8_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
+	for (mm_level_t level = 0; level <= MAX_LEVEL_UPPER_BOUND; level++) {
 		paddr_t addr;
-		uint64_t attrs;
+		mm_attr_t attrs;
 		pte_t block_pte;
 
 		/* Test doesn't apply if a block is not allowed. */
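		/*
		 * (The rest of this test is truncated here. Per its name, it
		 * presumably builds a block PTE and checks that the address
		 * and attributes survive a round trip, roughly as below. The
		 * aligned test address and plain assert() are illustrative,
		 * not the real test body.)
		 */
		if (!arch_mm_is_block_allowed(level)) {
			continue;
		}

		addr = pa_init(0); /* aligned at every level */
		attrs = arch_mm_mode_to_stage2_attrs(0);
		block_pte = arch_mm_block_pte(level, addr, attrs);

		assert(pa_addr(arch_mm_block_from_pte(block_pte, level)) ==
		       pa_addr(addr));
		assert(arch_mm_pte_attrs(block_pte, level) == attrs);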
diff --git a/test/hftest/arch/aarch64/el0/mm.c b/test/hftest/arch/aarch64/el0/mm.c
index 30da3c4..42c4fbf 100644
--- a/test/hftest/arch/aarch64/el0/mm.c
+++ b/test/hftest/arch/aarch64/el0/mm.c
@@ -24,7 +24,7 @@
 }
 
 // NOLINTNEXTLINE
-bool hftest_mm_get_mode(const void *base, size_t size, uint32_t *mode)
+bool hftest_mm_get_mode(const void *base, size_t size, mm_mode_t *mode)
 {
 	(void)base;
 	(void)size;
@@ -33,7 +33,7 @@
 	return true;
 }
 
-void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode)
+void hftest_mm_identity_map(const void *base, size_t size, mm_mode_t mode)
 {
 	(void)base;
 	(void)size;
diff --git a/test/hftest/mm.c b/test/hftest/mm.c
index d277784..993a571 100644
--- a/test/hftest/mm.c
+++ b/test/hftest/mm.c
@@ -70,7 +70,7 @@
 	return true;
 }
 
-bool hftest_mm_get_mode(const void *base, size_t size, uint32_t *mode)
+bool hftest_mm_get_mode(const void *base, size_t size, mm_mode_t *mode)
 {
 	vaddr_t start = va_from_ptr(base);
 	vaddr_t end = va_add(start, size);
@@ -81,7 +81,7 @@
 	return mm_get_mode(stage1_locked.ptable, start, end, mode);
 }
 
-void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode)
+void hftest_mm_identity_map(const void *base, size_t size, mm_mode_t mode)
 {
 	struct mm_stage1_locked stage1_locked = hftest_mm_get_stage1();
 	paddr_t start = pa_from_va(va_from_ptr(base));
diff --git a/test/inc/test/hftest.h b/test/inc/test/hftest.h
index d07c322..a52f07a 100644
--- a/test/inc/test/hftest.h
+++ b/test/inc/test/hftest.h
@@ -130,9 +130,9 @@
 bool hftest_mm_init(void);
 
 /** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */
-void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode);
+void hftest_mm_identity_map(const void *base, size_t size, mm_mode_t mode);
 
-bool hftest_mm_get_mode(const void *base, size_t size, uint32_t *mode);
+bool hftest_mm_get_mode(const void *base, size_t size, mm_mode_t *mode);
 
 void hftest_mm_vcpu_init(void);
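(A minimal usage sketch of the two declarations above, mirroring the pattern
used in test/vmapi below; the helper name and the choice of MM_MODE_W are
illustrative:)

	static void remap_with_write(const void *base, size_t size)
	{
		mm_mode_t mode;

		if (!hftest_mm_get_mode(base, size, &mode)) {
			FAIL("Memory range has different modes.\n");
		}
		hftest_mm_identity_map(base, size, mode | MM_MODE_W);
	}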
 
diff --git a/test/vmapi/arch/aarch64/gicv3/gicv3_setup.c b/test/vmapi/arch/aarch64/gicv3/gicv3_setup.c
index 932d15a..88399c5 100644
--- a/test/vmapi/arch/aarch64/gicv3/gicv3_setup.c
+++ b/test/vmapi/arch/aarch64/gicv3/gicv3_setup.c
@@ -27,7 +27,7 @@
 
 void gicv3_system_setup(void)
 {
-	const uint32_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
+	const mm_mode_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
 	hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode);
 	hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode);
 	hftest_mm_identity_map((void *)IO32_C(GICR_BASE + SGI_BASE).ptr,
diff --git a/test/vmapi/common/ffa.c b/test/vmapi/common/ffa.c
index 26ace84..55249bb 100644
--- a/test/vmapi/common/ffa.c
+++ b/test/vmapi/common/ffa.c
@@ -888,7 +888,7 @@
 	if (attributes.security == FFA_MEMORY_SECURITY_NON_SECURE &&
 	    !ffa_is_vm_id(hf_vm_get_id())) {
 		for (uint32_t i = 0; i < composite->constituent_count; i++) {
-			uint32_t mode;
+			mm_mode_t mode;
 
 			if (!hftest_mm_get_mode(
 				    // NOLINTNEXTLINE(performance-no-int-to-ptr)
diff --git a/test/vmapi/primary_with_secondaries/services/boot.c b/test/vmapi/primary_with_secondaries/services/boot.c
index 710bbd9..8e30281 100644
--- a/test/vmapi/primary_with_secondaries/services/boot.c
+++ b/test/vmapi/primary_with_secondaries/services/boot.c
@@ -53,17 +53,17 @@
 	size_t page_count = mem_region->page_count;
 	uint32_t attributes = mem_region->attributes;
 
-	uint32_t mode = 0;
-	uint32_t extra_attributes =
-		(attributes & MANIFEST_REGION_ATTR_SECURITY) != 0 ? MM_MODE_NS
-								  : 0U;
+	mm_mode_t mode = 0;
+	mm_mode_t extra_mode = (attributes & MANIFEST_REGION_ATTR_SECURITY) != 0
+				       ? MM_MODE_NS
+				       : 0U;
 
 	if (!hftest_mm_get_mode(address, FFA_PAGE_SIZE * page_count, &mode)) {
 		FAIL("Memory range has different modes.\n");
 	}
 
 	hftest_mm_identity_map(address, FFA_PAGE_SIZE * page_count,
-			       mode | extra_attributes);
+			       mode | extra_mode);
 }
 
 TEST_SERVICE(boot_memory)