refactor(mm): use typedefs
* Add typedefs for memory modes and attrs
* Add typedef for page table levels
* Add typedef for the ptable ASID
* Rewrite the `MM_MODE_` macros to use shifts instead of hard-coding
  each value as a hex literal (equivalence sketched below).
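For reference, each new shift form is bit-for-bit identical to the
old literal; a standalone sketch (values copied from the diff below)
that a C11 compiler can verify:

    #include <assert.h>
    #include <stdint.h>

    /* New shift forms on the left, old hex literals on the right. */
    static_assert((1U << 0) == UINT32_C(0x0001), "MM_MODE_R");
    static_assert((1U << 1) == UINT32_C(0x0002), "MM_MODE_W");
    static_assert((1U << 2) == UINT32_C(0x0004), "MM_MODE_X");
    static_assert((1U << 3) == UINT32_C(0x0008), "MM_MODE_D");
    static_assert((1U << 4) == UINT32_C(0x0010), "MM_MODE_INVALID");
    static_assert((1U << 5) == UINT32_C(0x0020), "MM_MODE_UNOWNED");
    static_assert((1U << 6) == UINT32_C(0x0040), "MM_MODE_SHARED");
    static_assert((1U << 8) == UINT32_C(0x0100), "MM_MODE_NG");
    static_assert((1U << 9) == UINT32_C(0x0200), "MM_MODE_USER");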
Change-Id: I783825777b4897692d48287fc689026a04ecba50
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index e126320..aff344a 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -12,6 +12,7 @@
#include <stddef.h>
#include "hf/addr.h"
+#include "hf/mm.h"
#include "vmapi/hf/ffa.h"
@@ -27,46 +28,46 @@
/**
* Creates an absent PTE.
*/
-pte_t arch_mm_absent_pte(uint8_t level);
+pte_t arch_mm_absent_pte(mm_level_t level);
/**
* Creates a table PTE.
*/
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa);
/**
* Creates a block PTE.
*/
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs);
/**
* Checks whether a block is allowed at the given level of the page table.
*/
-bool arch_mm_is_block_allowed(uint8_t level);
+bool arch_mm_is_block_allowed(mm_level_t level);
/**
* Determines if a PTE is present i.e. it contains information and therefore
* needs to exist in the page table. Any non-absent PTE is present.
*/
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level);
/**
* Determines if a PTE is valid i.e. it can affect the address space. Tables and
* valid blocks fall into this category. Invalid blocks do not as they hold
* information about blocks that are not in the address space.
*/
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level);
/**
* Determines if a PTE is a block and represents an address range, valid or
* invalid.
*/
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level);
/**
* Determines if a PTE represents a reference to a table of PTEs.
*/
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level);
/**
* Clears the bits of an address that are ignored by the page table. In effect,
@@ -77,34 +78,34 @@
/**
* Extracts the start address of the PTE range.
*/
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level);
/**
* Extracts the address of the table referenced by the PTE.
*/
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level);
/**
* Extracts the attributes of the PTE.
*/
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level);
/**
* Merges the attributes of a block into those of its containing table.
*/
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
- uint64_t block_attrs);
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+ mm_attr_t block_attrs);
/**
* Invalidates the given range of stage-1 TLB.
*/
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
vaddr_t va_end);
/**
* Invalidates the given range of stage-2 TLB.
*/
-void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
+void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
ipaddr_t va_end, bool non_secure);
/**
@@ -122,12 +123,12 @@
/**
* Gets the maximum level allowed in the page table for stage-1.
*/
-uint8_t arch_mm_stage1_max_level(void);
+mm_level_t arch_mm_stage1_max_level(void);
/**
* Gets the maximum level allowed in the page table for stage-2.
*/
-uint8_t arch_mm_stage2_max_level(void);
+mm_level_t arch_mm_stage2_max_level(void);
/**
* Gets the number of concatenated page tables used at the root for stage-1.
@@ -147,22 +148,22 @@
/**
* Converts the mode into stage-1 attributes for a block PTE.
*/
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode);
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode);
/**
* Converts the mode into stage-2 attributes for a block PTE.
*/
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode);
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode);
/**
* Converts the stage-2 block attributes back to the corresponding mode.
*/
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs);
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs);
/**
* Converts the stage-1 block attributes back to the corresponding mode.
*/
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs);
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs);
/**
* Initializes the arch specific memory management.
@@ -172,7 +173,7 @@
/**
* Return the arch specific mm mode for send/recv pages of given VM ID.
*/
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id);
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id);
/**
* Execute any barriers or synchronization that is required
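A usage note (not part of this patch): the typedefs make the width of
each value explicit at the conversion call sites above. A hypothetical
round-trip check; the helper name is an assumption, and whether every
bit survives depends on what the stage-1 attributes encode:

    #include <stdbool.h>

    #include "hf/arch/mm.h"

    /* Hypothetical: convert a mode to 64-bit stage-1 attributes and
     * back to the 32-bit mode. Expected to hold for bits such as
     * MM_MODE_R | MM_MODE_W that stage-1 encodes. */
    static bool mode_round_trips(mm_mode_t mode)
    {
        mm_attr_t attrs = arch_mm_mode_to_stage1_attrs(mode);

        return arch_mm_stage1_attrs_to_mode(attrs) == mode;
    }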
diff --git a/inc/hf/arch/vm.h b/inc/hf/arch/vm.h
index 432f950..c4ee916 100644
--- a/inc/hf/arch/vm.h
+++ b/inc/hf/arch/vm.h
@@ -17,16 +17,16 @@
bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool);
bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool);
bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool);
+ paddr_t end, mm_mode_t mode, struct mpool *ppool);
void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool,
+ paddr_t end, mm_mode_t mode, struct mpool *ppool,
ipaddr_t *ipa);
bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
struct mpool *ppool);
void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
- ipaddr_t end, uint32_t *mode);
+ ipaddr_t end, mm_mode_t *mode);
bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode,
+ paddr_t end, mm_mode_t mode,
struct mpool *ppool, ipaddr_t *ipa,
uint8_t dma_device_id);
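A hypothetical call site for the retyped out-parameter (the wrapper
name and the readability check are illustrative, not part of this
patch):

    #include <stdbool.h>

    #include "hf/arch/vm.h"
    #include "hf/mm.h"

    /* Hypothetical: query the mode of an IPA range and test a bit.
     * The out-parameter is now an mm_mode_t rather than a bare
     * uint32_t. */
    static bool range_is_readable(struct vm_locked vm_locked,
                                  ipaddr_t begin, ipaddr_t end)
    {
        mm_mode_t mode;

        return arch_vm_mem_get_mode(vm_locked, begin, end, &mode) &&
               (mode & MM_MODE_R) != 0;
    }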
diff --git a/inc/hf/ffa/ffa_memory.h b/inc/hf/ffa/ffa_memory.h
index ee00e14..ad0e9ad 100644
--- a/inc/hf/ffa/ffa_memory.h
+++ b/inc/hf/ffa/ffa_memory.h
@@ -9,6 +9,7 @@
#pragma once
#include "hf/ffa.h"
+#include "hf/mm.h"
#include "hf/vm.h"
/** Check validity of the FF-A memory send function attempt. */
@@ -41,7 +42,7 @@
* it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals
* with NS accesses by default.
*/
-uint32_t ffa_memory_get_other_world_mode(void);
+mm_mode_t ffa_memory_get_other_world_mode(void);
bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current);
bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current);
@@ -74,4 +75,4 @@
* Set the security bit in `attributes` if specified by `mode`.
*/
ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
- ffa_memory_attributes_t attributes, uint32_t mode);
+ ffa_memory_attributes_t attributes, mm_mode_t mode);
diff --git a/inc/hf/ffa_memory_internal.h b/inc/hf/ffa_memory_internal.h
index f08c425..c373ad7 100644
--- a/inc/hf/ffa_memory_internal.h
+++ b/inc/hf/ffa_memory_internal.h
@@ -13,6 +13,7 @@
*/
#pragma once
+#include "hf/mm.h"
#define MAX_MEM_SHARES 100
#include <stdbool.h>
@@ -92,7 +93,7 @@
* This is used to reset the original configuration when sender invokes
* FFA_MEM_RECLAIM_32.
*/
- uint32_t sender_orig_mode;
+ mm_mode_t sender_orig_mode;
/**
* True if all the fragments of this sharing request have been sent and
@@ -172,7 +173,7 @@
struct ffa_value ffa_memory_send_complete(
struct vm_locked from_locked, struct share_states_locked share_states,
struct ffa_memory_share_state *share_state, struct mpool *page_pool,
- uint32_t *orig_from_mode_ret);
+ mm_mode_t *orig_from_mode_ret);
struct ffa_value ffa_memory_send_continue_validate(
struct share_states_locked share_states, ffa_memory_handle_t handle,
struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
@@ -182,14 +183,14 @@
struct vm_locked to_locked,
struct ffa_memory_region_constituent **fragments,
uint32_t *fragment_constituent_counts, uint32_t fragment_count,
- uint32_t sender_orig_mode, uint32_t share_func, bool clear,
- struct mpool *page_pool, uint32_t *response_mode,
+ mm_mode_t sender_orig_mode, uint32_t share_func, bool clear,
+ struct mpool *page_pool, mm_mode_t *response_mode,
bool memory_protected);
struct ffa_value ffa_region_group_identity_map(
struct vm_locked vm_locked,
struct ffa_memory_region_constituent **fragments,
const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
- uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
+ mm_mode_t mode, struct mpool *ppool, enum ffa_map_action action,
bool *memory_protected);
bool memory_region_receivers_from_other_world(
struct ffa_memory_region *memory_region);
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index 8f2d5d8..6023817 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -13,12 +13,15 @@
#include <stddef.h>
#include <stdint.h>
-#include "hf/arch/mm.h"
-
#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"
+typedef uint32_t mm_mode_t;  /* arch-independent mapping mode (MM_MODE_*) */
+typedef uint64_t mm_attr_t;  /* arch-specific PTE attribute bits */
+typedef uint8_t mm_level_t;  /* page table level */
+typedef uint16_t mm_asid_t;  /* address space ID (VMID/ASID) */
+
/* Keep macro alignment */
/* clang-format off */
@@ -26,10 +29,10 @@
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
/* The following are arch-independent page mapping modes. */
-#define MM_MODE_R UINT32_C(0x0001) /* read */
-#define MM_MODE_W UINT32_C(0x0002) /* write */
-#define MM_MODE_X UINT32_C(0x0004) /* execute */
-#define MM_MODE_D UINT32_C(0x0008) /* device */
+#define MM_MODE_R (1U << 0) /* read */
+#define MM_MODE_W (1U << 1) /* write */
+#define MM_MODE_X (1U << 2) /* execute */
+#define MM_MODE_D (1U << 3) /* device */
/*
* Memory in stage-1 is either valid (present) or invalid (absent).
@@ -59,15 +62,15 @@
*
* Modes are selected so that owner of exclusive memory is the default.
*/
-#define MM_MODE_INVALID UINT32_C(0x0010)
-#define MM_MODE_UNOWNED UINT32_C(0x0020)
-#define MM_MODE_SHARED UINT32_C(0x0040)
-
-/* Specifies if a mapping will be a user mapping(EL0). */
-#define MM_MODE_USER UINT32_C(0x0200)
+#define MM_MODE_INVALID (1U << 4)
+#define MM_MODE_UNOWNED (1U << 5)
+#define MM_MODE_SHARED (1U << 6)
/* Map page as non-global. */
-#define MM_MODE_NG UINT32_C(0x0100) /* non-global */
+#define MM_MODE_NG (1U << 8)
+
+/* Specifies if a mapping will be a user mapping (EL0). */
+#define MM_MODE_USER (1U << 9)
/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
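A hypothetical predicate over the mask above, only to illustrate how
mm_mode_t reads at use sites (the authoritative checks live in mm.c;
reading "unmapped" as both bits set is an assumption here):

    /* Hypothetical: a mode counts as unmapped when both mask bits,
     * MM_MODE_INVALID and MM_MODE_UNOWNED, are set. */
    static inline bool mm_mode_is_unmapped(mm_mode_t mode)
    {
        return (mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK;
    }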
@@ -95,7 +98,7 @@
* VMID/ASID associated with a page table. ASID 0 is reserved for use by
* the hypervisor.
*/
- uint16_t id;
+ mm_asid_t id;
/** Address of the root of the page table. */
paddr_t root;
};
@@ -110,24 +113,24 @@
void mm_vm_enable_invalidation(void);
-bool mm_ptable_init(struct mm_ptable *ptable, uint16_t id,
+bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
struct mm_flags flags, struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags);
-bool mm_vm_init(struct mm_ptable *ptable, uint16_t id, struct mpool *ppool);
+bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool);
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool);
+ mm_mode_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool);
+ mm_mode_t mode, struct mpool *ppool);
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+ mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool);
+ paddr_t end, mm_mode_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+ mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
@@ -135,15 +138,15 @@
bool non_secure);
void mm_vm_dump(const struct mm_ptable *ptable);
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
- ipaddr_t end, uint32_t *mode);
+ ipaddr_t end, mm_mode_t *mode);
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
- uint32_t *mode);
+ mm_mode_t *mode);
struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool);
+ paddr_t end, mm_mode_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
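A hypothetical stage-1 call site using the retyped signature (the
wrapper name and the R|W mode choice are illustrative):

    #include "hf/mm.h"
    #include "hf/mpool.h"

    /* Hypothetical: identity-map a physical range read/write into the
     * stage-1 table, returning the mapped address or NULL on failure. */
    static void *map_rw(struct mm_stage1_locked locked, paddr_t begin,
                        paddr_t end, struct mpool *ppool)
    {
        mm_mode_t mode = MM_MODE_R | MM_MODE_W;

        return mm_identity_map(locked, begin, end, mode, ppool);
    }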
diff --git a/inc/hf/plat/iommu.h b/inc/hf/plat/iommu.h
index a144622..bb33871 100644
--- a/inc/hf/plat/iommu.h
+++ b/inc/hf/plat/iommu.h
@@ -44,7 +44,7 @@
* that read and write modes are enforced by the IOMMU driver.
*/
void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode);
+ paddr_t end, mm_mode_t mode);
/**
* Configure IOMMU to perform address translation of memory transactions on the
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 7d16b6a..ffb1ec8 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -13,6 +13,7 @@
#include "hf/addr.h"
#include "hf/interrupt_desc.h"
#include "hf/list.h"
+#include "hf/mm.h"
#include "hf/spinlock.h"
#include "vmapi/hf/ffa.h"
@@ -110,7 +111,7 @@
ipaddr_t ipaddr;
vaddr_t vaddr;
vaddr_t pc;
- uint32_t mode;
+ mm_mode_t mode;
};
struct call_chain {
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index bb1a277..f161077 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -300,20 +300,20 @@
bool vm_is_mailbox_busy(struct vm_locked to);
bool vm_is_mailbox_other_world_owned(struct vm_locked to);
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+ mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool);
+ mm_mode_t mode, struct mpool *ppool);
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
- uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
+ mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
struct mpool *ppool);
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
- uint32_t *mode);
+ mm_mode_t *mode);
bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool,
+ paddr_t end, mm_mode_t mode, struct mpool *ppool,
ipaddr_t *ipa, uint8_t dma_device_id);
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,