refactor(mm): use typedefs
* Add typedefs for memory modes (`mm_mode_t`) and page table
  attributes (`mm_attr_t`); see the first sketch below
* Add typedef for page table levels (`mm_level_t`)
* Use the `ffa_id_t` typedef for page table ASIDs
* Rewrite `MM_MODE_` macros to use shifts instead of writing the
  values out by hand; see the second sketch below
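
The typedef definitions themselves are not visible in the hunks below;
they presumably live in inc/hf/mm.h and the inc/hf/arch headers. A
minimal sketch, with widths assumed from the integer types being
replaced (uint32_t mode, uint64_t attrs, uint8_t level):

    /* Sketch only: widths assumed from the replaced types. */
    typedef uint32_t mm_mode_t;  /* mode bitmask, e.g. MM_MODE_R | MM_MODE_W */
    typedef uint64_t mm_attr_t;  /* arch-specific PTE attribute bits */
    typedef uint8_t mm_level_t;  /* page table level; 0 is the page level */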
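
The macro rewrite follows the pattern visible in the hf/arch/mmu.h hunk
below, where 0x0080 becomes (1U << 7). Applied to the generic modes it
would look like this sketch; the bit positions of MM_MODE_R/W/X/D are
not part of this diff and are assumed here:

    /* Sketch: bit positions assumed, not shown in this diff. */
    #define MM_MODE_R (1U << 0) /* readable */
    #define MM_MODE_W (1U << 1) /* writable */
    #define MM_MODE_X (1U << 2) /* executable */
    #define MM_MODE_D (1U << 3) /* device memory */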
Change-Id: I783825777b4897692d48287fc689026a04ecba50
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c b/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
index 32e622c..093e885 100644
--- a/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
+++ b/src/arch/aarch64/arm_smmuv3/arm_smmuv3.c
@@ -1390,7 +1390,7 @@
}
void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode)
+ paddr_t end, mm_mode_t mode)
{
(void)vm_locked;
(void)begin;
diff --git a/src/arch/aarch64/hftest/el0/mm.c b/src/arch/aarch64/hftest/el0/mm.c
index c0c1fcc..034ec29 100644
--- a/src/arch/aarch64/hftest/el0/mm.c
+++ b/src/arch/aarch64/hftest/el0/mm.c
@@ -8,6 +8,8 @@
#include "hf/arch/vm/mm.h"
+#include "hf/arch/mm.h"
+
/**
* MM support is not done at EL0.
* Define dummy functions for EL0 targets.
@@ -26,7 +28,7 @@
{
}
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
{
(void)id;
return 0;
diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c
index c419820..483dc4d 100644
--- a/src/arch/aarch64/hftest/mm.c
+++ b/src/arch/aarch64/hftest/mm.c
@@ -9,6 +9,7 @@
#include "hf/mm.h"
#include "hf/arch/barriers.h"
+#include "hf/arch/mm.h"
#include "hf/arch/vm/mm.h"
#include "hf/dlog.h"
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 852d397..98d2e5a 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -1212,7 +1212,7 @@
*/
static struct vcpu_fault_info fault_info_init(uintreg_t esr,
const struct vcpu *vcpu,
- uint32_t mode)
+ mm_mode_t mode)
{
uint32_t fsc = esr & 0x3f;
struct vcpu_fault_info r;
diff --git a/src/arch/aarch64/hypervisor/other_world.c b/src/arch/aarch64/hypervisor/other_world.c
index 3f846b5..8078cb3 100644
--- a/src/arch/aarch64/hypervisor/other_world.c
+++ b/src/arch/aarch64/hypervisor/other_world.c
@@ -14,6 +14,7 @@
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
+#include "hf/mm.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
@@ -148,10 +149,10 @@
paddr_t pa_recv_end)
{
struct ffa_value ret;
- uint32_t send_mode;
- uint32_t recv_mode;
+ mm_mode_t send_mode;
+ mm_mode_t recv_mode;
struct vm_locked other_world_locked;
- const uint32_t expected_mode =
+ const mm_mode_t expected_mode =
MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS;
other_world_locked = lock_other_world(vm_locked);
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 59ea714..e3496ff 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -130,7 +130,7 @@
}
bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool)
+ paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
struct mm_ptable *ptable = &vm_locked.vm->ptable;
@@ -148,7 +148,7 @@
}
void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool,
+ paddr_t end, mm_mode_t mode, struct mpool *ppool,
ipaddr_t *ipa)
{
struct mm_ptable *ptable = &vm_locked.vm->ptable;
@@ -180,7 +180,7 @@
struct mpool *ppool)
{
bool ret;
- uint32_t mode = MM_MODE_UNMAPPED_MASK;
+ mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
ret = vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
@@ -209,7 +209,7 @@
}
bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
- ipaddr_t end, uint32_t *mode)
+ ipaddr_t end, mm_mode_t *mode)
{
bool ret;
@@ -222,8 +222,8 @@
ret = mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
#if SECURE_WORLD == 1
- uint32_t mode2;
- const uint32_t mask =
+ mm_mode_t mode2;
+ const mm_mode_t mask =
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
/* If the region is fully unmapped in the secure IPA space. */
@@ -243,7 +243,7 @@
}
static bool arch_vm_iommu_mm_prepare(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode,
+ paddr_t end, mm_mode_t mode,
struct mpool *ppool, uint8_t dma_device_id)
{
struct mm_ptable *ptable = &vm_locked.vm->iommu_ptables[dma_device_id];
@@ -258,7 +258,7 @@
}
static void arch_vm_iommu_mm_commit(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode,
+ paddr_t end, mm_mode_t mode,
struct mpool *ppool, ipaddr_t *ipa,
uint8_t dma_device_id)
{
@@ -274,7 +274,7 @@
}
bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode,
+ paddr_t end, mm_mode_t mode,
struct mpool *ppool, ipaddr_t *ipa,
uint8_t dma_device_id)
{
diff --git a/src/arch/aarch64/inc/hf/arch/mmu.h b/src/arch/aarch64/inc/hf/arch/mmu.h
index d6eb366..670968b 100644
--- a/src/arch/aarch64/inc/hf/arch/mmu.h
+++ b/src/arch/aarch64/inc/hf/arch/mmu.h
@@ -11,10 +11,10 @@
/** AArch64-specific mapping modes */
/** Mapping mode defining MMU Stage-1 block/page non-secure bit */
-#define MM_MODE_NS UINT32_C(0x0080)
+#define MM_MODE_NS (1U << 7)
/** Page mapping mode for tagged normal memory. */
-#define MM_MODE_T UINT32_C(0x0400)
+#define MM_MODE_T (1U << 10)
#define tlbi(op) \
do { \
diff --git a/src/arch/aarch64/inc/hf/arch/vm/vm.h b/src/arch/aarch64/inc/hf/arch/vm/vm.h
index 309f1e0..a8749b0 100644
--- a/src/arch/aarch64/inc/hf/arch/vm/vm.h
+++ b/src/arch/aarch64/inc/hf/arch/vm/vm.h
@@ -8,6 +8,8 @@
#pragma once
+#include "hf/arch/mm.h"
+
#include "hf/mm.h"
/** Arch-specific information about a VM. */
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 1a37562..5a49d37 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -10,6 +10,7 @@
#include "hf/arch/barriers.h"
#include "hf/arch/cpu.h"
+#include "hf/arch/mm.h"
#include "hf/arch/mmu.h"
#include "hf/arch/std.h"
@@ -136,14 +137,14 @@
uintreg_t vstcr_el2;
} arch_mm_config;
-static uint8_t mm_s1_max_level;
-static uint8_t mm_s2_max_level;
+static mm_level_t mm_s1_max_level;
+static mm_level_t mm_s2_max_level;
static uint8_t mm_s2_root_table_count;
/**
* Returns the encoding of a page table entry that isn't present.
*/
-pte_t arch_mm_absent_pte(uint8_t level)
+pte_t arch_mm_absent_pte(mm_level_t level)
{
(void)level;
return 0;
@@ -155,7 +156,7 @@
* The spec says that 'Table descriptors for stage 2 translations do not
* include any attribute field', so we don't take any attributes as arguments.
*/
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa)
{
/* This is the same for all levels on aarch64. */
(void)level;
@@ -167,7 +168,7 @@
*
* The level must allow block entries.
*/
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs)
{
pte_t pte = pa_addr(pa) | attrs;
@@ -183,7 +184,7 @@
*
* Level 0 must allow block entries.
*/
-bool arch_mm_is_block_allowed(uint8_t level)
+bool arch_mm_is_block_allowed(mm_level_t level)
{
return level <= 2;
}
@@ -192,7 +193,7 @@
* Determines if the given pte is present, i.e., if it is valid or it is invalid
* but still holds state about the memory so needs to be present in the table.
*/
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level)
{
return arch_mm_pte_is_valid(pte, level) || (pte & STAGE2_SW_OWNED) != 0;
}
@@ -201,7 +202,7 @@
* Determines if the given pte is valid, i.e., if it points to another table,
* to a page, or a block of pages that can be accessed.
*/
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level)
{
(void)level;
return (pte & PTE_VALID) != 0;
@@ -210,7 +211,7 @@
/**
* Determines if the given pte references a block of pages.
*/
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level)
{
/* We count pages at level 0 as blocks. */
return arch_mm_is_block_allowed(level) &&
@@ -222,7 +223,7 @@
/**
* Determines if the given pte references another table.
*/
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level)
{
return level != 0 && arch_mm_pte_is_valid(pte, level) &&
(pte & PTE_TABLE) != 0;
@@ -246,7 +247,7 @@
* Extracts the physical address of the block referred to by the given page
* table entry.
*/
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level)
{
(void)level;
return pa_init(pte_addr(pte));
@@ -256,7 +257,7 @@
* Extracts the physical address of the page table referred to by the given page
* table entry.
*/
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level)
{
(void)level;
return pa_init(pte_addr(pte));
@@ -266,7 +267,7 @@
* Extracts the architecture-specific attributes applied to the given page table
* entry.
*/
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level)
{
(void)level;
return pte & PTE_ATTR_MASK;
@@ -287,7 +288,7 @@
/**
* Invalidates stage-1 TLB entries referring to the given virtual address range.
*/
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
vaddr_t va_end)
{
uintvaddr_t begin = va_addr(va_begin);
@@ -444,9 +445,9 @@
dsb(sy);
}
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode)
{
- uint64_t attrs = 0;
+ mm_attr_t attrs = 0;
attrs |= STAGE1_AF | STAGE1_SH(INNER_SHAREABLE);
@@ -530,9 +531,9 @@
return attrs;
}
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs)
{
- uint32_t mode = 0;
+ mm_mode_t mode = 0;
#if SECURE_WORLD == 1
if (attrs & STAGE1_NS) {
@@ -584,10 +585,10 @@
return mode;
}
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode)
{
- uint64_t attrs = 0;
- uint64_t access = 0;
+ mm_attr_t attrs = 0;
+ mm_attr_t access = 0;
/*
* Default shareability is inner shareable in stage 2 tables. Per
@@ -660,9 +661,9 @@
return attrs;
}
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs)
{
- uint32_t mode = 0;
+ mm_mode_t mode = 0;
if (attrs & STAGE2_S2AP(STAGE2_ACCESS_READ)) {
mode |= MM_MODE_R;
@@ -713,12 +714,12 @@
}
}
-uint8_t arch_mm_stage1_max_level(void)
+mm_level_t arch_mm_stage1_max_level(void)
{
return mm_s1_max_level;
}
-uint8_t arch_mm_stage2_max_level(void)
+mm_level_t arch_mm_stage2_max_level(void)
{
return mm_s2_max_level;
}
@@ -739,8 +740,8 @@
* in that table, returns equivalent attrs to use for a block which will replace
* the entire table.
*/
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
- uint64_t block_attrs)
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+ mm_attr_t block_attrs)
{
/*
* Only stage 1 table descriptors have attributes, but the bits are res0
@@ -980,7 +981,7 @@
/**
* Return the arch-specific mm mode for the send/recv pages of the given VM ID.
*/
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
{
return ((id & HF_VM_ID_WORLD_MASK) == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS
: 0;
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 567d73a..5f3fbc9 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -543,7 +543,7 @@
}
bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool)
+ paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
(void)vm_locked;
(void)begin;
@@ -555,7 +555,7 @@
}
void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode, struct mpool *ppool,
+ paddr_t end, mm_mode_t mode, struct mpool *ppool,
ipaddr_t *ipa)
{
(void)vm_locked;
@@ -584,7 +584,7 @@
}
bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
- ipaddr_t end, uint32_t *mode) // NOLINT
+ ipaddr_t end, mm_mode_t *mode) // NOLINT
{
(void)vm_locked;
(void)begin;
@@ -595,7 +595,7 @@
}
ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
- ffa_memory_attributes_t attributes, uint32_t mode)
+ ffa_memory_attributes_t attributes, mm_mode_t mode)
{
(void)mode;
@@ -624,7 +624,7 @@
}
bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
- paddr_t end, uint32_t mode,
+ paddr_t end, mm_mode_t mode,
struct mpool *ppool, ipaddr_t *ipa,
struct dma_device_properties *dma_prop)
{
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index 742bd6b..8d46c13 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -15,9 +15,9 @@
* to memory. The flags are shifted to avoid equality of modes and attributes.
*/
#define PTE_ATTR_MODE_SHIFT 48
-#define PTE_ATTR_MODE_MASK \
- ((uint64_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D | \
- MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
+#define PTE_ATTR_MODE_MASK \
+ ((mm_attr_t)(MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_D | \
+ MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED) \
<< PTE_ATTR_MODE_SHIFT)
/* The bit to distinguish a table from a block is the highest of the page bits.
@@ -30,49 +30,49 @@
/* Offset the bits of each level so they can't be misused. */
#define PTE_LEVEL_SHIFT(lvl) ((lvl) * 2)
-pte_t arch_mm_absent_pte(uint8_t level)
+pte_t arch_mm_absent_pte(mm_level_t level)
{
- return ((uint64_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
+ return ((mm_attr_t)(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
<< PTE_ATTR_MODE_SHIFT) >>
PTE_LEVEL_SHIFT(level);
}
-pte_t arch_mm_table_pte(uint8_t level, paddr_t pa)
+pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa)
{
return (pa_addr(pa) | PTE_TABLE) >> PTE_LEVEL_SHIFT(level);
}
-pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs)
+pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs)
{
return (pa_addr(pa) | attrs) >> PTE_LEVEL_SHIFT(level);
}
-bool arch_mm_is_block_allowed(uint8_t level)
+bool arch_mm_is_block_allowed(mm_level_t level)
{
(void)level;
return true;
}
-bool arch_mm_pte_is_present(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_present(pte_t pte, mm_level_t level)
{
return arch_mm_pte_is_valid(pte, level) ||
!(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
MM_MODE_UNOWNED);
}
-bool arch_mm_pte_is_valid(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level)
{
return !(((pte << PTE_LEVEL_SHIFT(level)) >> PTE_ATTR_MODE_SHIFT) &
MM_MODE_INVALID);
}
-bool arch_mm_pte_is_block(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_block(pte_t pte, mm_level_t level)
{
return arch_mm_pte_is_present(pte, level) &&
!arch_mm_pte_is_table(pte, level);
}
-bool arch_mm_pte_is_table(pte_t pte, uint8_t level)
+bool arch_mm_pte_is_table(pte_t pte, mm_level_t level)
{
return (pte << PTE_LEVEL_SHIFT(level)) & PTE_TABLE;
}
@@ -82,28 +82,28 @@
return pa_init(pa_addr(pa) & PTE_ADDR_MASK);
}
-paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level)
{
return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
}
-paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level)
+paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level)
{
return pa_init((pte << PTE_LEVEL_SHIFT(level)) & PTE_ADDR_MASK);
}
-uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level)
+mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level)
{
return (pte << PTE_LEVEL_SHIFT(level)) & PTE_ATTR_MODE_MASK;
}
-uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
- uint64_t block_attrs)
+mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
+ mm_attr_t block_attrs)
{
return table_attrs | block_attrs;
}
-void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
+void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
vaddr_t va_end)
{
(void)asid;
@@ -112,7 +112,7 @@
/* There's no modelling of the stage-1 TLB. */
}
-void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
+void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
ipaddr_t va_end, bool non_secure)
{
(void)vmid;
@@ -135,12 +135,12 @@
(void)pa_bits;
}
-uint8_t arch_mm_stage1_max_level(void)
+mm_level_t arch_mm_stage1_max_level(void)
{
return 2;
}
-uint8_t arch_mm_stage2_max_level(void)
+mm_level_t arch_mm_stage2_max_level(void)
{
return 2;
}
@@ -156,22 +156,22 @@
return 4;
}
-uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode)
{
- return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+ return ((mm_attr_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
}
-uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode)
+mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode)
{
- return ((uint64_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
+ return ((mm_attr_t)mode << PTE_ATTR_MODE_SHIFT) & PTE_ATTR_MODE_MASK;
}
-uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs)
{
return attrs >> PTE_ATTR_MODE_SHIFT;
}
-uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs)
+mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs)
{
return attrs >> PTE_ATTR_MODE_SHIFT;
}
@@ -191,7 +191,7 @@
return true;
}
-uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
+mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id)
{
(void)id;