VHE: Associate a page table with an ASID/VMID
Added an id field to the page table structure (struct mm_ptable). This
will help future changes that invalidate TLB entries by VMID and ASID.
In the near term, this is primarily expected to be useful for EL0
partitions, where we want to be able to invalidate the TLB by ASID when
mapping, unmapping or replacing page table entries for an EL0
partition.
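As an illustrative sketch only (not part of this change), the id could
later feed an ASID-based stage-1 invalidation on AArch64 along these
lines; the helper name is hypothetical and assumes <stdint.h>:

    /* Invalidate all stage-1 TLB entries tagged with the given ASID. */
    static inline void tlb_invalidate_by_asid(uint16_t id)
    {
            /* TLBI ASIDE1IS takes the ASID in bits [63:48] of Xt. */
            uint64_t arg = (uint64_t)id << 48;

            __asm__ volatile("dsb ishst\n"
                             "tlbi aside1is, %0\n"
                             "dsb ish\n"
                             "isb\n"
                             :
                             : "r"(arg)
                             : "memory");
    }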
Change-Id: I3b6ea97eaf4281954ca953cb8f5a40edbf5a2661
Signed-off-by: Raghu Krishnamurthy <raghu.ncstate@gmail.com>
diff --git a/inc/hf/mm.h b/inc/hf/mm.h
index b440d6f..b8ed221 100644
--- a/inc/hf/mm.h
+++ b/inc/hf/mm.h
@@ -82,6 +82,11 @@
"A page table must be page aligned.");
struct mm_ptable {
+ /**
+ * VMID/ASID associated with a page table. ASID 0 is reserved for use by
+ * the hypervisor.
+ */
+ uint16_t id;
/** Address of the root of the page table. */
paddr_t root;
};
@@ -96,10 +101,11 @@
void mm_vm_enable_invalidation(void);
-bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool);
+bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
+ struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(int flags);
-bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
+bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
diff --git a/src/mm.c b/src/mm.c
index c1e7059..c7b7e45 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -202,7 +202,8 @@
/**
* Initialises the given page table.
*/
-bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
+bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
+ struct mpool *ppool)
{
uint8_t i;
size_t j;
@@ -226,7 +227,7 @@
* enabled?
*/
t->root = pa_init((uintpaddr_t)tables);
-
+ t->id = id;
return true;
}
@@ -824,9 +825,9 @@
return got_attrs;
}
-bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
+bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool)
{
- return mm_ptable_init(t, 0, ppool);
+ return mm_ptable_init(t, id, 0, ppool);
}
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
@@ -1064,7 +1065,8 @@
dlog_info("data: %#x - %#x\n", pa_addr(layout_data_begin()),
pa_addr(layout_data_end()));
- if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) {
+ /* ASID 0 is reserved for use by the hypervisor. */
+ if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, ppool)) {
dlog_error("Unable to allocate memory for page table.\n");
return false;
}
diff --git a/src/mm_test.cc b/src/mm_test.cc
index e507fce..63a94f5 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -94,7 +94,7 @@
TEST_F(mm, ptable_init_empty)
{
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
EXPECT_THAT(
get_ptable(ptable),
AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -107,7 +107,7 @@
TEST_F(mm, ptable_init_concatenated_empty)
{
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
EXPECT_THAT(
get_ptable(ptable),
AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
@@ -123,7 +123,7 @@
const paddr_t page_begin = pa_init(0);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
&ppool, nullptr));
@@ -167,7 +167,7 @@
const paddr_t map_end = pa_add(map_begin, 268);
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
&ppool, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
@@ -212,7 +212,7 @@
const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
&ppool, nullptr));
@@ -273,7 +273,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
auto tables = get_ptable(ptable);
@@ -302,7 +302,7 @@
constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
@@ -324,7 +324,7 @@
constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
pa_init(0x5000), mode, &ppool, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
@@ -346,7 +346,7 @@
constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
&ppool, &ipa));
EXPECT_THAT(ipa_addr(ipa), Eq(20));
@@ -366,7 +366,7 @@
constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(
&ptable, pa_init(0),
pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
@@ -386,7 +386,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
pa_init(0xf32'0000'0000'0000), mode,
&ppool, nullptr));
@@ -406,7 +406,7 @@
constexpr uint32_t mode = 0;
ipaddr_t ipa = ipa_init(-1);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
pa_init(0xf0'0000'0000'0000), mode,
&ppool, &ipa));
@@ -427,7 +427,7 @@
const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
@@ -449,7 +449,7 @@
const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
@@ -478,7 +478,7 @@
const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
@@ -500,7 +500,7 @@
const paddr_t page_begin = pa_init(0);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
&ppool));
mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
@@ -546,7 +546,7 @@
const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
const paddr_t last_end = VM_MEM_END;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
mode, &ppool));
ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
@@ -618,7 +618,7 @@
const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
&ppool));
ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
@@ -684,7 +684,7 @@
TEST_F(mm, unmap_not_mapped)
{
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
EXPECT_TRUE(
mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
EXPECT_THAT(
@@ -704,7 +704,7 @@
const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
@@ -726,7 +726,7 @@
const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
@@ -773,7 +773,7 @@
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));
@@ -825,7 +825,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
@@ -845,7 +845,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
@@ -870,7 +870,7 @@
const paddr_t page_begin = pa_init(0x180'0000'0000);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
@@ -912,7 +912,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(
@@ -936,7 +936,7 @@
const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
@@ -955,7 +955,7 @@
TEST_F(mm, is_mapped_empty)
{
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
@@ -969,7 +969,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
@@ -987,7 +987,7 @@
const paddr_t page_begin = pa_init(0x100'0000'0000);
const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
&ppool, nullptr));
EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
@@ -1004,7 +1004,7 @@
{
constexpr uint32_t mode = 0;
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
@@ -1024,7 +1024,7 @@
MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
struct mm_ptable ptable;
uint32_t read_mode;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
read_mode = 0;
EXPECT_TRUE(
@@ -1055,7 +1055,7 @@
const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
struct mm_ptable ptable;
uint32_t read_mode;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
&ppool, nullptr));
@@ -1084,7 +1084,7 @@
constexpr uint32_t mode = MM_MODE_UNOWNED;
struct mm_ptable ptable;
uint32_t read_mode;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
@@ -1104,7 +1104,7 @@
TEST_F(mm, defrag_empty)
{
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
mm_vm_defrag(&ptable, &ppool);
EXPECT_THAT(
get_ptable(ptable),
@@ -1124,7 +1124,7 @@
const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
nullptr));
ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
@@ -1149,7 +1149,7 @@
const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
struct mm_ptable ptable;
- ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
&ppool, nullptr));
ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
diff --git a/src/vm.c b/src/vm.c
index e575d35..a3ecddd 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -52,7 +52,7 @@
atomic_init(&vm->aborting, false);
vm->el0_partition = el0_partition;
- if (!mm_vm_init(&vm->ptable, ppool)) {
+ if (!mm_vm_init(&vm->ptable, id, ppool)) {
return NULL;
}
diff --git a/src/vm_test.cc b/src/vm_test.cc
index 6a9e109..4dd8dc5 100644
--- a/src/vm_test.cc
+++ b/src/vm_test.cc
@@ -68,7 +68,7 @@
EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
vm_locked = vm_lock(vm);
- ASSERT_TRUE(mm_vm_init(&vm->ptable, &ppool));
+ ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
EXPECT_THAT(
mm_test::get_ptable(vm->ptable),
diff --git a/test/hftest/mm.c b/test/hftest/mm.c
index 1e2b528..aa46365 100644
--- a/test/hftest/mm.c
+++ b/test/hftest/mm.c
@@ -48,7 +48,7 @@
HFTEST_FAIL(true, "Failed to add buffer to page-table pool.");
}
- if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, &ppool)) {
+ if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, &ppool)) {
HFTEST_FAIL(true, "Unable to allocate memory for page table.");
}