Support concatenated page tables.

To reduce the depth of the page table tree, some architectures
concatenate top level page tables rather than introducing a new level in
the tree.

The tests now cover more of the memory management code, exercise the
concatenated table logic, and document some of the quirks that exist in
the APIs.
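
As a rough sketch of the arithmetic the new tests rely on (assuming a
4KiB granule with 512 entries per table, a top level of 2 and 4
concatenated root tables, as the tests assert; the names below are
illustrative only):

    #include <cstdint>

    constexpr uint64_t kPageSize = 4096; /* PAGE_SIZE */
    constexpr uint64_t kEntries = 512;   /* MM_PTE_PER_PAGE */
    /* A level-2 block covers 1GiB, so one root table spans 512GiB. */
    constexpr uint64_t kRootSpan =
        kPageSize * kEntries * kEntries * kEntries;
    /* Four concatenated roots give the 2TiB VM_MEM_END used below. */
    static_assert(4 * kRootSpan == 0x200'0000'0000);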

Bug: 117549422
Change-Id: I99991aaf3bfb753dd6176cb9df7a5337ed9c184d
diff --git a/src/mm_test.cc b/src/mm_test.cc
index 7990d27..cd1e663 100644
--- a/src/mm_test.cc
+++ b/src/mm_test.cc
@@ -22,17 +22,28 @@
 #include "hf/alloc.h"
 }
 
+#include <functional>
+#include <limits>
 #include <memory>
+#include <span>
+#include <vector>
 
 #include <gmock/gmock.h>
 
 namespace
 {
-using ::testing::Eq;
+using namespace ::std::placeholders;
 
-constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
+using ::testing::AllOf;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::Eq;
+using ::testing::SizeIs;
+using ::testing::Truly;
+
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
 const int TOP_LEVEL = arch_mm_max_level(0);
 const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);
+const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
 
 /**
  * Calculates the size of the address space represented by a page table entry at
@@ -44,267 +55,723 @@
 }
 
 /**
- * Get the page table from the physical address.
+ * Get an STL representation of the page table.
  */
-struct mm_page_table *page_table_from_pa(paddr_t pa)
+std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
 {
-	return reinterpret_cast<struct mm_page_table *>(
+	auto table = reinterpret_cast<struct mm_page_table *>(
 		ptr_from_va(va_from_pa(pa)));
+	return std::span<pte_t, MM_PTE_PER_PAGE>(
+		table->entries, std::end(table->entries));
 }
 
 /**
- * Allocate a page table.
+ * Get an STL representation of the ptable.
  */
-struct mm_page_table *alloc_page_table()
+std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
+	const struct mm_ptable &ptable, int mode)
 {
-	return reinterpret_cast<struct mm_page_table *>(halloc_aligned(
-		sizeof(struct mm_page_table), alignof(struct mm_page_table)));
-}
-
-/**
- * Fill a ptable with absent entries.
- */
-void init_absent(struct mm_page_table *table)
-{
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		table->entries[i] = ABSENT_ENTRY;
+	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
+	const uint8_t root_table_count = arch_mm_root_table_count(mode);
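+	/* Concatenated root tables are stored as contiguous pages. */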
+	for (uint8_t i = 0; i < root_table_count; ++i) {
+		all.push_back(get_table(
+			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
 	}
+	return all;
 }
 
-/**
- * Fill a ptable with block entries.
- */
-void init_blocks(struct mm_page_table *table, int level, paddr_t start_address,
-		 uint64_t attrs)
+class mm : public ::testing::Test
 {
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		table->entries[i] = arch_mm_block_pte(
-			level, pa_add(start_address, i * mm_entry_size(level)),
-			attrs);
+	void SetUp() override
+	{
+		/*
+		 * TODO: replace with direct use of stdlib allocator so
+		 * sanitizers are more effective.
+		 */
+		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
+		halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
 	}
-}
+
+	std::unique_ptr<uint8_t[]> test_heap;
+};
 
 /**
- * Defragging an entirely empty table should have no effect.
+ * A new table is initially empty.
  */
-TEST(mm, ptable_defrag_empty)
+TEST_F(mm, ptable_init_empty)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
+	constexpr int mode = MM_MODE_STAGE1;
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(1), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 /**
- * Defragging a table with some empty subtables (even nested) should result in
+ * Each new concatenated table is initially empty.
+ */
+TEST_F(mm, ptable_init_concatenated_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Only the first page is mapped; all other entries are left absent.
+ */
+TEST_F(mm, map_first_page)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the first page is mapped and nothing else. */
+	EXPECT_THAT(std::span(tables).last(3), Each(Each(ABSENT_ENTRY)));
+
+	auto table_l2 = tables.front();
+	EXPECT_THAT(table_l2.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
+
+	auto table_l1 = get_table(arch_mm_table_from_pte(table_l2[0]));
+	EXPECT_THAT(table_l1.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
+
+	auto table_l0 = get_table(arch_mm_table_from_pte(table_l1[0]));
+	EXPECT_THAT(table_l0.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0])),
+		    Eq(pa_addr(page_begin)));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * The start address is rounded down and the end address is rounded up to page
+ * boundaries.
+ */
+TEST_F(mm, map_round_to_page)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
+	const paddr_t map_end = pa_add(map_begin, 268);
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check that the last page is mapped, and nothing else. */
+	EXPECT_THAT(std::span(tables).first(3), Each(Each(ABSENT_ENTRY)));
+
+	auto table_l2 = tables.back();
+	EXPECT_THAT(table_l2.first(table_l2.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));
+
+	auto table_l1 = get_table(arch_mm_table_from_pte(table_l2.last(1)[0]));
+	EXPECT_THAT(table_l1.first(table_l1.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table_l0 = get_table(arch_mm_table_from_pte(table_l1.last(1)[0]));
+	EXPECT_THAT(table_l0.first(table_l0.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0])),
+		    Eq(0x200'0000'0000 - PAGE_SIZE));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Map a two page range over the boundary of two tables.
+ */
+TEST_F(mm, map_across_tables)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(tables, SizeIs(4));
+	EXPECT_THAT(std::span(tables).last(2), Each(Each(ABSENT_ENTRY)));
+	ASSERT_THAT(TOP_LEVEL, Eq(2));
+
+	/* Check only the last page of the first table is mapped. */
+	auto table0_l2 = tables.front();
+	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
+
+	auto table0_l1 =
+		get_table(arch_mm_table_from_pte(table0_l2.last(1)[0]));
+	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
+
+	auto table0_l0 =
+		get_table(arch_mm_table_from_pte(table0_l1.last(1)[0]));
+	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0])),
+		    Eq(pa_addr(map_begin)));
+
+	/* Check only the first page of the second table is mapped. */
+	auto table1_l2 = tables[1];
+	EXPECT_THAT(table1_l2.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
+
+	auto table1_l1 = get_table(arch_mm_table_from_pte(table1_l2[0]));
+	EXPECT_THAT(table1_l1.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
+
+	auto table1_l0 = get_table(arch_mm_table_from_pte(table1_l1[0]));
+	EXPECT_THAT(table1_l0.subspan(1), Each(ABSENT_ENTRY));
+	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
+	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table1_l0[0])),
+		    Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
+
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping all of memory creates blocks at the highest level.
+ */
+TEST_F(mm, map_all_at_top_level)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	auto tables = get_ptable(ptable, mode);
+	EXPECT_THAT(
+		tables,
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
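+	/*
+	 * Each concatenated root table covers mm_entry_size(TOP_LEVEL + 1) of
+	 * the address space, hence the extra offset by the root table index.
+	 */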
+	for (uint64_t i = 0; i < tables.size(); ++i) {
+		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
+			EXPECT_THAT(
+				pa_addr(arch_mm_block_from_pte(tables[i][j])),
+				Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
+				   (j * mm_entry_size(TOP_LEVEL))))
+				<< "i=" << i << " j=" << j;
+		}
+	}
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping all of memory and then trying to map a page again doesn't introduce
+ * a special mapping for that particular page.
+ */
+TEST_F(mm, map_already_mapped)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
+				       mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no mappings are made.
+ */
+TEST_F(mm, map_reverse_range)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
+				       pa_init(0x5000), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a reverse range in the same page will map the page because the start
+ * of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_reverse_range_quirk)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
+				       &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(20));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary, meaning no memory is mapped.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, map_last_address_quirk)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(0));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range that extends beyond the available memory clamps the mapping
+ * to the available range.
+ */
+TEST_F(mm, map_clamp_to_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
+				       pa_init(0xf32'0000'0000'0000), mode,
+				       nullptr));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a range outside of the available memory is ignored and doesn't alter
+ * the page tables.
+ */
+TEST_F(mm, map_ignore_out_of_range)
+{
+	constexpr int mode = 0;
+	ipaddr_t ipa = ipa_init(-1);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(
+		&ptable, VM_MEM_END, pa_init(0xf0'0000'0000'0000), mode, &ipa));
+	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping a single page and then mapping all of memory replaces the single
+ * page mapping with a higher level block mapping.
+ */
+TEST_F(mm, map_block_replaces_table)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * After mapping all memory at the top level, unmapping a page and remapping it
+ * at a lower level does not result in all memory being mapped at the top level
+ * again.
+ */
+TEST_F(mm, map_does_not_defrag)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4),
+			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
+						    TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
+			  Contains(Contains(Truly(std::bind(
+				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * If nothing is mapped, unmapping the hypervisor has no effect.
+ */
+TEST_F(mm, vm_unmap_hypervisor_not_mapped)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * If the range is not mapped, unmapping has no effect.
+ */
+TEST_F(mm, unmap_not_mapped)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_TRUE(
+		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping everything should result in an empty page table with no subtables.
+ */
+TEST_F(mm, unmap_all)
+{
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
+	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * The range to unmap is rounded to the containing pages.
+ */
+TEST_F(mm, unmap_round_to_page)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
+				pa_add(map_begin, 99), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmap a range of page mappings that spans multiple concatenated tables.
+ */
+TEST_F(mm, unmap_across_tables)
+{
+	constexpr int mode = 0;
+	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
+	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping outside the range of memory has no effect.
+ */
+TEST_F(mm, unmap_out_of_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
+				mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a reverse range, i.e. the end comes before the start, is treated as
+ * an empty range so no change is made.
+ */
+TEST_F(mm, unmap_reverse_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
+				mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a reverse range in the same page will unmap the page because the
+ * start of the range is rounded down and the end is rounded up.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_reverse_range_quirk)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0x180'0000'0000);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
+				pa_add(page_begin, 50), mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Unmapping a range up to the maximum address causes the range end to wrap to
+ * zero as it is rounded up to a page boundary, meaning no change is made.
+ *
+ * This serves as a form of documentation of behaviour rather than a
+ * requirement. Check whether any code relies on this before changing it.
+ */
+TEST_F(mm, unmap_last_address_quirk)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(
+		&ptable, pa_init(0),
+		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode));
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Mapping then unmapping a page does not defrag the table.
+ */
+TEST_F(mm, unmap_does_not_defrag)
+{
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Nothing is mapped in an empty table.
+ */
+TEST_F(mm, is_mapped_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Everything is mapped in a full table.
+ */
+TEST_F(mm, is_mapped_all)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * A page is mapped for the range [begin, end).
+ */
+TEST_F(mm, is_mapped_page)
+{
+	constexpr int mode = 0;
+	const paddr_t page_begin = pa_init(0x100'0000'0000);
+	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
+				       nullptr));
+	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
+	EXPECT_TRUE(mm_vm_is_mapped(
+		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Everything out of range is not mapped.
+ */
+TEST_F(mm, is_mapped_out_of_range)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
+	EXPECT_FALSE(
+		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
+	EXPECT_FALSE(mm_vm_is_mapped(
+		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
+		mode));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Defragging an entirely empty table has no effect.
+ */
+TEST_F(mm, defrag_empty)
+{
+	constexpr int mode = 0;
+	struct mm_ptable ptable;
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	mm_ptable_defrag(&ptable, mode);
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
+}
+
+/**
+ * Defragging a table with some empty subtables (even nested) results in
  * an empty table.
  */
-TEST(mm, ptable_defrag_empty_subtables)
+TEST_F(mm, defrag_empty_subtables)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *subtable_b = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(subtable_a);
-	init_absent(subtable_aa);
-	init_absent(subtable_b);
-	init_absent(table);
-
-	subtable_a->entries[3] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table->entries[5] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
-
+	constexpr int mode = 0;
+	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
+	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
+	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
+	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
+	ASSERT_TRUE(
+		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
 	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
+	EXPECT_THAT(get_ptable(ptable, mode),
+		    AllOf(SizeIs(4), Each(Each(ABSENT_ENTRY))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 /**
  * Any subtable with all blocks with the same attributes should be replaced
  * with a single block.
  */
-TEST(mm, ptable_defrag_block_subtables)
+TEST_F(mm, defrag_block_subtables)
 {
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *subtable_b = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_blocks(subtable_a, TOP_LEVEL - 1, pa_init(0), 0);
-	init_blocks(subtable_aa, TOP_LEVEL - 2,
-		    pa_init(3 * mm_entry_size(TOP_LEVEL - 1)), 0);
-	init_blocks(subtable_b, TOP_LEVEL - 1,
-		    pa_init(5 * mm_entry_size(TOP_LEVEL)), 0);
-	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
-
-	subtable_a->entries[3] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-	table->entries[5] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
-
+	constexpr int mode = 0;
+	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
+	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
+	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
 	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
+	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
+				       nullptr));
+	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr));
+	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr));
 	mm_ptable_defrag(&ptable, 0);
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_TRUE(
-			arch_mm_pte_is_present(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-		EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table->entries[i])),
-			    Eq(i * mm_entry_size(TOP_LEVEL)))
-			<< "i=" << i;
-	}
-}
-
-/** If nothing is mapped, unmapping the hypervisor should have no effect. */
-TEST(mm, ptable_unmap_hypervisor_not_mapped)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
-
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	EXPECT_TRUE(mm_ptable_unmap_hypervisor(&ptable, 0));
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-}
-
-/**
- * Unmapping everything should result in an empty page table with no subtables.
- */
-TEST(mm, vm_unmap)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	struct mm_page_table *subtable_a = alloc_page_table();
-	struct mm_page_table *subtable_aa = alloc_page_table();
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(subtable_a);
-	init_absent(subtable_aa);
-	init_absent(table);
-
-	subtable_aa->entries[0] =
-		arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
-	subtable_a->entries[0] = arch_mm_table_pte(
-		TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
-	table->entries[0] =
-		arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
-
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), pa_init(1), 0));
-
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-}
-
-/**
- * Mapping a range should result in just the corresponding pages being mapped.
- */
-TEST(mm, vm_identity_map)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with an empty page table. */
-	struct mm_page_table *table = alloc_page_table();
-	init_absent(table);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/* Check that the first page is mapped, and nothing else. */
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
-	}
-	ASSERT_TRUE(arch_mm_pte_is_table(table->entries[0], TOP_LEVEL));
-	struct mm_page_table *subtable_a =
-		page_table_from_pa(arch_mm_table_from_pte(table->entries[0]));
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(subtable_a->entries[i], Eq(ABSENT_ENTRY))
-			<< "i=" << i;
-	}
-	ASSERT_TRUE(
-		arch_mm_pte_is_table(subtable_a->entries[0], TOP_LEVEL - 1));
-	struct mm_page_table *subtable_aa = page_table_from_pa(
-		arch_mm_table_from_pte(subtable_a->entries[0]));
-	for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_THAT(subtable_aa->entries[i], Eq(ABSENT_ENTRY))
-			<< "i=" << i;
-	}
-	EXPECT_TRUE(
-		arch_mm_pte_is_block(subtable_aa->entries[0], TOP_LEVEL - 2));
-	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa->entries[0])),
-		    Eq(0));
-}
-
-/** Mapping a range that is already mapped should be a no-op. */
-TEST(mm, vm_identity_map_already_mapped)
-{
-	auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
-	halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
-
-	/* Start with a full page table mapping everything. */
-	struct mm_page_table *table = alloc_page_table();
-	init_blocks(table, TOP_LEVEL, pa_init(0), 0);
-	struct mm_ptable ptable;
-	ptable.table = pa_init((uintpaddr_t)table);
-
-	/* Try mapping the first page. */
-	ipaddr_t ipa = ipa_init(-1);
-	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
-				       0, &ipa));
-	EXPECT_THAT(ipa_addr(ipa), Eq(0));
-
-	/*
-	 * The table should still be full of blocks, with no subtables or
-	 * anything else.
-	 */
-	for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
-		EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
-			<< "i=" << i;
-	}
+	EXPECT_THAT(
+		get_ptable(ptable, mode),
+		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
+							   _1, TOP_LEVEL))))));
+	mm_ptable_fini(&ptable, mode);
 }
 
 } /* namespace */