/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const mm_level_t TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
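/*
 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (a 4 KiB
 * granule with 512 entries per table), this gives 4 KiB at level 0, 2 MiB at
 * level 1 and 1 GiB at level 2.
 */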
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *ptable, ipaddr_t ipa)
{
        mm_mode_t mode;
        return mm_vm_get_mode(ptable, ipa, ipa_add(ipa, 1), &mode) &&
               (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
        auto table = reinterpret_cast<struct mm_page_table *>(
                ptr_from_va(va_from_pa(pa)));
        return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
                                                 std::end(table->entries));
}

class mm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
                ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
        }

        void TearDown() override
        {
                mm_vm_fini(&ptable, &ppool);
        }

        std::unique_ptr<uint8_t[]> test_heap;

       protected:
        struct mpool ppool;
        struct mm_ptable ptable;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                    Eq(pa_addr(page_begin)));
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
        const paddr_t map_end = pa_add(map_begin, 268);
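        /*
         * Both addresses fall within the final page before VM_MEM_END:
         * map_begin is 23 bytes into it and map_end 291 bytes into it, so
         * after rounding exactly that last page should end up mapped.
         */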
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the last page is mapped, and nothing else. */
        EXPECT_THAT(std::span(tables).first(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.back();
        EXPECT_THAT(table_l2.first(table_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.first(table_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.first(table_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(0x200'0000'0000 - PAGE_SIZE));
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(map_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        auto tables = get_ptable(ptable);
        EXPECT_THAT(
                tables,
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        for (uint64_t i = 0; i < tables.size(); ++i) {
                for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
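                        /*
                         * Identity mapping: root table i, entry j should map
                         * the block starting at
                         * i * mm_entry_size(TOP_LEVEL + 1) +
                         * j * mm_entry_size(TOP_LEVEL), i.e. i * 512 GiB +
                         * j * 1 GiB with the granule assumed above.
                         */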
                        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
                                                                   TOP_LEVEL)),
                                    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
                                       (j * mm_entry_size(TOP_LEVEL))))
                                << "i=" << i << " j=" << j;
                }
        }
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
        constexpr mm_mode_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                                       mode, &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
        constexpr mm_mode_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
                                       pa_init(0x5000), mode, &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
        constexpr mm_mode_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
                                       &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(20));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
        constexpr mm_mode_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
                &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
                                       pa_init(0xf32'0000'0000'0000), mode,
                                       &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
        constexpr mm_mode_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
                                       pa_init(0xf0'0000'0000'0000), mode,
                                       &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Map a single page and then map all of memory, which replaces the single page
 * mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
                                                    TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
                                       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
                                           &ppool));
        mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
                              nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                    Eq(pa_addr(page_begin)));
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t first_begin = pa_init(0);
        const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
        const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
        const paddr_t last_end = VM_MEM_END;
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
                                           mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
                                           &ppool));
        mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
                              nullptr);
        mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
                              nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first and last pages are mapped and nothing else. */
        EXPECT_THAT(std::span(tables).subspan(1, 2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Check the first page. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

        auto table0_l1 =
                get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

        auto table0_l0 =
                get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(first_begin)));

        /* Check the last page. */
        auto table3_l2 = tables.back();
        EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

        auto table3_l1 = get_table(
                arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

        auto table3_l0 = get_table(
                arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(last_begin)));
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
        const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
                                           &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
                                           &ppool));
        mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
                              nullptr);
        mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
                              nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(low_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(high_begin)));
}

/**
 * If a range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
        EXPECT_TRUE(
                mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);

        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
                                pa_add(map_begin, 99), &ppool));

        auto tables = get_ptable(ptable);
        constexpr auto l3_index = 2;
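        /*
         * With 512 GiB per root table and 1 GiB top-level entries (as assumed
         * above), 0x160'0000'0000 falls in root table 2 at top-level entry
         * 0x60'0000'0000 / 1 GiB == 384, hence l3_index here and l2_index
         * below.
         */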

        /* Check all other top level entries are empty... */
        EXPECT_THAT(std::span(tables).first(l3_index),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* ...except the previously mapped page, which is now absent. */
        auto table_l2 = tables[l3_index];
        constexpr auto l2_index = 384;
        EXPECT_THAT(table_l2.first(l2_index),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(l2_index + 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);

        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

        auto tables = get_ptable(ptable);

        /* Check the untouched tables are empty. */
        EXPECT_THAT(std::span(tables).first(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Check the last page is explicitly marked as absent. */
        auto table2_l2 = tables[2];
        EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

        auto table2_l1 = get_table(
                arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

        auto table2_l0 = get_table(
                arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        /* Check the first page is explicitly marked as absent. */
        auto table3_l2 = tables[3];
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table3_l1 = get_table(
                arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table3_l0 = get_table(
                arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated
 * as an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0x180'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
                                pa_add(page_begin, 50), &ppool));

        auto tables = get_ptable(ptable);
        constexpr auto l3_index = 3;
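        /*
         * 0x180'0000'0000 is the base of the fourth 512 GiB root table (index
         * 3) and of that table's first 1 GiB entry, hence l3_index == 3 and
         * the use of the first entry at each level below.
         */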

        /* Check all other top level entries are empty... */
        EXPECT_THAT(std::span(tables).first(l3_index),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* ...except the previously mapped page, which is now absent. */
        auto table_l2 = tables[l3_index];
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0x100'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
        EXPECT_TRUE(
                mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
        EXPECT_FALSE(mm_vm_is_mapped(
                &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
        constexpr int default_mode =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        mm_mode_t read_mode;

        read_mode = 0;
        EXPECT_TRUE(
                mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
                                   ipa_init(0x3c97'e000), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
                                   ipa_init(0x1ff'ffff'ffff), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));
}

/**
 * Get the mode of a range comprised of individual pages which are on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
        constexpr mm_mode_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        mm_mode_t read_mode;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                   &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));

        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                    &read_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(map_end), &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
        constexpr mm_mode_t mode = MM_MODE_UNOWNED;
        mm_mode_t read_mode;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
                                    ipa_init(2'0000'0000'0000), &read_mode));
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t begin = pa_init(39456 * mm_entry_size(1));
        const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
        const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
                                       nullptr));
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
        const struct mm_ptable &ptable)
{
        std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
        const uint8_t root_table_count = arch_mm_stage2_root_table_count();
        for (uint8_t i = 0; i < root_table_count; ++i) {
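                /*
                 * Concatenated root tables are allocated contiguously, so the
                 * i-th root table starts i page-table-sized steps past
                 * ptable.root.
                 */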
                all.push_back(get_table(
                        pa_add(ptable.root, i * sizeof(struct mm_page_table))));
        }
        return all;
}

} /* namespace mm_test */