/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const mm_level_t TOP_LEVEL = arch_mm_stage2_root_level() - 1;
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
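
/*
 * A worked example of the layout these constants assume with 4 KiB pages:
 * TOP_LEVEL is 2, a top-level entry maps a 1 GiB block, each root table
 * spans 512 GiB, and VM_MEM_END (2 TiB) covers exactly the 4 concatenated
 * root tables that the tests assert via SizeIs(4).
 */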

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
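
/*
 * For example, with PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (4 KiB pages,
 * 512 entries per table), entries span 4 KiB at level 0, 2 MiB at level 1
 * and 1 GiB at level 2.
 */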

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *ptable, ipaddr_t ipa)
{
	mm_mode_t mode;
	return mm_vm_get_mode(ptable, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}
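
/*
 * For example, nothing is mapped in a freshly initialised table, so
 * mm_vm_is_mapped(&ptable, ipa_init(0)) is false until the page at
 * address 0 is identity mapped (see the is_mapped_* tests below).
 */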

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(struct mm_page_table *table)
{
	return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
						 std::end(table->entries));
}
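
/*
 * The tests below walk the table hierarchy by combining this with
 * arch_mm_table_from_pte, e.g.:
 *   auto table_l1 = get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
 */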

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
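		/*
		 * Back the page pool with the test heap; each pool entry is
		 * one page table, matching sizeof(struct mm_page_table).
		 */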
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
		ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	}

	void TearDown() override
	{
		mm_vm_fini(&ptable, &ppool);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
	struct mm_ptable ptable;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, &ipa));
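	/*
	 * map_begin rounds down to the start of the final page and
	 * map_begin + 268 rounds up to 0x200'0000'0000, so exactly the last
	 * page is mapped.
	 */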
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));
}

/**
 * Map a two-page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr mm_mode_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr mm_mode_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr mm_mode_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ppool, &ipa));
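	/* [20, 10) rounds out to [0, PAGE_SIZE), so the first page is mapped. */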
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr mm_mode_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
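	/*
	 * Rounding the end up to a page boundary wraps it past the maximum
	 * address to zero, leaving an empty [0, 0) range.
	 */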
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * A mapping that goes beyond the available memory is clamped to the available
 * range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr mm_mode_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Mapping a single page and then mapping all of memory replaces the single
 * page mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
					   mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(last_begin)));
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
					   &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);

	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));
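	/*
	 * [map_begin + 93, map_begin + 99) rounds out to the containing page,
	 * so the whole page is unmapped.
	 */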

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except the previously mapped page, which is now absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);

	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));
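	/*
	 * [page_begin + 100, page_begin + 50) rounds out to the whole page,
	 * so the page is unmapped despite the reversed bounds.
	 */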

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except the previously mapped page, which is now absent. */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
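	/*
	 * As in map_last_address_quirk, the end wraps to zero when rounded
	 * up, so the unmap range is empty and the mappings remain.
	 */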
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr mm_mode_t mode = 0;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr mm_mode_t default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	mm_mode_t read_mode;

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));
}

/**
 * Get the mode of a range comprised of individual pages on either side of a
 * root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr mm_mode_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	mm_mode_t read_mode;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr mm_mode_t mode = MM_MODE_UNOWNED;
	mm_mode_t read_mode;
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(0x2'0000'0000'0000), &read_mode));
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	mm_vm_defrag(&ptable, &ppool, false);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool, false);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr mm_mode_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
				       nullptr));
	mm_vm_defrag(&ptable, &ppool, false);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(&ptable.root_tables[i]));
	}
	return all;
}

} /* namespace mm_test */