/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const mm_level_t TOP_LEVEL = arch_mm_stage2_root_level() - 1;
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
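
/*
 * A sketch of the geometry the tests below rely on (and assert): the stage-2
 * configuration they are built for gives TOP_LEVEL == 2 and four concatenated
 * root tables, so assuming 4 KiB pages each root table covers 512 GiB and
 * VM_MEM_END (2 TiB) is exactly the end of the fourth root table.
 */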

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *ptable, ipaddr_t ipa)
{
        mm_mode_t mode;
        return mm_vm_get_mode(ptable, ipa, ipa_add(ipa, 1), &mode) &&
               (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(struct mm_page_table *table)
{
        return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
                                                 std::end(table->entries));
}

class mm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
                ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
        }

        void TearDown() override
        {
                mm_vm_fini(&ptable, &ppool);
        }

        std::unique_ptr<uint8_t[]> test_heap;

       protected:
        struct mpool ppool;
        struct mm_ptable ptable;
};
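
/*
 * Each test therefore starts from a freshly initialised, empty stage-2 page
 * table whose backing pages come from the 16-page test heap above, and
 * mm_vm_fini in TearDown returns the table pages to the pool.
 */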

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                    Eq(pa_addr(page_begin)));
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
        const paddr_t map_end = pa_add(map_begin, 268);
        ipaddr_t ipa = ipa_init(-1);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the last page is mapped, and nothing else. */
        EXPECT_THAT(std::span(tables).first(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.back();
        EXPECT_THAT(table_l2.first(table_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.first(table_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.first(table_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(0x200'0000'0000 - PAGE_SIZE));
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(map_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        auto tables = get_ptable(ptable);
        EXPECT_THAT(
                tables,
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
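        /*
         * Root table i starts at i * mm_entry_size(TOP_LEVEL + 1) and entry j
         * within it a further j * mm_entry_size(TOP_LEVEL) on, so an identity
         * mapping must put block (i, j) at exactly that address.
         */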
270 for (uint64_t i = 0; i < tables.size(); ++i) {
271 for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
Andrew Scull3681b8d2018-12-12 14:22:59 +0000272 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
273 TOP_LEVEL)),
274 Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
275 (j * mm_entry_size(TOP_LEVEL))))
Andrew Scull1ba470e2018-10-31 15:14:31 +0000276 << "i=" << i << " j=" << j;
277 }
278 }
Andrew Scull1ba470e2018-10-31 15:14:31 +0000279}
280
281/**
282 * Map all memory then trying to map a page again doesn't introduce a special
283 * mapping for that particular page.
284 */
285TEST_F(mm, map_already_mapped)
286{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000287 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000288 ipaddr_t ipa = ipa_init(-1);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000289 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000290 &ppool, nullptr));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000291 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000292 mode, &ppool, &ipa));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000293 EXPECT_THAT(ipa_addr(ipa), Eq(0));
294 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000295 get_ptable(ptable),
Andrew Scull1ba470e2018-10-31 15:14:31 +0000296 AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
297 _1, TOP_LEVEL))))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000298}
299
300/**
301 * Mapping a reverse range, i.e. the end comes before the start, is treated as
302 * an empty range so no mappings are made.
Karl Meakin30506952025-02-18 18:13:06 +0000303 *
304 * This serves as a form of documentation of behaviour rather than a
305 * requirement. Check whether any code relies on this before changing it.
Andrew Scull1ba470e2018-10-31 15:14:31 +0000306 */
Karl Meakin30506952025-02-18 18:13:06 +0000307TEST_F(mm, map_reverse_range_quirk)
Andrew Scull1ba470e2018-10-31 15:14:31 +0000308{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000309 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000310 ipaddr_t ipa = ipa_init(-1);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000311 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000312 pa_init(0x5000), mode, &ppool, &ipa));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000313 EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
Andrew Scull3681b8d2018-12-12 14:22:59 +0000314 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000315 get_ptable(ptable),
Andrew Scull3681b8d2018-12-12 14:22:59 +0000316 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000317}
318
319/**
320 * Mapping a reverse range in the same page will map the page because the start
321 * of the range is rounded down and the end is rounded up.
322 *
323 * This serves as a form of documentation of behaviour rather than a
324 * requirement. Check whether any code relies on this before changing it.
325 */
Karl Meakin30506952025-02-18 18:13:06 +0000326TEST_F(mm, map_reverse_range_rounded_quirk)
Andrew Scull1ba470e2018-10-31 15:14:31 +0000327{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000328 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000329 ipaddr_t ipa = ipa_init(-1);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000330 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000331 &ppool, &ipa));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000332 EXPECT_THAT(ipa_addr(ipa), Eq(20));
Andrew Scull81e85092018-12-12 12:56:20 +0000333 EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
Andrew Scullda3df7f2019-01-05 17:49:27 +0000334 mm_vm_fini(&ptable, &ppool);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000335}
336
337/**
338 * Mapping a range up to the maximum address causes the range end to wrap to
339 * zero as it is rounded up to a page boundary meaning no memory is mapped.
340 *
341 * This serves as a form of documentation of behaviour rather than a
342 * requirement. Check whether any code relies on this before changing it.
343 */
344TEST_F(mm, map_last_address_quirk)
345{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000346 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000347 ipaddr_t ipa = ipa_init(-1);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000348 ASSERT_TRUE(mm_vm_identity_map(
349 &ptable, pa_init(0),
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000350 pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
351 &ipa));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000352 EXPECT_THAT(ipa_addr(ipa), Eq(0));
Andrew Scull3681b8d2018-12-12 14:22:59 +0000353 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000354 get_ptable(ptable),
Andrew Scull3681b8d2018-12-12 14:22:59 +0000355 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000356}
357
358/**
359 * Mapping a range that goes beyond the available memory clamps to the available
360 * range.
361 */
362TEST_F(mm, map_clamp_to_range)
363{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000364 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000365 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
366 pa_init(0xf32'0000'0000'0000), mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000367 &ppool, nullptr));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000368 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000369 get_ptable(ptable),
Andrew Scull1ba470e2018-10-31 15:14:31 +0000370 AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
371 _1, TOP_LEVEL))))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000372}
373
374/**
375 * Mapping a range outside of the available memory is ignored and doesn't alter
376 * the page tables.
377 */
378TEST_F(mm, map_ignore_out_of_range)
379{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000380 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000381 ipaddr_t ipa = ipa_init(-1);
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000382 ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000383 pa_init(0xf0'0000'0000'0000), mode,
384 &ppool, &ipa));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000385 EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
Andrew Scull3681b8d2018-12-12 14:22:59 +0000386 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000387 get_ptable(ptable),
Andrew Scull3681b8d2018-12-12 14:22:59 +0000388 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000389}
390
391/**
392 * Map a single page and then map all of memory which replaces the single page
393 * mapping with a higher level block mapping.
394 */
395TEST_F(mm, map_block_replaces_table)
396{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000397 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000398 const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
399 const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000400 ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000401 &ppool, nullptr));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000402 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000403 &ppool, nullptr));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000404 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000405 get_ptable(ptable),
Andrew Scull1ba470e2018-10-31 15:14:31 +0000406 AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
407 _1, TOP_LEVEL))))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000408}
409
410/**
411 * Map all memory at the top level, unmapping a page and remapping at a lower
412 * level does not result in all memory being mapped at the top level again.
413 */
414TEST_F(mm, map_does_not_defrag)
415{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000416 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000417 const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
418 const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000419 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000420 &ppool, nullptr));
Andrew Scullda241972019-01-05 18:17:48 +0000421 ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000422 ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000423 &ppool, nullptr));
Andrew Scullda3df7f2019-01-05 17:49:27 +0000424 EXPECT_THAT(get_ptable(ptable),
Andrew Scull1ba470e2018-10-31 15:14:31 +0000425 AllOf(SizeIs(4),
426 Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
427 TOP_LEVEL)))),
428 Contains(Contains(Truly(std::bind(
429 arch_mm_pte_is_block, _1, TOP_LEVEL)))),
430 Contains(Contains(Truly(std::bind(
431 arch_mm_pte_is_table, _1, TOP_LEVEL))))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000432}
433
434/**
Andrew Scull73b89542019-11-20 17:31:26 +0000435 * Mapping with a mode that indicates unmapping results in the addresses being
436 * unmapped with absent entries.
437 */
438TEST_F(mm, map_to_unmap)
439{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000440 constexpr mm_mode_t mode = 0;
Andrew Scull73b89542019-11-20 17:31:26 +0000441 const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
442 const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
443 const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
444 const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000445 ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
446 nullptr));
447 ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
448 nullptr));
Andrew Scull73b89542019-11-20 17:31:26 +0000449 EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000450 MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
Andrew Scull73b89542019-11-20 17:31:26 +0000451 EXPECT_THAT(
452 get_ptable(ptable),
453 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull73b89542019-11-20 17:31:26 +0000454}
455
Andrew Scull4e83cef2019-11-19 14:17:54 +0000456/*
457 * Preparing and committing an address range works the same as mapping it.
458 */
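/*
 * (The split API presumably exists so that mm_vm_identity_prepare can allocate
 * any page-table memory up front and the later mm_vm_identity_commit cannot
 * fail; the tests below only rely on the combined effect matching
 * mm_vm_identity_map.)
 */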
459TEST_F(mm, prepare_and_commit_first_page)
460{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000461 constexpr mm_mode_t mode = 0;
Andrew Scull4e83cef2019-11-19 14:17:54 +0000462 const paddr_t page_begin = pa_init(0);
463 const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
Andrew Scull4e83cef2019-11-19 14:17:54 +0000464 ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
465 &ppool));
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000466 mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
467 nullptr);
Andrew Scull4e83cef2019-11-19 14:17:54 +0000468
469 auto tables = get_ptable(ptable);
470 EXPECT_THAT(tables, SizeIs(4));
471 ASSERT_THAT(TOP_LEVEL, Eq(2));
472
473 /* Check that the first page is mapped and nothing else. */
474 EXPECT_THAT(std::span(tables).last(3),
475 Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
476
477 auto table_l2 = tables.front();
478 EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
479 ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));
480
481 auto table_l1 =
482 get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
483 EXPECT_THAT(table_l1.subspan(1),
484 Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
485 ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));
486
487 auto table_l0 =
488 get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
489 EXPECT_THAT(table_l0.subspan(1),
490 Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
491 ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
492 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
493 Eq(pa_addr(page_begin)));
Andrew Scull4e83cef2019-11-19 14:17:54 +0000494}
495
496/**
497 * Disjoint address ranges can be prepared and committed together.
498 */
499TEST_F(mm, prepare_and_commit_disjoint_regions)
500{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000501 constexpr mm_mode_t mode = 0;
Andrew Scull4e83cef2019-11-19 14:17:54 +0000502 const paddr_t first_begin = pa_init(0);
503 const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
504 const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
505 const paddr_t last_end = VM_MEM_END;
Andrew Scull4e83cef2019-11-19 14:17:54 +0000506 ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
507 mode, &ppool));
508 ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
509 &ppool));
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000510 mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
511 nullptr);
512 mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
513 nullptr);
Andrew Scull4e83cef2019-11-19 14:17:54 +0000514
515 auto tables = get_ptable(ptable);
516 EXPECT_THAT(tables, SizeIs(4));
517 ASSERT_THAT(TOP_LEVEL, Eq(2));
518
519 /* Check that the first and last pages are mapped and nothing else. */
520 EXPECT_THAT(std::span(tables).subspan(1, 2),
521 Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
522
523 /* Check the first page. */
524 auto table0_l2 = tables.front();
525 EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
526 ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));
527
528 auto table0_l1 =
529 get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
530 EXPECT_THAT(table0_l1.subspan(1),
531 Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
532 ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));
533
534 auto table0_l0 =
535 get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
536 EXPECT_THAT(table0_l0.subspan(1),
537 Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
538 ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
539 EXPECT_THAT(
540 pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
541 Eq(pa_addr(first_begin)));
542
543 /* Check the last page. */
544 auto table3_l2 = tables.back();
545 EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
546 Each(arch_mm_absent_pte(TOP_LEVEL)));
547 ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));
548
549 auto table3_l1 = get_table(
550 arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
551 EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
552 Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
553 ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));
554
555 auto table3_l0 = get_table(
556 arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
557 EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
558 Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
559 ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
560 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
561 TOP_LEVEL - 2)),
562 Eq(pa_addr(last_begin)));
Andrew Scull4e83cef2019-11-19 14:17:54 +0000563}
564
565/**
566 * Overlapping address ranges can be prepared and committed together.
567 */
568TEST_F(mm, prepare_and_commit_overlapping_regions)
569{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000570 constexpr mm_mode_t mode = 0;
Andrew Scull4e83cef2019-11-19 14:17:54 +0000571 const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
572 const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
573 const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
Andrew Scull4e83cef2019-11-19 14:17:54 +0000574 ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
575 &ppool));
576 ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
577 &ppool));
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000578 mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
579 nullptr);
580 mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
581 nullptr);
Andrew Scull4e83cef2019-11-19 14:17:54 +0000582
583 auto tables = get_ptable(ptable);
584 EXPECT_THAT(tables, SizeIs(4));
585 EXPECT_THAT(std::span(tables).last(2),
586 Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
587 ASSERT_THAT(TOP_LEVEL, Eq(2));
588
589 /* Check only the last page of the first table is mapped. */
590 auto table0_l2 = tables.front();
591 EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
592 Each(arch_mm_absent_pte(TOP_LEVEL)));
593 ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));
594
595 auto table0_l1 = get_table(
596 arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
597 EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
598 Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
599 ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));
600
601 auto table0_l0 = get_table(
602 arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
603 EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
604 Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
605 ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
606 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
607 TOP_LEVEL - 2)),
608 Eq(pa_addr(low_begin)));
609
610 /* Check only the first page of the second table is mapped. */
611 auto table1_l2 = tables[1];
612 EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
613 ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));
614
615 auto table1_l1 =
616 get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
617 EXPECT_THAT(table1_l1.subspan(1),
618 Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
619 ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));
620
621 auto table1_l0 =
622 get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
623 EXPECT_THAT(table1_l0.subspan(1),
624 Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
625 ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
626 EXPECT_THAT(
627 pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
628 Eq(pa_addr(high_begin)));
Andrew Scull4e83cef2019-11-19 14:17:54 +0000629}
630
Andrew Scull73b89542019-11-20 17:31:26 +0000631/**
Andrew Scull1ba470e2018-10-31 15:14:31 +0000632 * If range is not mapped, unmapping has no effect.
633 */
634TEST_F(mm, unmap_not_mapped)
635{
Andrew Scullda241972019-01-05 18:17:48 +0000636 EXPECT_TRUE(
637 mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
Andrew Scull3681b8d2018-12-12 14:22:59 +0000638 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000639 get_ptable(ptable),
Andrew Scull3681b8d2018-12-12 14:22:59 +0000640 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000641}
642
643/**
644 * Unmapping everything should result in an empty page table with no subtables.
645 */
646TEST_F(mm, unmap_all)
647{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000648 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000649 const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
650 const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
651 const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
652 const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000653 ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
654 nullptr));
655 ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
656 nullptr));
Andrew Scullda241972019-01-05 18:17:48 +0000657 EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
Andrew Scull3681b8d2018-12-12 14:22:59 +0000658 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000659 get_ptable(ptable),
Andrew Scull3681b8d2018-12-12 14:22:59 +0000660 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000661}
662
663/**
664 * Unmap range is rounded to the containing pages.
665 */
666TEST_F(mm, unmap_round_to_page)
667{
Karl Meakin07a69ab2025-02-07 14:53:19 +0000668 constexpr mm_mode_t mode = 0;
Andrew Scull1ba470e2018-10-31 15:14:31 +0000669 const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
670 const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
Andrew Scull164f8152019-11-19 14:29:55 +0000671
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000672 ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
Andrew Walbran8ec2b9f2019-11-25 15:05:40 +0000673 &ppool, nullptr));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000674 ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
Andrew Scullda241972019-01-05 18:17:48 +0000675 pa_add(map_begin, 99), &ppool));
Andrew Scull164f8152019-11-19 14:29:55 +0000676
677 auto tables = get_ptable(ptable);
678 constexpr auto l3_index = 2;
679
680 /* Check all other top level entries are empty... */
681 EXPECT_THAT(std::span(tables).first(l3_index),
682 Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
683 EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
684 Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
685
686 /* Except the mapped page which is absent. */
687 auto table_l2 = tables[l3_index];
688 constexpr auto l2_index = 384;
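        /*
         * Rough derivation of these indices, assuming four concatenated
         * 512 GiB root tables with 1 GiB top-level entries: 0x160'0000'0000
         * falls in root table 2, and
         * (0x160'0000'0000 % 0x80'0000'0000) / 0x4000'0000 == 384.
         */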
        EXPECT_THAT(table_l2.first(l2_index),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(l2_index + 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);

        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

        auto tables = get_ptable(ptable);

        /* Check the untouched tables are empty. */
        EXPECT_THAT(std::span(tables).first(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Check the last page is explicitly marked as absent. */
        auto table2_l2 = tables[2];
        EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

        auto table2_l1 = get_table(
                arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

        auto table2_l0 = get_table(
                arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        /* Check the first page is explicitly marked as absent. */
        auto table3_l2 = tables[3];
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table3_l1 = get_table(
                arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table3_l0 = get_table(
                arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated
 * as an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0x180'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
                                pa_add(page_begin, 50), &ppool));

        auto tables = get_ptable(ptable);
        constexpr auto l3_index = 3;

        /* Check all other top level entries are empty... */
        EXPECT_THAT(std::span(tables).first(l3_index),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Except the previously mapped page, which is now absent. */
        auto table_l2 = tables[l3_index];
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t page_begin = pa_init(0x100'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
        EXPECT_TRUE(
                mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
        constexpr mm_mode_t mode = 0;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
        EXPECT_FALSE(mm_vm_is_mapped(
                &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, get_mode_empty_quirk)
{
        constexpr int default_mode =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        mm_mode_t read_mode;

        read_mode = 0;
        EXPECT_TRUE(
                mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
                                   ipa_init(0x3c97'e000), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
                                   ipa_init(0x1ff'ffff'ffff), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));
}

/**
 * Get the mode of a range comprised of individual pages which lie on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
        constexpr mm_mode_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        mm_mode_t read_mode;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       &ppool, nullptr));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                   &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));

        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                    &read_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(map_end), &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));
}

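/**
 * mm_vm_get_mode fails over a range whose pages have different modes, while
 * mm_vm_get_mode_partial reports the mode of the leading uniformly-mapped run
 * and where that run ends.
 */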
TEST_F(mm, get_mode_partial)
{
        constexpr mm_mode_t mode0 = MM_MODE_R;
        constexpr mm_mode_t mode1 = MM_MODE_W;
        constexpr mm_mode_t mode2 = MM_MODE_X;

        mm_mode_t ret_mode;

        const paddr_t page0_start = pa_init(0);
        const paddr_t page0_end = pa_init(PAGE_SIZE * 1);
        const paddr_t page1_start = pa_init(PAGE_SIZE * 1);
        const paddr_t page1_end = pa_init(PAGE_SIZE * 2);
        const paddr_t page2_start = pa_init(PAGE_SIZE * 2);
        const paddr_t page2_end = pa_init(PAGE_SIZE * 3);
        ipaddr_t end_ret;

        ASSERT_TRUE(mm_vm_identity_map(&ptable, page0_start, page0_end, mode0,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page1_start, page1_end, mode1,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page2_start, page2_end, mode2,
                                       &ppool, nullptr));

        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(page0_start),
                                   ipa_from_pa(page0_end), &ret_mode));
        EXPECT_THAT(ret_mode, Eq(mode0));

        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(page1_start),
                                   ipa_from_pa(page1_end), &ret_mode));
        EXPECT_THAT(ret_mode, Eq(mode1));

        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(page2_start),
                                   ipa_from_pa(page2_end), &ret_mode));
        EXPECT_THAT(ret_mode, Eq(mode2));

        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(page0_start),
                                    ipa_from_pa(page2_end), nullptr));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(page0_start),
                                    ipa_from_pa(page1_end), nullptr));

        EXPECT_TRUE(mm_vm_get_mode_partial(&ptable, ipa_from_pa(page0_start),
                                           ipa_from_pa(page1_end), &ret_mode,
                                           &end_ret));
        EXPECT_EQ(ipa_addr(end_ret), ipa_addr(ipa_from_pa(page1_start)));
        EXPECT_EQ(ret_mode, mode0);

        EXPECT_TRUE(mm_vm_get_mode_partial(&ptable, ipa_from_pa(page1_start),
                                           ipa_from_pa(page2_end), &ret_mode,
                                           &end_ret));
        EXPECT_EQ(ipa_addr(end_ret), ipa_addr(ipa_from_pa(page2_start)));
        EXPECT_EQ(ret_mode, mode1);

        EXPECT_TRUE(mm_vm_get_mode_partial(
                &ptable, ipa_from_pa(page2_start),
                ipa_from_pa(pa_add(page2_end, 2 * PAGE_SIZE)), &ret_mode,
                &end_ret));
        EXPECT_EQ(ipa_addr(end_ret), ipa_addr(ipa_from_pa(page2_end)));
        EXPECT_EQ(ret_mode, mode2);
}

/**
 * Retrieving the mode of anything out of range fails.
 */
TEST_F(mm, get_mode_out_of_range)
{
        constexpr mm_mode_t mode = MM_MODE_UNOWNED;
        mm_mode_t read_mode;
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
                                    ipa_init(2'0000'0000'0000), &read_mode));
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
        constexpr mm_mode_t mode = 0;
        const paddr_t begin = pa_init(39456 * mm_entry_size(1));
        const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
        const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
                                       nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
                                       nullptr));
        mm_vm_defrag(&ptable, &ppool, false);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
        const struct mm_ptable &ptable)
{
        std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
        const uint8_t root_table_count = arch_mm_stage2_root_table_count();
        for (uint8_t i = 0; i < root_table_count; ++i) {
                all.push_back(get_table(&ptable.root_tables[i]));
        }
        return all;
}

} /* namespace mm_test */