/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
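/*
 * Note (an illustrative assumption, not derived from this file): with the
 * usual arm64 configuration of 4 KiB granules and a stage-2 TOP_LEVEL of 2,
 * each concatenated root table covers 512 GiB, so the 2 TiB VM_MEM_END above
 * corresponds to the four root tables that the SizeIs(4) expectations below
 * rely on.
 */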

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
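
/*
 * For example, assuming the typical 4 KiB granule (PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9), a level 0 entry spans 4 KiB, a level 1 entry spans
 * 2 MiB and a level 2 entry spans 1 GiB.
 */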

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t, MM_PTE_PER_PAGE>(table->entries,
						 std::end(table->entries));
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};
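
/*
 * Note: each test below allocates its page tables from the fixture's
 * TEST_HEAP_SIZE pool and releases them again with mm_vm_fini(), so the pool
 * size bounds how many tables a single test can create.
 */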

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped; all other entries are left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two-page range over the boundary of two concatenated tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a single page and then mapping all of memory replaces the single
 * page mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
					   mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(last_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
					   &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * If a range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except the previously mapped page, which is now marked absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except the previously mapped page, which is now marked absent. */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Addresses out of range are not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range composed of individual pages on either side of a
 * root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Retrieving the mode of anything out of range fails.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(0x2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, 0, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
				       nullptr));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

} /* namespace mm_test */