/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
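/*
 * Note: the expectations below assume the stage-2 configuration used by this
 * test build: a 4 KiB granule (PAGE_BITS == 12, PAGE_LEVEL_BITS == 9) with
 * TOP_LEVEL == 2 and four concatenated root tables, so that VM_MEM_END
 * (2 TiB) == 4 root tables * 512 entries * 1 GiB. This is an illustrative
 * assumption; the tests only assert it via SizeIs(4) and
 * ASSERT_THAT(TOP_LEVEL, Eq(2)).
 */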

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
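
/*
 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (a 4 KiB
 * translation granule), mm_entry_size(0) == 4 KiB, mm_entry_size(1) == 2 MiB
 * and mm_entry_size(2) == 1 GiB. These figures are illustrative only; the
 * actual constants come from the architecture headers.
 */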

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
        uint32_t mode;
        return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
               (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
        auto table = reinterpret_cast<struct mm_page_table *>(
                ptr_from_va(va_from_pa(pa)));
        return std::span<pte_t>(table->entries, std::end(table->entries));
}

class mm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
        }

        std::unique_ptr<uint8_t[]> test_heap;

       protected:
        struct mpool ppool;
};
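
/*
 * The fixture above backs the page-table allocator with a small in-process
 * heap: the mpool hands out entries of sizeof(struct mm_page_table), so
 * (assuming a page table occupies exactly one page) TEST_HEAP_SIZE allows
 * roughly 16 tables per test before allocations, and hence mappings, fail.
 */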

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                        &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                        Eq(pa_addr(page_begin)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
        constexpr uint32_t mode = 0;
        const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
        const paddr_t map_end = pa_add(map_begin, 268);
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                        &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the last page is mapped, and nothing else. */
        EXPECT_THAT(std::span(tables).first(3),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.back();
        EXPECT_THAT(table_l2.first(table_l2.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.first(table_l1.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.first(table_l0.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
                        TOP_LEVEL - 2)),
                        Eq(0x200'0000'0000 - PAGE_SIZE));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
        constexpr uint32_t mode = 0;
        const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                        &ppool, nullptr));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                        TOP_LEVEL - 2)),
                        Eq(pa_addr(map_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        auto tables = get_ptable(ptable);
        EXPECT_THAT(
                tables,
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        for (uint64_t i = 0; i < tables.size(); ++i) {
                for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
                                        TOP_LEVEL)),
                                    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
                                       (j * mm_entry_size(TOP_LEVEL))))
                                << "i=" << i << " j=" << j;
                }
        }
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a single page again doesn't introduce
 * a special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
        constexpr uint32_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                        mode, &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
        constexpr uint32_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
                        pa_init(0x5000), mode, &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
        constexpr uint32_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
                        &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(20));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
        constexpr uint32_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
                &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
                        pa_init(0xf32'0000'0000'0000), mode,
                        &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
        constexpr uint32_t mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
                        pa_init(0xf0'0000'0000'0000), mode,
                        &ppool, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                        &ppool, nullptr));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
                                        TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                        arch_mm_pte_is_block, _1, TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                        arch_mm_pte_is_table, _1,
                                        TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
        constexpr uint32_t mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                        nullptr));
        EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
                        MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
                        &ppool));
        mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
                        nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                        Eq(pa_addr(page_begin)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
        constexpr uint32_t mode = 0;
        const paddr_t first_begin = pa_init(0);
        const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
        const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
        const paddr_t last_end = VM_MEM_END;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
                        mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
                        &ppool));
        mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
                        nullptr);
        mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
                        nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first and last pages are mapped and nothing else. */
        EXPECT_THAT(std::span(tables).subspan(1, 2),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Check the first page. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

        auto table0_l1 =
                get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

        auto table0_l0 =
                get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(first_begin)));

        /* Check the last page. */
        auto table3_l2 = tables.back();
        EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

        auto table3_l1 = get_table(
                arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

        auto table3_l0 = get_table(
                arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
                        TOP_LEVEL - 2)),
                        Eq(pa_addr(last_begin)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
        constexpr uint32_t mode = 0;
        const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
        const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
                        &ppool));
        ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
                        &ppool));
        mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
                        nullptr);
        mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
                        nullptr);

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                        TOP_LEVEL - 2)),
                        Eq(pa_addr(low_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(high_begin)));

        mm_vm_fini(&ptable, &ppool);
}

/**
Andrew Scull1ba470e2018-10-31 15:14:31 +0000682 * If range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_TRUE(
                mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
        constexpr uint32_t mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                        nullptr));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * The range to unmap is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
        constexpr uint32_t mode = 0;
        const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
        struct mm_ptable ptable;

        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
                        pa_add(map_begin, 99), &ppool));

        auto tables = get_ptable(ptable);
        constexpr auto l3_index = 2;

        /* Check all other top level entries are empty... */
        EXPECT_THAT(std::span(tables).first(l3_index),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* ...except the previously mapped page, which is now marked absent. */
        auto table_l2 = tables[l3_index];
        constexpr auto l2_index = 384;
        EXPECT_THAT(table_l2.first(l2_index),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(l2_index + 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
        constexpr uint32_t mode = 0;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;

        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

        auto tables = get_ptable(ptable);

        /* Check the untouched tables are empty. */
        EXPECT_THAT(std::span(tables).first(2),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* Check the last page is explicitly marked as absent. */
        auto table2_l2 = tables[2];
        EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

        auto table2_l1 = get_table(
                arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

        auto table2_l0 = get_table(
                arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        /* Check the first page is explicitly marked as absent. */
        auto table3_l2 = tables[3];
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table3_l1 = get_table(
                arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table3_l0 = get_table(
                arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
                        &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
                        &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(0x180'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
                        pa_add(page_begin, 50), &ppool));

        auto tables = get_ptable(ptable);
        constexpr auto l3_index = 3;

        /* Check all other top level entries are empty... */
        EXPECT_THAT(std::span(tables).first(l3_index),
                        Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        /* ...except the previously mapped page, which is now marked absent. */
        auto table_l2 = tables[l3_index];
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l1.subspan(1),
                        Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
        constexpr uint32_t mode = 0;
        const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
        constexpr uint32_t mode = 0;
        const paddr_t page_begin = pa_init(0x100'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                        &ppool, nullptr));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
        EXPECT_TRUE(
                mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
        constexpr uint32_t mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
        EXPECT_FALSE(mm_vm_is_mapped(
                &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
        constexpr int default_mode =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        struct mm_ptable ptable;
        uint32_t read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

        read_mode = 0;
        EXPECT_TRUE(
                mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
                        ipa_init(0x3c97'e000), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
                        ipa_init(0x1ff'ffff'ffff), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which are either side
 * of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
        constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        uint32_t read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                        &ppool, nullptr));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                        ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                        &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));

        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                        ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                        &read_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                        ipa_from_pa(map_end), &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Getting the mode of anything out of range fails.
 */
TEST_F(mm, get_mode_out_of_range)
{
        constexpr uint32_t mode = MM_MODE_UNOWNED;
        struct mm_ptable ptable;
        uint32_t read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                        ipa_from_pa(pa_add(VM_MEM_END, 1)),
                        &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
                        ipa_from_pa(pa_add(VM_MEM_END, 1)),
                        &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
                        ipa_init(2'0000'0000'0000), &read_mode));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
        constexpr uint32_t mode = 0;
        const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
        constexpr uint32_t mode = 0;
        const paddr_t begin = pa_init(39456 * mm_entry_size(1));
        const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
        const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                        &ppool, nullptr));
        ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
                        nullptr));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
                        nullptr));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                        _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
        const struct mm_ptable &ptable)
{
        std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
        const uint8_t root_table_count = arch_mm_stage2_root_table_count();
        for (uint8_t i = 0; i < root_table_count; ++i) {
                all.push_back(get_table(
                        pa_add(ptable.root, i * sizeof(struct mm_page_table))));
        }
        return all;
}

} /* namespace mm_test */