/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern "C" {
#include "hf/mm.h"

#include "hf/arch/mm.h"

#include "hf/alloc.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include <gmock/gmock.h>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_max_level(0);
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
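/*
 * The tests expect the VM (stage-2) tables to use 4 concatenated root tables
 * at level 2, covering the 2 TiB of address space ending at VM_MEM_END. This
 * matches the usual aarch64 configuration and is an assumption of the tests
 * rather than a requirement of the mm API.
 */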

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
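/*
 * For example, assuming 4 KiB pages (PAGE_BITS == 12) and 9 bits per level
 * (PAGE_LEVEL_BITS == 9), a level 0 entry covers 4 KiB, a level 1 entry
 * covers 2 MiB and a level 2 entry covers 1 GiB.
 */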
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable, int mode)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_root_table_count(mode);
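	/*
	 * Concatenated root tables are contiguous in memory, so the i-th root
	 * table starts i page tables beyond the first.
	 */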
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
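		/* All halloc allocations during a test come from this heap. */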
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	constexpr int mode = MM_MODE_STAGE1;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		nullptr));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

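	/*
	 * Walk down the levels: the single non-absent entry at each level
	 * leads to the next table, ending in a level 0 block for the page.
	 */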
	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(page_begin)));

	mm_ptable_fini(&ptable, mode);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

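	/*
	 * The mapped page is the one just below VM_MEM_END so it lands in the
	 * last of the concatenated root tables.
	 */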
	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_ptable_fini(&ptable, mode);
}

/**
 * Map a two-page range across the boundary of two concatenated tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
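	/*
	 * Each block should identity map its own slice of the address space:
	 * root table i starts at i * mm_entry_size(TOP_LEVEL + 1) and entry j
	 * within it adds a further j * mm_entry_size(TOP_LEVEL).
	 */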
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
					TOP_LEVEL)),
				Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
					(j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
		mode, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
		pa_init(0x5000), mode, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping a range that goes beyond the available memory is clamped to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
		pa_init(0xf32'0000'0000'0000), mode,
		nullptr));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, VM_MEM_END, pa_init(0xf0'0000'0000'0000), mode, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, 0);
}

/**
 * Map a single page and then map all of memory, which replaces the single
 * page mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		nullptr));
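	/*
	 * Remapping the page does not merge it back into a top level block,
	 * so the tables now contain a mix of blocks and subtables.
	 */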
	EXPECT_THAT(get_ptable(ptable, mode),
		AllOf(SizeIs(4),
			Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
				TOP_LEVEL)))),
			Contains(Contains(Truly(std::bind(
				arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			Contains(Contains(Truly(std::bind(
				arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
		pa_add(map_begin, 99), mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, map_begin, map_end, mode, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
		mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
		mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
		pa_add(page_begin, 50), mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
	mm_ptable_fini(&ptable, mode);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
	EXPECT_TRUE(mm_vm_is_mapped(
		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
	EXPECT_FALSE(
		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
		mode));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	mm_ptable_defrag(&ptable, mode);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr));
	ASSERT_TRUE(
		mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode));
	mm_ptable_defrag(&ptable, 0);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr int mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		nullptr));
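	/*
	 * Unmap a top level range and remap it in two parts so that it is
	 * broken into subtables of smaller mappings, which defrag should
	 * merge back into top level blocks.
	 */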
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr));
	mm_ptable_defrag(&ptable, 0);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode);
}

} /* namespace */