/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
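/*
 * Note: _1 below refers to std::placeholders::_1; together with std::bind it
 * adapts the arch_mm_* predicates into unary functors for gmock's Truly()
 * matcher.
 */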
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
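 *
 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, a level 0
 * entry covers 4 KiB, a level 1 entry 2 MiB and a level 2 entry 1 GiB.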
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
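 *
 * An address counts as mapped when a mode can be read for the one-byte range
 * starting at it and the MM_MODE_INVALID bit is clear.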
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
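 *
 * The root tables of a concatenated table are contiguous pages, so the i-th
 * root table lives at ptable.root + i * sizeof(struct mm_page_table).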
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
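		/*
		 * ppool backs every page-table allocation made by the tests;
		 * SetUp runs before each test so each one starts with a fresh
		 * TEST_HEAP_SIZE heap.
		 */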
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
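	/*
	 * Both addresses fall within the last page below VM_MEM_END, so
	 * rounding should result in exactly that one page being mapped.
	 */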
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
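	/*
	 * With the geometry asserted below, 0x80'0000'0000 is the boundary
	 * between the first and second concatenated root tables, so this two
	 * page range straddles two tables.
	 */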
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a single page again doesn't introduce
 * a special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
		&ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory is clamped to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory, which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary, meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which are on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
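	/*
	 * [begin, end) covers four whole level 1 entries; remapping it in two
	 * pieces that meet at the unaligned `middle` forces smaller mappings
	 * that defrag should merge back into top level blocks.
	 */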
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100888 struct mm_ptable ptable;
Andrew Scullda3df7f2019-01-05 17:49:27 +0000889 ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
Andrew Scull1ba470e2018-10-31 15:14:31 +0000890 ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000891 nullptr, &ppool));
Andrew Scullda241972019-01-05 18:17:48 +0000892 ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000893 ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
894 &ppool));
895 ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
896 &ppool));
Andrew Scullda3df7f2019-01-05 17:49:27 +0000897 mm_vm_defrag(&ptable, &ppool);
Andrew Scull1ba470e2018-10-31 15:14:31 +0000898 EXPECT_THAT(
Andrew Scullda3df7f2019-01-05 17:49:27 +0000899 get_ptable(ptable),
Andrew Scull1ba470e2018-10-31 15:14:31 +0000900 AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
901 _1, TOP_LEVEL))))));
Andrew Scullda3df7f2019-01-05 17:49:27 +0000902 mm_vm_fini(&ptable, &ppool);
Andrew Walbran6324fc92018-10-03 11:46:43 +0100903}
904
Andrew Scull232d5602018-10-15 11:07:45 +0100905} /* namespace */