/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
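
/*
 * The expectations below assume the stage-2 configuration used by these unit
 * tests: a top level of 2 and four concatenated root tables, which together
 * cover VM_MEM_END (4 x 512 GiB = 2 TiB with a 4 KiB granule). The SizeIs(4)
 * and Eq(2) assertions encode that assumption.
 */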

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
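
/*
 * As a worked example, assuming the usual 4 KiB translation granule
 * (PAGE_BITS = 12 and PAGE_LEVEL_BITS = 9), mm_entry_size(0) is 4 KiB,
 * mm_entry_size(1) is 2 MiB and mm_entry_size(2) is 1 GiB.
 */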

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
        int mode;
        return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
               (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
        auto table = reinterpret_cast<struct mm_page_table *>(
                ptr_from_va(va_from_pa(pa)));
        return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
        const struct mm_ptable &ptable)
{
        std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
        const uint8_t root_table_count = arch_mm_stage2_root_table_count();
        for (uint8_t i = 0; i < root_table_count; ++i) {
                all.push_back(get_table(
                        pa_add(ptable.root, i * sizeof(struct mm_page_table))));
        }
        return all;
}
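
/*
 * Note that get_ptable relies on the concatenated root tables being laid out
 * contiguously from ptable.root, one struct mm_page_table apart; each element
 * of the returned vector is the entries of one root table.
 */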

class mm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
        }

        std::unique_ptr<uint8_t[]> test_heap;

       protected:
        struct mpool ppool;
};
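
/*
 * Each test below follows the same pattern: initialise a stage-2 table from
 * ppool with mm_vm_init, perform identity map/unmap operations on it, inspect
 * the resulting entries via get_ptable, and release the memory again with
 * mm_vm_fini. The pool's entry size is a whole mm_page_table so page table
 * allocations are served directly from the 16-page test heap.
 */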

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the first page is mapped and nothing else. */
        EXPECT_THAT(std::span(tables).last(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.front();
        EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

        auto table_l1 =
                get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

        auto table_l0 =
                get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
                    Eq(pa_addr(page_begin)));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
        const paddr_t map_end = pa_add(map_begin, 268);
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
                                       &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check that the last page is mapped, and nothing else. */
        EXPECT_THAT(std::span(tables).first(3),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

        auto table_l2 = tables.back();
        EXPECT_THAT(table_l2.first(table_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

        auto table_l1 = get_table(
                arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table_l1.first(table_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

        auto table_l0 = get_table(
                arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table_l0.first(table_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(0x200'0000'0000 - PAGE_SIZE));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));

        auto tables = get_ptable(ptable);
        EXPECT_THAT(tables, SizeIs(4));
        EXPECT_THAT(std::span(tables).last(2),
                    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
        ASSERT_THAT(TOP_LEVEL, Eq(2));

        /* Check only the last page of the first table is mapped. */
        auto table0_l2 = tables.front();
        EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

        auto table0_l1 = get_table(
                arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
        EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

        auto table0_l0 = get_table(
                arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
        EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
                                                   TOP_LEVEL - 2)),
                    Eq(pa_addr(map_begin)));

        /* Check only the first page of the second table is mapped. */
        auto table1_l2 = tables[1];
        EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

        auto table1_l1 =
                get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
        EXPECT_THAT(table1_l1.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
        ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

        auto table1_l0 =
                get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
        EXPECT_THAT(table1_l0.subspan(1),
                    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
        ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
        EXPECT_THAT(
                pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
                Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        auto tables = get_ptable(ptable);
        EXPECT_THAT(
                tables,
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        for (uint64_t i = 0; i < tables.size(); ++i) {
                for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
                                                                   TOP_LEVEL)),
                                    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
                                       (j * mm_entry_size(TOP_LEVEL))))
                                << "i=" << i << " j=" << j;
                }
        }
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                                       mode, &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
                                       pa_init(0x5000), mode, &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
                                       &ipa, &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(20));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(
                &ptable, pa_init(0),
                pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
                &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps the mapping to
 * the available range.
 */
TEST_F(mm, map_clamp_to_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
                                       pa_init(0xf32'0000'0000'0000), mode,
                                       nullptr, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
        constexpr int mode = 0;
        ipaddr_t ipa = ipa_init(-1);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
                                       pa_init(0xf0'0000'0000'0000), mode, &ipa,
                                       &ppool));
        EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        EXPECT_THAT(get_ptable(ptable),
                    AllOf(SizeIs(4),
                          Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
                                                    TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
                          Contains(Contains(Truly(std::bind(
                                  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
                                &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * The range to unmap is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
                                pa_add(map_begin, 99), mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
        constexpr int mode = 0;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
                                mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
                                mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0x180'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
                                pa_add(page_begin, 50), mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(
                mm_vm_unmap(&ptable, pa_init(0),
                            pa_init(std::numeric_limits<uintpaddr_t>::max()),
                            mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
        const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
        constexpr int mode = 0;
        const paddr_t page_begin = pa_init(0x100'0000'0000);
        const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
                                       nullptr, &ppool));
        EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
        EXPECT_TRUE(
                mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
        constexpr int mode = 0;
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
        EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
        EXPECT_FALSE(mm_vm_is_mapped(
                &ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
        constexpr int default_mode =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        struct mm_ptable ptable;
        int read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

        read_mode = 0;
        EXPECT_TRUE(
                mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
                                   ipa_init(0x3c97'e000), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
                                   ipa_init(0x1ff'ffff'ffff), &read_mode));
        EXPECT_THAT(read_mode, Eq(default_mode));

        mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which are either side
 * of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
        constexpr int mode = MM_MODE_INVALID | MM_MODE_SHARED;
        const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
        const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
        struct mm_ptable ptable;
        int read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
                                       nullptr, &ppool));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                   &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));

        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
                                    &read_mode));

        read_mode = 0;
        EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
                                   ipa_from_pa(map_end), &read_mode));
        EXPECT_THAT(read_mode, Eq(mode));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
        constexpr int mode = MM_MODE_UNOWNED;
        struct mm_ptable ptable;
        int read_mode;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
                                    ipa_from_pa(pa_add(VM_MEM_END, 1)),
                                    &read_mode));
        EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
                                    ipa_init(2'0000'0000'0000), &read_mode));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
        constexpr int mode = 0;
        const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
        const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
        const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
        const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
        constexpr int mode = 0;
        const paddr_t begin = pa_init(39456 * mm_entry_size(1));
        const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
        const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
        struct mm_ptable ptable;
        ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
                                       nullptr, &ppool));
        ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
                                       &ppool));
        ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
                                       &ppool));
        mm_vm_defrag(&ptable, &ppool);
        EXPECT_THAT(
                get_ptable(ptable),
                AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
                                                           _1, TOP_LEVEL))))));
        mm_vm_fini(&ptable, &ppool);
}

} /* namespace */