/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_max_level(0);
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
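/*
 * These tests assume a stage-2 configuration with four concatenated root
 * tables and TOP_LEVEL == 2 (asserted where a test relies on it), so
 * VM_MEM_END corresponds to the 2TiB of address space those tables cover.
 */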

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
size_t mm_entry_size(int level)
{
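	/*
	 * For example, with a 4KiB granule (PAGE_BITS == 12) and 9 bits of
	 * index per level (PAGE_LEVEL_BITS == 9) this gives 4KiB at level 0,
	 * 2MiB at level 1 and 1GiB at level 2.
	 */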
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable, int mode)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_root_table_count(mode);
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
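		/*
		 * The mm code allocates page tables from this pool, so each
		 * pool entry must be large enough to hold a full page table.
		 */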
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	constexpr int mode = MM_MODE_STAGE1;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
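	/*
	 * The range lies entirely within the last page below VM_MEM_END, so
	 * after rounding exactly that one page should be mapped.
	 */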
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
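	/*
	 * 0x80'0000'0000 is the boundary between the first and second
	 * concatenated root tables, so one page falls on either side of it.
	 */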
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
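	/*
	 * Each concatenated root table covers mm_entry_size(TOP_LEVEL + 1) of
	 * address space, so the block mapped by entry j of root table i
	 * should start at the identity-mapped address computed below.
	 */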
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa, mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
		&ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the available
 * range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, 0, &ppool);
}

/**
 * Map a single page and then map all of memory which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map all memory at the top level; unmapping a page and remapping it at a
 * lower level does not result in all memory being mapped at the top level
 * again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_THAT(get_ptable(ptable, mode),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(
		mm_vm_unmap(&ptable, pa_init(0),
			    pa_init(std::numeric_limits<uintpaddr_t>::max()),
			    mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073), mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0), mode));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3), mode));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b), mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin), mode));
	EXPECT_TRUE(mm_vm_is_mapped(
		&ptable, ipa_from_pa(pa_add(page_begin, 127)), mode));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end), mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END), mode));
	EXPECT_FALSE(
		mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123), mode));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max()),
		mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	mm_ptable_defrag(&ptable, mode, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
	mm_ptable_defrag(&ptable, 0, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Any subtable with all blocks with the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr int mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
				       &ppool));
	mm_ptable_defrag(&ptable, 0, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

} /* namespace */