/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_max_level(0);
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
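
/*
 * A note on the geometry assumed by the tests below (not asserted by the
 * arch API itself): with a 4 KiB page granule, 512 entries per table and
 * TOP_LEVEL == 2, each top-level entry covers a 1 GiB block, so 4
 * concatenated root tables span 4 * 512 * 1 GiB = 2 TiB = 0x200'0000'0000,
 * which is the value chosen for VM_MEM_END.
 */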

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
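
/*
 * Worked examples, assuming a 4 KiB granule (PAGE_BITS == 12) and 9 bits of
 * index per level (PAGE_LEVEL_BITS == 9):
 *   mm_entry_size(0) == 1 << 12 == 4 KiB (one page)
 *   mm_entry_size(1) == 1 << 21 == 2 MiB
 *   mm_entry_size(2) == 1 << 30 == 1 GiB (a block at TOP_LEVEL below)
 */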

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	int mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable, int mode)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_root_table_count(mode);
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};
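
/*
 * Sizing note: the pool is seeded with TEST_HEAP_SIZE (16 pages) and hands
 * out entries of sizeof(struct mm_page_table). Assuming a page table occupies
 * exactly one page, each test can allocate at most 16 tables before
 * allocations from the pool start failing.
 */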

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	constexpr int mode = MM_MODE_STAGE1;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(1), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	auto tables = get_ptable(ptable, mode);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
		&ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr int mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, 0, &ppool);
}

/**
 * Map a single page and then map all of memory, which replaces the single
 * page mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Map all memory at the top level; unmapping a page and remapping it at a
 * lower level does not result in all memory being mapped at the top level
 * again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_THAT(get_ptable(ptable, mode),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), mode,
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr int mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(
		mm_vm_unmap(&ptable, pa_init(0),
			    pa_init(std::numeric_limits<uintpaddr_t>::max()),
			    mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr int mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int mode = 0;
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	int read_mode;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which lie on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr int mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	int read_mode;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr int mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	int read_mode;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(2'0000'0000'0000), &read_mode));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	constexpr int mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	mm_ptable_defrag(&ptable, mode, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr int mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, mode, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, mode, &ppool));
	mm_ptable_defrag(&ptable, 0, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr int mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_ptable_init(&ptable, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
				       &ppool));
	mm_ptable_defrag(&ptable, 0, &ppool);
	EXPECT_THAT(
		get_ptable(ptable, mode),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_ptable_fini(&ptable, mode, &ppool);
}

} /* namespace */