/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
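/*
 * Note: the expectations below assume this address limit is covered by 4
 * concatenated root tables (hence the SizeIs(4) matchers) walked from
 * TOP_LEVEL == 2 down to level 0.
 */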

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
size_t mm_entry_size(int level)
{
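	/*
	 * For example, assuming a 4 KiB granule (PAGE_BITS == 12) and 9 bits
	 * per level (PAGE_LEVEL_BITS == 9), this gives 4 KiB at level 0,
	 * 2 MiB at level 1 and 1 GiB at level 2.
	 */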
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
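	/*
	 * Concatenated root tables are assumed to be contiguous pages, so the
	 * i-th root table sits i * sizeof(struct mm_page_table) beyond root.
	 */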
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
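		/*
		 * Seed the page pool with entries the size of a page table so
		 * the tests below allocate whole tables from the test heap.
		 */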
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		&ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
		&ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
		&ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
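	/*
	 * Each top level PTE should identity map its slice of the address
	 * space: root table i covers i * mm_entry_size(TOP_LEVEL + 1) and
	 * entry j adds j * mm_entry_size(TOP_LEVEL) on top of that.
	 */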
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
					TOP_LEVEL)),
				Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
					(j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a page again doesn't introduce a
 * special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
		mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
		pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
		&ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the available
 * range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
		pa_init(0xf32'0000'0000'0000), mode,
		&ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
		pa_init(0xf0'0000'0000'0000), mode,
		&ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory, which replaces the single page
 * mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		&ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		AllOf(SizeIs(4),
			Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
				TOP_LEVEL)))),
			Contains(Contains(Truly(std::bind(
				arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			Contains(Contains(Truly(std::bind(
				arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
		nullptr));
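	/*
	 * Remapping the whole range with MM_MODE_UNMAPPED_MASK should behave
	 * like an unmap, leaving only absent entries behind.
	 */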
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
		MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
		&ppool));
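	/*
	 * Commit applies the mapping prepared above; the test does not check
	 * it for failure, only the resulting table layout below.
	 */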
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
		nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
		mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
		&ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
		nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
		nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(pa_addr(last_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
		&ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
		&ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
		nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
		nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
			TOP_LEVEL - 2)),
		Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * If range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
		nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap range is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
		pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 2;
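	/*
	 * Assuming a 4 KiB granule, each root table spans 512 GiB and each top
	 * level entry 1 GiB, so 0x160'0000'0000 falls in root table 2 at
	 * entry 384 (l2_index below).
	 */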

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Except the previously mapped page, which is now absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
	EXPECT_THAT(table_l2.first(l2_index),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
		&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
		&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
		pa_add(page_begin, 50), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Except the previously mapped page, which is now absent. */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		AllOf(SizeIs(4),
			Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
		&ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
		ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
		ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range made up of individual pages that lie on either side
 * of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
		&ppool, nullptr));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
		ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
		&read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
		ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
		&read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
		ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
		ipa_from_pa(pa_add(VM_MEM_END, 1)),
		&read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
		ipa_from_pa(pa_add(VM_MEM_END, 1)),
		&read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
		ipa_init(2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
		&ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
		nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
		nullptr));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
			_1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

} /* namespace */