/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
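/*
 * The checks below assume the stage-2 configuration used by these tests:
 * TOP_LEVEL == 2 with four concatenated root tables. Each level-2 entry
 * then spans 1 GiB, each root table covers 512 GiB, and the four roots
 * together cover VM_MEM_END == 2 TiB (0x200'0000'0000).
 */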
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
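
/*
 * A worked example of the above, assuming a 4 KiB granule (PAGE_BITS == 12)
 * and 9 index bits per level (PAGE_LEVEL_BITS == 9):
 *   mm_entry_size(0) == 1 << 12 == 4 KiB
 *   mm_entry_size(1) == 1 << 21 == 2 MiB
 *   mm_entry_size(2) == 1 << 30 == 1 GiB
 */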

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
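	/*
	 * Querying the single byte range [ipa, ipa + 1) is enough to recover
	 * the mode of the page containing the address.
	 */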
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
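	/*
	 * Concatenated root tables are contiguous, one page each, so root
	 * table i lives sizeof(struct mm_page_table) bytes past root i - 1.
	 */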
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
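		/*
		 * The pool hands out page-table-sized entries; this assumes
		 * struct mm_page_table is exactly one page.
		 */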
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));
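	/*
	 * With TOP_LEVEL == 2, the walk to the first page should be a
	 * level-2 table entry, then a level-1 table entry, then a level-0
	 * block; every other entry should remain absent.
	 */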

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two-page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
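	/*
	 * 0x80'0000'0000 is 512 GiB: the boundary between the first two
	 * concatenated root tables under the configuration assumed above.
	 */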
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then mapping a single page again doesn't introduce
 * a special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ipa, &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ipa,
		&ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory is clamped to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode, &ipa,
				       &ppool));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory, which replaces the single-page
 * mapping with a higher-level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * After mapping all memory at the top level, unmapping a page and remapping it
 * at a lower level does not result in all memory being mapped at the top level
 * again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, nullptr, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(mm, vm_unmap_hypervisor_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(mm_vm_unmap_hypervisor(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The unmap range is rounded out to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except for the unmapped page, which is explicitly absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
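	/*
	 * Assuming 1 GiB level-2 entries, the mapped page at
	 * 0x160'0000'0000 + PAGE_SIZE falls within level-2 entry
	 * (0x160'0000'0000 - 2 * 512 GiB) / 1 GiB == 0x180 == 384 of the
	 * third root table.
	 */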
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* ...except for the unmapped page, which is explicitly absent. */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       nullptr, &ppool));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which lie on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       nullptr, &ppool));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any query out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(0x2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       nullptr, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, nullptr,
				       &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, nullptr,
				       &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

} /* namespace */