/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/mm.h"
#include "hf/mpool.h"
}

#include <limits>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::Truly;

using ::mm_test::get_ptable;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
const int TOP_LEVEL = arch_mm_stage2_max_level();
const paddr_t VM_MEM_END = pa_init(0x200'0000'0000);
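/*
 * Note: the tests below assert the geometry they rely on: TOP_LEVEL == 2 and
 * four concatenated root tables covering the 2TiB ending at VM_MEM_END.
 */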

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
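	/*
	 * For example, assuming a 4KiB granule (PAGE_BITS == 12 and
	 * PAGE_LEVEL_BITS == 9), entries cover 4KiB at level 0, 2MiB at
	 * level 1 and 1GiB at level 2.
	 */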
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Checks whether the address is mapped in the address space.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa)
{
	uint32_t mode;
	return mm_vm_get_mode(t, ipa, ipa_add(ipa, 1), &mode) &&
	       (mode & MM_MODE_INVALID) == 0;
}

/**
 * Get an STL representation of the page table.
 */
std::span<pte_t, MM_PTE_PER_PAGE> get_table(paddr_t pa)
{
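	/*
	 * Interpret the page at the given physical address as a page table so
	 * that the test matchers can iterate over its entries.
	 */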
	auto table = reinterpret_cast<struct mm_page_table *>(
		ptr_from_va(va_from_pa(pa)));
	return std::span<pte_t>(table->entries, std::end(table->entries));
}

class mm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
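		/*
		 * Seed the page pool from the test heap: every allocation the
		 * mm code makes while these tests run comes from this pool in
		 * page-table-sized entries.
		 */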
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;
};

/**
 * A new table is initially empty.
 */
TEST_F(mm, ptable_init_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Each new concatenated table is initially empty.
 */
TEST_F(mm, ptable_init_concatenated_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Only the first page is mapped with all others left absent.
 */
TEST_F(mm, map_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * The start address is rounded down and the end address is rounded up to page
 * boundaries.
 */
TEST_F(mm, map_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x200'0000'0000 - PAGE_SIZE + 23);
	const paddr_t map_end = pa_add(map_begin, 268);
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(map_begin)));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the last page is mapped, and nothing else. */
	EXPECT_THAT(std::span(tables).first(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.back();
	EXPECT_THAT(table_l2.first(table_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.last(1)[0], TOP_LEVEL));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.first(table_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.last(1)[0], TOP_LEVEL - 1));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.first(table_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(0x200'0000'0000 - PAGE_SIZE));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a two page range over the boundary of two tables.
 */
TEST_F(mm, map_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(map_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(pa_add(map_begin, PAGE_SIZE))));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory creates blocks at the highest level.
 */
TEST_F(mm, map_all_at_top_level)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	auto tables = get_ptable(ptable);
	EXPECT_THAT(
		tables,
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	for (uint64_t i = 0; i < tables.size(); ++i) {
		for (uint64_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
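			/*
			 * With an identity mapping, root table i covers
			 * mm_entry_size(TOP_LEVEL + 1) bytes and each entry j
			 * within it one TOP_LEVEL block, giving the expected
			 * block address below.
			 */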
			EXPECT_THAT(pa_addr(arch_mm_block_from_pte(tables[i][j],
								   TOP_LEVEL)),
				    Eq((i * mm_entry_size(TOP_LEVEL + 1)) +
				       (j * mm_entry_size(TOP_LEVEL))))
				<< "i=" << i << " j=" << j;
		}
	}
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all of memory and then trying to map a page again doesn't introduce
 * a special mapping for that particular page.
 */
TEST_F(mm, map_already_mapped)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
				       mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no mappings are made.
 */
TEST_F(mm, map_reverse_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0x1234'5678),
				       pa_init(0x5000), mode, &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0x1234'5678));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a reverse range in the same page will map the page because the start
 * of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(20), pa_init(10), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(20));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no memory is mapped.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, map_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), mode, &ppool,
		&ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(0));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range that goes beyond the available memory clamps to the
 * available range.
 */
TEST_F(mm, map_clamp_to_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0),
				       pa_init(0xf32'0000'0000'0000), mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping a range outside of the available memory is ignored and doesn't alter
 * the page tables.
 */
TEST_F(mm, map_ignore_out_of_range)
{
	constexpr uint32_t mode = 0;
	ipaddr_t ipa = ipa_init(-1);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, VM_MEM_END,
				       pa_init(0xf0'0000'0000'0000), mode,
				       &ppool, &ipa));
	EXPECT_THAT(ipa_addr(ipa), Eq(pa_addr(VM_MEM_END)));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Map a single page and then map all of memory which replaces the single page
 * mapping with a higher level block mapping.
 */
TEST_F(mm, map_block_replaces_table)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(34567 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping all memory at the top level, then unmapping a page and remapping it
 * at a lower level, does not result in all memory being mapped at the top
 * level again.
 */
TEST_F(mm, map_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(12000 * PAGE_SIZE);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, page_begin, page_end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Each(Each(Truly(std::bind(arch_mm_pte_is_present, _1,
						    TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_block, _1, TOP_LEVEL)))),
			  Contains(Contains(Truly(std::bind(
				  arch_mm_pte_is_table, _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping with a mode that indicates unmapping results in the addresses being
 * unmapped with absent entries.
 */
TEST_F(mm, map_to_unmap)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END,
				       MM_MODE_UNMAPPED_MASK, &ppool, nullptr));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Preparing and committing an address range works the same as mapping it.
 */
TEST_F(mm, prepare_and_commit_first_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, page_begin, page_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, page_begin, page_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first page is mapped and nothing else. */
	EXPECT_THAT(std::span(tables).last(3),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	auto table_l2 = tables.front();
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[0], TOP_LEVEL));

	auto table_l1 =
		get_table(arch_mm_table_from_pte(table_l2[0], TOP_LEVEL));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1[0], TOP_LEVEL - 1));

	auto table_l0 =
		get_table(arch_mm_table_from_pte(table_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table_l0[0], TOP_LEVEL - 2)),
		    Eq(pa_addr(page_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Disjoint address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_disjoint_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t first_begin = pa_init(0);
	const paddr_t first_end = pa_add(first_begin, PAGE_SIZE);
	const paddr_t last_begin = pa_init(pa_addr(VM_MEM_END) - PAGE_SIZE);
	const paddr_t last_end = VM_MEM_END;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, first_begin, first_end,
					   mode, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, last_begin, last_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, first_begin, first_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, last_begin, last_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check that the first and last pages are mapped and nothing else. */
	EXPECT_THAT(std::span(tables).subspan(1, 2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the first page. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2[0], TOP_LEVEL));

	auto table0_l1 =
		get_table(arch_mm_table_from_pte(table0_l2[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1[0], TOP_LEVEL - 1));

	auto table0_l0 =
		get_table(arch_mm_table_from_pte(table0_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table0_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(first_begin)));

	/* Check the last page. */
	auto table3_l2 = tables.back();
	EXPECT_THAT(table3_l2.first(table3_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.last(1)[0], TOP_LEVEL));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l1.first(table3_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.last(1)[0], TOP_LEVEL - 1));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0.first(table3_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table3_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table3_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(last_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Overlapping address ranges can be prepared and committed together.
 */
TEST_F(mm, prepare_and_commit_overlapping_regions)
{
	constexpr uint32_t mode = 0;
	const paddr_t low_begin = pa_init(0x80'0000'0000 - PAGE_SIZE);
	const paddr_t high_begin = pa_add(low_begin, PAGE_SIZE);
	const paddr_t map_end = pa_add(high_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, high_begin, map_end, mode,
					   &ppool));
	ASSERT_TRUE(mm_vm_identity_prepare(&ptable, low_begin, map_end, mode,
					   &ppool));
	mm_vm_identity_commit(&ptable, high_begin, map_end, mode, &ppool,
			      nullptr);
	mm_vm_identity_commit(&ptable, low_begin, map_end, mode, &ppool,
			      nullptr);

	auto tables = get_ptable(ptable);
	EXPECT_THAT(tables, SizeIs(4));
	EXPECT_THAT(std::span(tables).last(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	ASSERT_THAT(TOP_LEVEL, Eq(2));

	/* Check only the last page of the first table is mapped. */
	auto table0_l2 = tables.front();
	EXPECT_THAT(table0_l2.first(table0_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l2.last(1)[0], TOP_LEVEL));

	auto table0_l1 = get_table(
		arch_mm_table_from_pte(table0_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table0_l1.first(table0_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table0_l1.last(1)[0], TOP_LEVEL - 1));

	auto table0_l0 = get_table(
		arch_mm_table_from_pte(table0_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table0_l0.first(table0_l0.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table0_l0.last(1)[0], TOP_LEVEL - 2));
	EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table0_l0.last(1)[0],
						   TOP_LEVEL - 2)),
		    Eq(pa_addr(low_begin)));

	/* Check only the first page of the second table is mapped. */
	auto table1_l2 = tables[1];
	EXPECT_THAT(table1_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l2[0], TOP_LEVEL));

	auto table1_l1 =
		get_table(arch_mm_table_from_pte(table1_l2[0], TOP_LEVEL));
	EXPECT_THAT(table1_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table1_l1[0], TOP_LEVEL - 1));

	auto table1_l0 =
		get_table(arch_mm_table_from_pte(table1_l1[0], TOP_LEVEL - 1));
	EXPECT_THAT(table1_l0.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 2)));
	ASSERT_TRUE(arch_mm_pte_is_block(table1_l0[0], TOP_LEVEL - 2));
	EXPECT_THAT(
		pa_addr(arch_mm_block_from_pte(table1_l0[0], TOP_LEVEL - 2)),
		Eq(pa_addr(high_begin)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * If the range is not mapped, unmapping has no effect.
 */
TEST_F(mm, unmap_not_mapped)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_TRUE(
		mm_vm_unmap(&ptable, pa_init(12345), pa_init(987652), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST_F(mm, unmap_all)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(uintpaddr_t(524421) * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 17 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), VM_MEM_END, &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The range to unmap is rounded to the containing pages.
 */
TEST_F(mm, unmap_round_to_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x160'0000'0000 + PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(map_begin, 93),
				pa_add(map_begin, 99), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 2;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));
	EXPECT_THAT(std::span(tables).subspan(l3_index + 1),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Except the mapped page which is absent. */
	auto table_l2 = tables[l3_index];
	constexpr auto l2_index = 384;
	EXPECT_THAT(table_l2.first(l2_index),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2[l2_index], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(l2_index + 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2[l2_index], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmap a range of page mappings that spans multiple concatenated tables.
 */
TEST_F(mm, unmap_across_tables)
{
	constexpr uint32_t mode = 0;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;

	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, map_begin, map_end, &ppool));

	auto tables = get_ptable(ptable);

	/* Check the untouched tables are empty. */
	EXPECT_THAT(std::span(tables).first(2),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Check the last page is explicitly marked as absent. */
	auto table2_l2 = tables[2];
	EXPECT_THAT(table2_l2.first(table2_l2.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l2.last(1)[0], TOP_LEVEL));

	auto table2_l1 = get_table(
		arch_mm_table_from_pte(table2_l2.last(1)[0], TOP_LEVEL));
	EXPECT_THAT(table2_l1.first(table2_l1.size() - 1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));
	ASSERT_TRUE(arch_mm_pte_is_table(table2_l1.last(1)[0], TOP_LEVEL - 1));

	auto table2_l0 = get_table(
		arch_mm_table_from_pte(table2_l1.last(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table2_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	/* Check the first page is explicitly marked as absent. */
	auto table3_l2 = tables[3];
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table3_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table3_l1 = get_table(
		arch_mm_table_from_pte(table3_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table3_l0 = get_table(
		arch_mm_table_from_pte(table3_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table3_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping outside the range of memory has no effect.
 */
TEST_F(mm, unmap_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, VM_MEM_END, pa_init(0x4000'0000'0000),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range, i.e. the end comes before the start, is treated as
 * an empty range so no change is made.
 */
TEST_F(mm, unmap_reverse_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_init(0x80'a000'0000), pa_init(27),
				&ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a reverse range in the same page will unmap the page because the
 * start of the range is rounded down and the end is rounded up.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_reverse_range_quirk)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x180'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, pa_add(page_begin, 100),
				pa_add(page_begin, 50), &ppool));

	auto tables = get_ptable(ptable);
	constexpr auto l3_index = 3;

	/* Check all other top level entries are empty... */
	EXPECT_THAT(std::span(tables).first(l3_index),
		    Each(Each(arch_mm_absent_pte(TOP_LEVEL))));

	/* Except the mapped page which is absent. */
	auto table_l2 = tables[l3_index];
	ASSERT_TRUE(arch_mm_pte_is_table(table_l2.first(1)[0], TOP_LEVEL));
	EXPECT_THAT(table_l2.subspan(1), Each(arch_mm_absent_pte(TOP_LEVEL)));

	auto table_l1 = get_table(
		arch_mm_table_from_pte(table_l2.first(1)[0], TOP_LEVEL));
	ASSERT_TRUE(arch_mm_pte_is_table(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l1.subspan(1),
		    Each(arch_mm_absent_pte(TOP_LEVEL - 1)));

	auto table_l0 = get_table(
		arch_mm_table_from_pte(table_l1.first(1)[0], TOP_LEVEL - 1));
	EXPECT_THAT(table_l0, Each(arch_mm_absent_pte(TOP_LEVEL - 2)));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Unmapping a range up to the maximum address causes the range end to wrap to
 * zero as it is rounded up to a page boundary meaning no change is made.
 *
 * This serves as a form of documentation of behaviour rather than a
 * requirement. Check whether any code relies on this before changing it.
 */
TEST_F(mm, unmap_last_address_quirk)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(
		&ptable, pa_init(0),
		pa_init(std::numeric_limits<uintpaddr_t>::max()), &ppool));
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Mapping then unmapping a page does not defrag the table.
 */
TEST_F(mm, unmap_does_not_defrag)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(5555 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, 13 * PAGE_SIZE);
	const paddr_t l1_begin = pa_init(666 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	EXPECT_THAT(get_ptable(ptable),
		    AllOf(SizeIs(4),
			  Not(Each(Each(arch_mm_absent_pte(TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Nothing is mapped in an empty table.
 */
TEST_F(mm, is_mapped_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x8123'2344)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1e0'0000'0073)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything is mapped in a full table.
 */
TEST_F(mm, is_mapped_all)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0xf247'a7b3)));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_init(0x1ff'7bfa'983b)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * A page is mapped for the range [begin, end).
 */
TEST_F(mm, is_mapped_page)
{
	constexpr uint32_t mode = 0;
	const paddr_t page_begin = pa_init(0x100'0000'0000);
	const paddr_t page_end = pa_add(page_begin, PAGE_SIZE);
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, page_begin, page_end, mode,
				       &ppool, nullptr));
	EXPECT_TRUE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_begin)));
	EXPECT_TRUE(
		mm_vm_is_mapped(&ptable, ipa_from_pa(pa_add(page_begin, 127))));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(page_end)));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Everything out of range is not mapped.
 */
TEST_F(mm, is_mapped_out_of_range)
{
	constexpr uint32_t mode = 0;
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_from_pa(VM_MEM_END)));
	EXPECT_FALSE(mm_vm_is_mapped(&ptable, ipa_init(0x1000'adb7'8123)));
	EXPECT_FALSE(mm_vm_is_mapped(
		&ptable, ipa_init(std::numeric_limits<uintpaddr_t>::max())));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * The mode of unmapped addresses can be retrieved and is set to invalid,
 * unowned and shared.
 */
TEST_F(mm, get_mode_empty)
{
	constexpr int default_mode =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));

	read_mode = 0;
	EXPECT_TRUE(
		mm_vm_get_mode(&ptable, ipa_init(0), ipa_init(20), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x3c97'654d),
				   ipa_init(0x3c97'e000), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_init(0x5f'ffff'ffff),
				   ipa_init(0x1ff'ffff'ffff), &read_mode));
	EXPECT_THAT(read_mode, Eq(default_mode));

	mm_vm_fini(&ptable, &ppool);
}

/**
 * Get the mode of a range comprised of individual pages which lie on either
 * side of a root table boundary.
 */
TEST_F(mm, get_mode_pages_across_tables)
{
	constexpr uint32_t mode = MM_MODE_INVALID | MM_MODE_SHARED;
	const paddr_t map_begin = pa_init(0x180'0000'0000 - PAGE_SIZE);
	const paddr_t map_end = pa_add(map_begin, 2 * PAGE_SIZE);
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, map_begin, map_end, mode,
				       &ppool, nullptr));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				   &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));

	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(map_begin, PAGE_SIZE)),
				    &read_mode));

	read_mode = 0;
	EXPECT_TRUE(mm_vm_get_mode(&ptable, ipa_from_pa(map_begin),
				   ipa_from_pa(map_end), &read_mode));
	EXPECT_THAT(read_mode, Eq(mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Anything out of range fails to retrieve the mode.
 */
TEST_F(mm, get_mode_out_of_range)
{
	constexpr uint32_t mode = MM_MODE_UNOWNED;
	struct mm_ptable ptable;
	uint32_t read_mode;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_from_pa(VM_MEM_END),
				    ipa_from_pa(pa_add(VM_MEM_END, 1)),
				    &read_mode));
	EXPECT_FALSE(mm_vm_get_mode(&ptable, ipa_init(0x1'1234'1234'1234),
				    ipa_init(2'0000'0000'0000), &read_mode));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging an entirely empty table has no effect.
 */
TEST_F(mm, defrag_empty)
{
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Defragging a table with some empty subtables (even nested) results in
 * an empty table.
 */
TEST_F(mm, defrag_empty_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t l0_begin = pa_init(120000 * PAGE_SIZE);
	const paddr_t l0_end = pa_add(l0_begin, PAGE_SIZE);
	const paddr_t l1_begin = pa_init(3 * mm_entry_size(1));
	const paddr_t l1_end = pa_add(l1_begin, 5 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l0_begin, l0_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, l1_begin, l1_end, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l0_begin, l0_end, &ppool));
	ASSERT_TRUE(mm_vm_unmap(&ptable, l1_begin, l1_end, &ppool));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&ptable, &ppool);
}

/**
 * Any subtable whose blocks all have the same attributes should be replaced
 * with a single block.
 */
TEST_F(mm, defrag_block_subtables)
{
	constexpr uint32_t mode = 0;
	const paddr_t begin = pa_init(39456 * mm_entry_size(1));
	const paddr_t middle = pa_add(begin, 67 * PAGE_SIZE);
	const paddr_t end = pa_add(begin, 4 * mm_entry_size(1));
	struct mm_ptable ptable;
	ASSERT_TRUE(mm_vm_init(&ptable, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), VM_MEM_END, mode,
				       &ppool, nullptr));
	ASSERT_TRUE(mm_vm_unmap(&ptable, begin, end, &ppool));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, begin, middle, mode, &ppool,
				       nullptr));
	ASSERT_TRUE(mm_vm_identity_map(&ptable, middle, end, mode, &ppool,
				       nullptr));
	mm_vm_defrag(&ptable, &ppool);
	EXPECT_THAT(
		get_ptable(ptable),
		AllOf(SizeIs(4), Each(Each(Truly(std::bind(arch_mm_pte_is_block,
							   _1, TOP_LEVEL))))));
	mm_vm_fini(&ptable, &ppool);
}

} /* namespace */

namespace mm_test
{
/**
 * Get an STL representation of the ptable.
 */
std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> get_ptable(
	const struct mm_ptable &ptable)
{
	std::vector<std::span<pte_t, MM_PTE_PER_PAGE>> all;
	const uint8_t root_table_count = arch_mm_stage2_root_table_count();
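	/*
	 * The concatenated root tables are contiguous pages starting at
	 * ptable.root, so each one is offset by a whole page table.
	 */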
	for (uint8_t i = 0; i < root_table_count; ++i) {
		all.push_back(get_table(
			pa_add(ptable.root, i * sizeof(struct mm_page_table))));
	}
	return all;
}

} /* namespace mm_test */