blob: 7990d27db44aae661470c50189d2cadc3a1baf78 [file] [log] [blame]
/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17extern "C" {
18#include "hf/mm.h"
19
20#include "hf/arch/mm.h"
21
22#include "hf/alloc.h"
23}
24
25#include <memory>
26
27#include <gmock/gmock.h>
28
Andrew Scull232d5602018-10-15 11:07:45 +010029namespace
30{
Andrew Walbran9fa106c2018-09-28 14:19:29 +010031using ::testing::Eq;
32
/* Size of the heap handed to halloc in each test: ten pages. */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
/*
 * Deepest page table level for the tests; semantics of the argument to
 * arch_mm_max_level are defined by the arch layer (presumably a mode flag).
 */
const int TOP_LEVEL = arch_mm_max_level(0);
/* Canonical absent PTE at the top level, used to recognise empty entries. */
const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);
Andrew Walbran9fa106c2018-09-28 14:19:29 +010036
37/**
38 * Calculates the size of the address space represented by a page table entry at
39 * the given level.
40 */
Andrew Scull232d5602018-10-15 11:07:45 +010041size_t mm_entry_size(int level)
Andrew Walbran9fa106c2018-09-28 14:19:29 +010042{
43 return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
44}
45
46/**
Andrew Scull4e5f8142018-10-12 14:37:19 +010047 * Get the page table from the physical address.
48 */
49struct mm_page_table *page_table_from_pa(paddr_t pa)
50{
51 return reinterpret_cast<struct mm_page_table *>(
52 ptr_from_va(va_from_pa(pa)));
53}
54
55/**
56 * Allocate a page table.
57 */
58struct mm_page_table *alloc_page_table()
59{
60 return reinterpret_cast<struct mm_page_table *>(halloc_aligned(
61 sizeof(struct mm_page_table), alignof(struct mm_page_table)));
62}
63
64/**
Andrew Walbran9fa106c2018-09-28 14:19:29 +010065 * Fill a ptable with absent entries.
66 */
Andrew Scull4e5f8142018-10-12 14:37:19 +010067void init_absent(struct mm_page_table *table)
Andrew Walbran9fa106c2018-09-28 14:19:29 +010068{
Andrew Scull4e5f8142018-10-12 14:37:19 +010069 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
70 table->entries[i] = ABSENT_ENTRY;
Andrew Walbran9fa106c2018-09-28 14:19:29 +010071 }
72}
73
74/**
75 * Fill a ptable with block entries.
76 */
Andrew Scull4e5f8142018-10-12 14:37:19 +010077void init_blocks(struct mm_page_table *table, int level, paddr_t start_address,
78 uint64_t attrs)
Andrew Walbran9fa106c2018-09-28 14:19:29 +010079{
Andrew Scull4e5f8142018-10-12 14:37:19 +010080 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
81 table->entries[i] = arch_mm_block_pte(
Andrew Walbran9fa106c2018-09-28 14:19:29 +010082 level, pa_add(start_address, i * mm_entry_size(level)),
83 attrs);
84 }
85}
86
87/**
88 * Defragging an entirely empty table should have no effect.
89 */
90TEST(mm, ptable_defrag_empty)
91{
92 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
93 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
94
Andrew Scull4e5f8142018-10-12 14:37:19 +010095 struct mm_page_table *table = alloc_page_table();
Andrew Walbran9fa106c2018-09-28 14:19:29 +010096 init_absent(table);
97 struct mm_ptable ptable;
98 ptable.table = pa_init((uintpaddr_t)table);
99
100 mm_ptable_defrag(&ptable, 0);
101
Andrew Scull4e5f8142018-10-12 14:37:19 +0100102 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
103 EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100104 }
105}
106
107/**
108 * Defragging a table with some empty subtables (even nested) should result in
109 * an empty table.
110 */
111TEST(mm, ptable_defrag_empty_subtables)
112{
113 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
114 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
115
Andrew Scull4e5f8142018-10-12 14:37:19 +0100116 struct mm_page_table *subtable_a = alloc_page_table();
117 struct mm_page_table *subtable_aa = alloc_page_table();
118 struct mm_page_table *subtable_b = alloc_page_table();
119 struct mm_page_table *table = alloc_page_table();
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100120 init_absent(subtable_a);
121 init_absent(subtable_aa);
122 init_absent(subtable_b);
123 init_absent(table);
124
Andrew Scull4e5f8142018-10-12 14:37:19 +0100125 subtable_a->entries[3] = arch_mm_table_pte(
126 TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
127 table->entries[0] =
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100128 arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
Andrew Scull4e5f8142018-10-12 14:37:19 +0100129 table->entries[5] =
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100130 arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
131
132 struct mm_ptable ptable;
133 ptable.table = pa_init((uintpaddr_t)table);
134
135 mm_ptable_defrag(&ptable, 0);
136
Andrew Scull4e5f8142018-10-12 14:37:19 +0100137 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
138 EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100139 }
140}
141
142/**
143 * Any subtable with all blocks with the same attributes should be replaced
144 * with a single block.
145 */
146TEST(mm, ptable_defrag_block_subtables)
147{
148 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
149 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
150
Andrew Scull4e5f8142018-10-12 14:37:19 +0100151 struct mm_page_table *subtable_a = alloc_page_table();
152 struct mm_page_table *subtable_aa = alloc_page_table();
153 struct mm_page_table *subtable_b = alloc_page_table();
154 struct mm_page_table *table = alloc_page_table();
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100155 init_blocks(subtable_a, TOP_LEVEL - 1, pa_init(0), 0);
156 init_blocks(subtable_aa, TOP_LEVEL - 2,
157 pa_init(3 * mm_entry_size(TOP_LEVEL - 1)), 0);
158 init_blocks(subtable_b, TOP_LEVEL - 1,
159 pa_init(5 * mm_entry_size(TOP_LEVEL)), 0);
160 init_blocks(table, TOP_LEVEL, pa_init(0), 0);
161
Andrew Scull4e5f8142018-10-12 14:37:19 +0100162 subtable_a->entries[3] = arch_mm_table_pte(
163 TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
164 table->entries[0] =
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100165 arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
Andrew Scull4e5f8142018-10-12 14:37:19 +0100166 table->entries[5] =
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100167 arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));
168
169 struct mm_ptable ptable;
170 ptable.table = pa_init((uintpaddr_t)table);
171
172 mm_ptable_defrag(&ptable, 0);
173
Andrew Scull4e5f8142018-10-12 14:37:19 +0100174 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
175 EXPECT_TRUE(
176 arch_mm_pte_is_present(table->entries[i], TOP_LEVEL))
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100177 << "i=" << i;
Andrew Scull4e5f8142018-10-12 14:37:19 +0100178 EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100179 << "i=" << i;
Andrew Scull4e5f8142018-10-12 14:37:19 +0100180 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table->entries[i])),
Andrew Walbran9fa106c2018-09-28 14:19:29 +0100181 Eq(i * mm_entry_size(TOP_LEVEL)))
182 << "i=" << i;
183 }
184}
Andrew Walbran6324fc92018-10-03 11:46:43 +0100185
186/** If nothing is mapped, unmapping the hypervisor should have no effect. */
187TEST(mm, ptable_unmap_hypervisor_not_mapped)
188{
189 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
190 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
191
Andrew Scull4e5f8142018-10-12 14:37:19 +0100192 struct mm_page_table *table = alloc_page_table();
Andrew Walbran6324fc92018-10-03 11:46:43 +0100193 init_absent(table);
194
195 struct mm_ptable ptable;
196 ptable.table = pa_init((uintpaddr_t)table);
197
198 EXPECT_TRUE(mm_ptable_unmap_hypervisor(&ptable, 0));
199
Andrew Scull4e5f8142018-10-12 14:37:19 +0100200 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
201 EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
Andrew Walbran6324fc92018-10-03 11:46:43 +0100202 }
203}
204
205/**
206 * Unmapping everything should result in an empty page table with no subtables.
207 */
208TEST(mm, vm_unmap)
209{
210 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
211 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
212
Andrew Scull4e5f8142018-10-12 14:37:19 +0100213 struct mm_page_table *subtable_a = alloc_page_table();
214 struct mm_page_table *subtable_aa = alloc_page_table();
215 struct mm_page_table *table = alloc_page_table();
Andrew Walbran6324fc92018-10-03 11:46:43 +0100216 init_absent(subtable_a);
217 init_absent(subtable_aa);
Andrew Scull4e5f8142018-10-12 14:37:19 +0100218 init_absent(table);
Andrew Walbran6324fc92018-10-03 11:46:43 +0100219
Andrew Scull4e5f8142018-10-12 14:37:19 +0100220 subtable_aa->entries[0] =
221 arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
222 subtable_a->entries[0] = arch_mm_table_pte(
223 TOP_LEVEL - 1, pa_init((uintpaddr_t)subtable_aa));
224 table->entries[0] =
Andrew Walbran6324fc92018-10-03 11:46:43 +0100225 arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
226
227 struct mm_ptable ptable;
228 ptable.table = pa_init((uintpaddr_t)table);
229
230 EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), pa_init(1), 0));
231
Andrew Scull4e5f8142018-10-12 14:37:19 +0100232 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
233 EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
Andrew Walbran6324fc92018-10-03 11:46:43 +0100234 }
235}
236
237/**
238 * Mapping a range should result in just the corresponding pages being mapped.
239 */
240TEST(mm, vm_identity_map)
241{
242 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
243 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
244
245 /* Start with an empty page table. */
Andrew Scull4e5f8142018-10-12 14:37:19 +0100246 struct mm_page_table *table = alloc_page_table();
Andrew Walbran6324fc92018-10-03 11:46:43 +0100247 init_absent(table);
248 struct mm_ptable ptable;
249 ptable.table = pa_init((uintpaddr_t)table);
250
251 /* Try mapping the first page. */
252 ipaddr_t ipa = ipa_init(-1);
253 EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
254 0, &ipa));
255 EXPECT_THAT(ipa_addr(ipa), Eq(0));
256
257 /* Check that the first page is mapped, and nothing else. */
Andrew Scull4e5f8142018-10-12 14:37:19 +0100258 for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
259 EXPECT_THAT(table->entries[i], Eq(ABSENT_ENTRY)) << "i=" << i;
Andrew Walbran6324fc92018-10-03 11:46:43 +0100260 }
Andrew Scull4e5f8142018-10-12 14:37:19 +0100261 ASSERT_TRUE(arch_mm_pte_is_table(table->entries[0], TOP_LEVEL));
262 struct mm_page_table *subtable_a =
263 page_table_from_pa(arch_mm_table_from_pte(table->entries[0]));
264 for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
265 EXPECT_THAT(subtable_a->entries[i], Eq(ABSENT_ENTRY))
266 << "i=" << i;
Andrew Walbran6324fc92018-10-03 11:46:43 +0100267 }
Andrew Scull4e5f8142018-10-12 14:37:19 +0100268 ASSERT_TRUE(
269 arch_mm_pte_is_table(subtable_a->entries[0], TOP_LEVEL - 1));
270 struct mm_page_table *subtable_aa = page_table_from_pa(
271 arch_mm_table_from_pte(subtable_a->entries[0]));
272 for (uint64_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
273 EXPECT_THAT(subtable_aa->entries[i], Eq(ABSENT_ENTRY))
274 << "i=" << i;
Andrew Walbran6324fc92018-10-03 11:46:43 +0100275 }
Andrew Scull4e5f8142018-10-12 14:37:19 +0100276 EXPECT_TRUE(
277 arch_mm_pte_is_block(subtable_aa->entries[0], TOP_LEVEL - 2));
278 EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa->entries[0])),
279 Eq(0));
Andrew Walbran6324fc92018-10-03 11:46:43 +0100280}
281
282/** Mapping a range that is already mapped should be a no-op. */
283TEST(mm, vm_identity_map_already_mapped)
284{
285 auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
286 halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);
287
288 /* Start with a full page table mapping everything. */
Andrew Scull4e5f8142018-10-12 14:37:19 +0100289 struct mm_page_table *table = alloc_page_table();
Andrew Walbran6324fc92018-10-03 11:46:43 +0100290 init_blocks(table, TOP_LEVEL, pa_init(0), 0);
291 struct mm_ptable ptable;
292 ptable.table = pa_init((uintpaddr_t)table);
293
294 /* Try mapping the first page. */
295 ipaddr_t ipa = ipa_init(-1);
296 EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
297 0, &ipa));
298 EXPECT_THAT(ipa_addr(ipa), Eq(0));
299
300 /*
301 * The table should still be full of blocks, with no subtables or
302 * anything else.
303 */
Andrew Scull4e5f8142018-10-12 14:37:19 +0100304 for (uint64_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
305 EXPECT_TRUE(arch_mm_pte_is_block(table->entries[i], TOP_LEVEL))
Andrew Walbran6324fc92018-10-03 11:46:43 +0100306 << "i=" << i;
307 }
308}
309
Andrew Scull232d5602018-10-15 11:07:45 +0100310} /* namespace */