/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern "C" {
#include "hf/mm.h"

#include "hf/arch/mm.h"

#include "hf/alloc.h"
}

#include <memory>

#include <gmock/gmock.h>

namespace
{
using ::testing::Eq;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 10;
constexpr size_t ENTRY_COUNT = PAGE_SIZE / sizeof(pte_t);
const int TOP_LEVEL = arch_mm_max_level(0);
const pte_t ABSENT_ENTRY = arch_mm_absent_pte(TOP_LEVEL);

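/*
 * Note: on a typical AArch64 configuration with a 4KiB translation granule
 * (PAGE_SIZE == 4096 and 8-byte PTEs), ENTRY_COUNT works out to 512 entries
 * per table; the actual values of TOP_LEVEL and ABSENT_ENTRY are whatever the
 * arch_mm_* helpers report for the build architecture.
 */
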
/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
size_t mm_entry_size(int level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

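/*
 * For reference, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (a 4KiB
 * granule), mm_entry_size(0) is 4KiB, mm_entry_size(1) is 2MiB and
 * mm_entry_size(2) is 1GiB.
 */
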
/**
 * Fill a ptable with absent entries.
 */
void init_absent(pte_t *table)
{
        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                table[i] = ABSENT_ENTRY;
        }
}

/**
 * Fill a ptable with block entries.
 */
void init_blocks(pte_t *table, int level, paddr_t start_address, uint64_t attrs)
{
        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                table[i] = arch_mm_block_pte(
                        level, pa_add(start_address, i * mm_entry_size(level)),
                        attrs);
        }
}

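/*
 * For example, init_blocks(table, TOP_LEVEL, pa_init(0), 0) fills a top-level
 * table with blocks that identity-map the whole range it covers; the tests
 * below combine this with arch_mm_table_pte to hand-build nested tables
 * before calling the mm_* functions under test.
 */
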
/**
 * Defragging an entirely empty table should have no effect.
 */
TEST(mm, ptable_defrag_empty)
{
        /* Provide the allocator with a heap to allocate page tables from. */
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_absent(table);
        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        mm_ptable_defrag(&ptable, 0);

        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
}

/**
 * Defragging a table with some empty subtables (even nested) should result in
 * an empty table.
 */
TEST(mm, ptable_defrag_empty_subtables)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_b = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_absent(subtable_a);
        init_absent(subtable_aa);
        init_absent(subtable_b);
        init_absent(table);

        subtable_a[3] = arch_mm_table_pte(TOP_LEVEL - 1,
                                          pa_init((uintpaddr_t)subtable_aa));
        table[0] =
                arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
        table[5] =
                arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));

        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        mm_ptable_defrag(&ptable, 0);

        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
}

/**
 * Any subtable whose entries are all blocks with the same attributes should
 * be replaced with a single block.
 */
TEST(mm, ptable_defrag_block_subtables)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_b = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_blocks(subtable_a, TOP_LEVEL - 1, pa_init(0), 0);
        init_blocks(subtable_aa, TOP_LEVEL - 2,
                    pa_init(3 * mm_entry_size(TOP_LEVEL - 1)), 0);
        init_blocks(subtable_b, TOP_LEVEL - 1,
                    pa_init(5 * mm_entry_size(TOP_LEVEL)), 0);
        init_blocks(table, TOP_LEVEL, pa_init(0), 0);

        subtable_a[3] = arch_mm_table_pte(TOP_LEVEL - 1,
                                          pa_init((uintpaddr_t)subtable_aa));
        table[0] =
                arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));
        table[5] =
                arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_b));

        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        mm_ptable_defrag(&ptable, 0);

        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_TRUE(arch_mm_pte_is_present(table[i], TOP_LEVEL))
                        << "i=" << i;
                EXPECT_TRUE(arch_mm_pte_is_block(table[i], TOP_LEVEL))
                        << "i=" << i;
                EXPECT_THAT(pa_addr(arch_mm_block_from_pte(table[i])),
                            Eq(i * mm_entry_size(TOP_LEVEL)))
                        << "i=" << i;
        }
}

/** If nothing is mapped, unmapping the hypervisor should have no effect. */
TEST(mm, ptable_unmap_hypervisor_not_mapped)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_absent(table);

        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        EXPECT_TRUE(mm_ptable_unmap_hypervisor(&ptable, 0));

        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
}

/**
 * Unmapping everything should result in an empty page table with no subtables.
 */
TEST(mm, vm_unmap)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_a = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        pte_t *subtable_aa = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_absent(table);
        init_absent(subtable_a);
        init_absent(subtable_aa);

        subtable_aa[0] = arch_mm_block_pte(TOP_LEVEL - 2, pa_init(0), 0);
        subtable_a[0] = arch_mm_table_pte(TOP_LEVEL - 1,
                                          pa_init((uintpaddr_t)subtable_aa));
        table[0] =
                arch_mm_table_pte(TOP_LEVEL, pa_init((uintpaddr_t)subtable_a));

        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        EXPECT_TRUE(mm_vm_unmap(&ptable, pa_init(0), pa_init(1), 0));

        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
}

/**
 * Mapping a range should result in just the corresponding pages being mapped.
 */
TEST(mm, vm_identity_map)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        /* Start with an empty page table. */
        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_absent(table);
        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        /* Try mapping the first page. */
        ipaddr_t ipa = ipa_init(-1);
        EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                                       0, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));

        /* Check that the first page is mapped, and nothing else. */
        for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(table[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
        ASSERT_TRUE(arch_mm_pte_is_table(table[0], TOP_LEVEL));
        pte_t *subtable_a = (pte_t *)ptr_from_va(
                va_from_pa(arch_mm_table_from_pte(table[0])));
        for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(subtable_a[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
        ASSERT_TRUE(arch_mm_pte_is_table(subtable_a[0], TOP_LEVEL - 1));
        pte_t *subtable_aa = (pte_t *)ptr_from_va(
                va_from_pa(arch_mm_table_from_pte(subtable_a[0])));
        for (uint64_t i = 1; i < ENTRY_COUNT; ++i) {
                EXPECT_THAT(subtable_aa[i], Eq(ABSENT_ENTRY)) << "i=" << i;
        }
        EXPECT_TRUE(arch_mm_pte_is_block(subtable_aa[0], TOP_LEVEL - 2));
        EXPECT_THAT(pa_addr(arch_mm_block_from_pte(subtable_aa[0])), Eq(0));
}

/** Mapping a range that is already mapped should be a no-op. */
TEST(mm, vm_identity_map_already_mapped)
{
        auto test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
        halloc_init((size_t)test_heap.get(), TEST_HEAP_SIZE);

        /* Start with a full page table mapping everything. */
        pte_t *table = (pte_t *)halloc_aligned(PAGE_SIZE, PAGE_SIZE);
        init_blocks(table, TOP_LEVEL, pa_init(0), 0);
        struct mm_ptable ptable;
        ptable.table = pa_init((uintpaddr_t)table);

        /* Try mapping the first page. */
        ipaddr_t ipa = ipa_init(-1);
        EXPECT_TRUE(mm_vm_identity_map(&ptable, pa_init(0), pa_init(PAGE_SIZE),
                                       0, &ipa));
        EXPECT_THAT(ipa_addr(ipa), Eq(0));

        /*
         * The table should still be full of blocks, with no subtables or
         * anything else.
         */
        for (uint64_t i = 0; i < ENTRY_COUNT; ++i) {
                EXPECT_TRUE(arch_mm_pte_is_block(table[i], TOP_LEVEL))
                        << "i=" << i;
        }
}

} /* namespace */