/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

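/*
 * Flags passed down through mm_map_level. MAP_FLAG_NOSYNC causes any new
 * subtables to be allocated with the nosync allocator, MAP_FLAG_COMMIT makes
 * the walk actually apply the changes (rather than only allocating any missing
 * subtables), and MAP_FLAG_UNMAP makes the walk remove mappings instead of
 * creating them.
 */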
#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

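/*
 * The helpers below assume a translation regime in which each level of table
 * resolves PAGE_LEVEL_BITS bits of the address and level 0 entries map pages
 * of PAGE_SIZE bytes. As an illustrative sketch, with 4 KiB pages
 * (PAGE_BITS == 12) and PAGE_LEVEL_BITS == 9, a level 0 entry spans 4 KiB, a
 * level 1 entry 2 MiB and a level 2 entry 1 GiB.
 */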
/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
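
/*
 * As an illustrative sketch, assuming PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9, the address 0x40203000 has mm_index(addr, 2) == 1,
 * mm_index(addr, 1) == 1 and mm_index(addr, 0) == 3, and mm_entry_size(1)
 * is 2 MiB.
 */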

/**
 * Allocate a new page table.
 */
static struct mm_page_table *mm_alloc_page_table(bool nosync)
{
	if (nosync) {
		return halloc_aligned_nosync(sizeof(struct mm_page_table),
					     alignof(struct mm_page_table));
	}

	return halloc_aligned(sizeof(struct mm_page_table),
			      alignof(struct mm_page_table));
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(pte_t *pte, uint8_t level,
						   bool nosync)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_table(nosync);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
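	/*
	 * If the entry was a block, the new table must map the same range with
	 * the same attributes, so it is filled with block entries one level
	 * down, each advanced by the size of an entry at that level. If the
	 * entry was absent, the new table is filled with absent entries.
	 */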
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
	/* Recursively free any subtables. */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_ptable_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}
	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool nosync = flags & MAP_FLAG_NOSYNC;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t v = *pte;
				*pte = unmap ? arch_mm_absent_pte(level)
					     : arch_mm_block_pte(level, pa,
								 attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(pte, level, nosync);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level.
			 */
			if (commit && unmap &&
			    mm_ptable_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level);
			}
		}

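		/*
		 * Advance begin and pa to the start of the next entry at this
		 * level and move on to the next pte in the table.
		 */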
		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode, bool unmap)
{
	uint64_t attrs = unmap ? 0 : arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
		    (unmap ? MAP_FLAG_UNMAP : 0);
	uint8_t level = arch_mm_max_level(mode);
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
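	/*
	 * First pass (no commit): allocate any subtables that will be needed
	 * but leave the mappings untouched. If this succeeds, the commit pass
	 * below cannot fail, which is why its return value is ignored.
	 */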
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, false);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, true);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i])),
				level - 1, max_level);
		}
	}
}

/**
 * Write the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);
	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
	block_attrs = arch_mm_pte_attrs(table->entries[0]);
	table_attrs = arch_mm_pte_attrs(entry);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0]);
	/* Free the subtable. */
	hfree(table);
	/*
	 * We can assume that the block is aligned properly
	 * because all virtual addresses are aligned by
	 * definition, and we have a 1-1 mapping from virtual to
	 * physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragment the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0]);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same as
		 * what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
			identical_blocks_so_far = false;
		}
	}
	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}
	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	uint8_t level = arch_mm_max_level(mode);
	uint64_t i;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, layout_text_begin(), layout_text_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	uint8_t level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	struct mm_page_table *table;

	table = mm_alloc_page_table(mode & MM_MODE_NOSYNC);
	if (table == NULL) {
		return false;
	}

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		table->entries[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
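
/*
 * A minimal usage sketch (the names below are illustrative, not taken from
 * the rest of the codebase): a caller that needs temporary access to a device
 * or boot memory region might do
 *
 *	struct my_dev *dev = mm_identity_map(dev_begin, dev_end,
 *					     MM_MODE_R | MM_MODE_W | MM_MODE_D);
 *	if (dev == NULL) {
 *		return false;
 *	}
 *	...
 *	mm_unmap(dev_begin, dev_end, 0);
 */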

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}