/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04
#define MAP_FLAG_NOBBM  0x08
#define MAP_FLAG_STAGE1 0x10

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
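
/*
 * Illustrative sizes only, assuming a 4KiB translation granule (PAGE_BITS ==
 * 12 and PAGE_LEVEL_BITS == 9); neither value is guaranteed by this file:
 *
 *	mm_entry_size(0) == 1 << 12 == 4KiB (a single page)
 *	mm_entry_size(1) == 1 << 21 == 2MiB
 *	mm_entry_size(2) == 1 << 30 == 1GiB
 */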

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
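
/*
 * Worked example under the same 4KiB-granule assumption as above, for
 * addr == 0x40083000:
 *
 *	mm_index(addr, 1) == (addr >> 21) & 0x1ff == 0x000
 *	mm_index(addr, 0) == (addr >> 12) & 0x1ff == 0x083
 *
 * i.e. the address selects entry 0 of its level 1 table and entry 0x83 of the
 * level 0 table below it.
 */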

/**
 * Allocates `count` page tables, contiguous in memory and aligned to the size
 * of the allocation.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
{
	size_t size_and_align = count * sizeof(struct mm_page_table);
	if (nosync) {
		return halloc_aligned_nosync(size_and_align, size_and_align);
	}

	return halloc_aligned(size_and_align, size_and_align);
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}
/**
 * Replaces a page table entry with the given value. If both old and new values
 * are present, it performs a break-before-make sequence where it first writes
 * an absent value to the PTE, flushes the TLB, and then writes the actual new
 * value. This prevents different CPUs from holding different 'present' values
 * for the same address in their TLBs, which could otherwise lead to problems
 * such as cache incoherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present, and if it hasn't been inhibited by the NOBBM flag.
	 */
	if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_present(v, level) &&
	    arch_mm_pte_is_present(new_pte, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level),
				  flags & MAP_FLAG_STAGE1);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags);

	return ntable;
}
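
/*
 * As a sketch of the above, again assuming a 4KiB granule: a level 1 entry
 * mapping a 2MiB block at physical address P is replaced by a pointer to a new
 * level 0 table whose 512 entries map P, P + 4KiB, P + 8KiB, ... with the same
 * attributes, so the translation seen by the CPU is unchanged but the
 * finer-grained entries can now be updated individually.
 */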

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(begin, pte, level, flags);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do
			 * break-before-makes here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level);
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}
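
/*
 * For instance (illustrative only, assuming a 4KiB granule and that blocks are
 * allowed at level 1): identity-mapping [0x200000, 0x400000) at level 1 is
 * satisfied by writing a single 2MiB block entry, whereas mapping
 * [0x200000, 0x201000) takes the subtable path, so mm_populate_table_pte
 * creates a level 0 table and the recursion writes one 4KiB block entry in it.
 */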

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags)) {
			return false;
		}
		begin = (begin + root_table_size) & ~(root_table_size - 1);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode, bool unmap)
{
	uint64_t attrs = unmap ? 0 : arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
		    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
		    (unmap ? MAP_FLAG_UNMAP : 0);
	uint8_t root_level = arch_mm_max_level(mode) + 1;
	ptable_addr_t ptable_end =
		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * TODO: replace with assertions that the max level will be greater than
	 * 0 and less than 255 so wrapping will not be a problem and will not
	 * lead to subsequent overflows.
	 */
	if (root_level == 0 || root_level == 1) {
		return false;
	}

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MAP_FLAG_COMMIT)) {
		return false;
	}

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}
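
/*
 * Note on the two-step update above: the first mm_map_root pass (without
 * MAP_FLAG_COMMIT) only allocates and populates any subtables the update will
 * need, leaving existing mappings unchanged, so it can fail part-way through
 * without visible effect; the second pass, with MAP_FLAG_COMMIT, then finds
 * those subtables already in place and only writes the new entry values.
 */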

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, false);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, true);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i])),
				level - 1, max_level);
		}
	}
}

/**
 * Write the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	int max_level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);

	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
	block_attrs = arch_mm_pte_attrs(table->entries[0]);
	table_attrs = arch_mm_pte_attrs(entry);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0]);
	/* Free the subtable. */
	hfree(table);
	/*
	 * We can assume that the block is aligned properly
	 * because all virtual addresses are aligned by
	 * definition, and we have a 1-1 mapping from virtual to
	 * physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragment the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0]);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same as
		 * what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
			identical_blocks_so_far = false;
		}
	}
	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}
	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level);
		}
	}
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	size_t index;

	addr = mm_round_down_to_page(addr);
	index = mm_index(addr, level + 1);

	if (index >= arch_mm_root_table_count(mode)) {
		return false;
	}

	return mm_is_mapped_recursive(&tables[index], addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = arch_mm_root_table_count(mode);

	tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(arch_mm_max_level(mode));
		}
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
void mm_ptable_fini(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level);
		}
	}

	hfree(tables);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			   mode) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
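
/*
 * A minimal usage sketch (not taken from this file): mapping one page of a
 * hypothetical device at DEVICE_BASE, a placeholder constant, and accessing it
 * through the returned hypervisor pointer.
 *
 *	volatile uint32_t *regs =
 *		mm_identity_map(pa_init(DEVICE_BASE),
 *				pa_add(pa_init(DEVICE_BASE), PAGE_SIZE),
 *				MM_MODE_R | MM_MODE_W | MM_MODE_D);
 *	if (regs != NULL) {
 *		uint32_t id = regs[0];	(read a 32-bit device register)
 *	}
 */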

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog_nosync("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
		    pa_addr(layout_text_end()));
	dlog_nosync("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
		    pa_addr(layout_rodata_end()));
	dlog_nosync("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
		    pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog_nosync("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.root, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}