/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */
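
/*
 * Here "1-1" means identity mappings: a virtual address (stage 1) or an
 * intermediate physical address (stage 2) always maps to the physical address
 * with the same numeric value, which is why mm_vm_translate below is a simple
 * conversion.
 */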

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address so:
 */
static_assert(
        sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
        "Currently, the same code manages the stage 1 and stage 2 page tables "
        "which only works if the virtual and intermediate physical addresses "
        "are the same size. It looks like that assumption might not be holding "
        "so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04
#define MAP_FLAG_NOBBM  0x08
#define MAP_FLAG_STAGE1 0x10

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
        return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
        return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
        return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
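
/*
 * For example, with a 4KiB translation granule (PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9, the typical configuration, though not required here),
 * a level 0 entry covers 4KiB, a level 1 entry covers 2MiB and a level 2
 * entry covers 1GiB.
 */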

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
        size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
        return ((addr >> offset) + 1) << offset;
}
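
/*
 * For example, with a 4KiB granule (same assumption as above), a table at
 * level 1 covers 512 * 2MiB = 1GiB, so mm_level_end(addr, 1) returns the end
 * of the 1GiB-aligned region containing addr.
 */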

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
        ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
        return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
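
/*
 * In other words, the index is the PAGE_LEVEL_BITS-wide bit field of the
 * address that selects one of the MM_PTE_PER_PAGE entries; with a 4KiB
 * granule this is bits [20:12] at level 0, bits [29:21] at level 1, and so on.
 */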
113
114/**
Andrew Scull4e5f8142018-10-12 14:37:19 +0100115 * Allocate a new page table.
116 */
static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
{
        size_t size_and_align = count * sizeof(struct mm_page_table);
        if (nosync) {
                return halloc_aligned_nosync(size_and_align, size_and_align);
        }

        return halloc_aligned(size_and_align, size_and_align);
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
                              bool stage1)
{
        if (stage1) {
                arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
        } else {
                arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
        }
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
        struct mm_page_table *table;
        uint64_t i;

        if (!arch_mm_pte_is_table(pte, level)) {
                return;
        }

        /* Recursively free any subtables. */
        table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
        for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
                mm_free_page_pte(table->entries[i], level - 1);
        }

        /* Free the table itself. */
        hfree(table);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues for example in cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
                             uint8_t level, int flags)
{
        pte_t v = *pte;

        /*
         * We need to do the break-before-make sequence if both values are
         * present, and if it hasn't been inhibited by the NOBBM flag.
         */
        if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_valid(v, level) &&
            arch_mm_pte_is_valid(new_pte, level)) {
                *pte = arch_mm_absent_pte(level);
                mm_invalidate_tlb(begin, begin + mm_entry_size(level),
                                  flags & MAP_FLAG_STAGE1);
        }

        /* Assign the new pte. */
        *pte = new_pte;

        /* Free pages that aren't in use anymore. */
        mm_free_page_pte(v, level);
}
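
/*
 * For example, replacing one valid block mapping with another at level 1
 * proceeds as: write an absent entry, invalidate the TLB for the 2MiB range
 * covered by the entry (assuming the usual 4KiB granule), then write the new
 * entry. Writing the new value directly could leave different CPUs holding
 * conflicting translations for the same address.
 */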

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
                                                   pte_t *pte, uint8_t level,
                                                   int flags)
{
        struct mm_page_table *ntable;
        pte_t v = *pte;
        pte_t new_pte;
        size_t i;
        size_t inc;
        uint8_t level_below = level - 1;

        /* Just return pointer to table if it's already populated. */
        if (arch_mm_pte_is_table(v, level)) {
                return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
        }

        /* Allocate a new table. */
        ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
        if (ntable == NULL) {
                dlog("Failed to allocate memory for page table\n");
                return NULL;
        }

        /* Determine template for new pte and its increment. */
        if (arch_mm_pte_is_block(v, level)) {
                inc = mm_entry_size(level_below);
                new_pte = arch_mm_block_pte(level_below,
                                            arch_mm_block_from_pte(v, level),
                                            arch_mm_pte_attrs(v, level));
        } else {
                inc = 0;
                new_pte = arch_mm_absent_pte(level_below);
        }

        /* Initialise entries in the new table. */
        for (i = 0; i < MM_PTE_PER_PAGE; i++) {
                ntable->entries[i] = new_pte;
                new_pte += inc;
        }

        /* Ensure initialisation is visible before updating the pte. */
        atomic_thread_fence(memory_order_release);

        /* Replace the pte entry, doing a break-before-make if needed. */
        mm_replace_entry(begin, pte,
                         arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
                         level, flags);

        return ntable;
}
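
/*
 * For example, when a 2MiB block at level 1 is split (again assuming a 4KiB
 * granule), the new level 0 table is filled with 512 contiguous 4KiB block
 * entries carrying the same attributes, so the effective mapping is unchanged
 * until the caller modifies individual entries.
 */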

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
        uint64_t i;

        for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
                if (arch_mm_pte_is_present(table->entries[i], level)) {
                        return false;
                }
        }

        return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes. Or
 * if MAP_FLAG_UNMAP is set, unmap the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
                         uint64_t attrs, struct mm_page_table *table,
                         uint8_t level, int flags)
{
        pte_t *pte = &table->entries[mm_index(begin, level)];
        ptable_addr_t level_end = mm_level_end(begin, level);
        size_t entry_size = mm_entry_size(level);
        bool commit = flags & MAP_FLAG_COMMIT;
        bool unmap = flags & MAP_FLAG_UNMAP;

        /* Cap end so that we don't go over the current level max. */
        if (end > level_end) {
                end = level_end;
        }

        /* Fill each entry in the table. */
        while (begin < end) {
                if (unmap ? !arch_mm_pte_is_present(*pte, level)
                          : arch_mm_pte_is_block(*pte, level) &&
                                    arch_mm_pte_attrs(*pte, level) == attrs) {
                        /*
                         * If the entry is already mapped with the right
                         * attributes, or already absent in the case of
                         * unmapping, no need to do anything; carry on to the
                         * next entry.
                         */
                } else if ((end - begin) >= entry_size &&
                           (unmap || arch_mm_is_block_allowed(level)) &&
                           (begin & (entry_size - 1)) == 0) {
                        /*
                         * If the entire entry is within the region we want to
                         * map, map/unmap the whole entry.
                         */
                        if (commit) {
                                pte_t new_pte =
                                        unmap ? arch_mm_absent_pte(level)
                                              : arch_mm_block_pte(level, pa,
                                                                  attrs);
                                mm_replace_entry(begin, pte, new_pte, level,
                                                 flags);
                        }
                } else {
                        /*
                         * If the entry is already a subtable get it; otherwise
                         * replace it with an equivalent subtable and get that.
                         */
                        struct mm_page_table *nt =
                                mm_populate_table_pte(begin, pte, level, flags);
                        if (nt == NULL) {
                                return false;
                        }

                        /*
                         * Recurse to map/unmap the appropriate entries within
                         * the subtable.
                         */
                        if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
                                          flags)) {
                                return false;
                        }

                        /*
                         * If the subtable is now empty, replace it with an
                         * absent entry at this level. We never need to do
                         * break-before-makes here because we are assigning
                         * an absent value.
                         */
                        if (commit && unmap &&
                            mm_page_table_is_empty(nt, level - 1)) {
                                pte_t v = *pte;
                                *pte = arch_mm_absent_pte(level);
                                mm_free_page_pte(v, level);
                        }
                }

                begin = (begin + entry_size) & ~(entry_size - 1);
                pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
                pte++;
        }

        return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes. Or if
 * MAP_FLAG_UNMAP is set, unmap the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
                        ptable_addr_t end, uint64_t attrs, uint8_t root_level,
                        int flags)
{
        size_t root_table_size = mm_entry_size(root_level);
        struct mm_page_table *table =
                &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

        while (begin < end) {
                if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
                                  root_level - 1, flags)) {
                        return false;
                }
                begin = (begin + root_table_size) & ~(root_table_size - 1);
                table++;
        }

        return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
                                      paddr_t pa_end, int mode)
{
        uint64_t attrs = arch_mm_mode_to_attrs(mode);
        int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
                    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
                    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
                    (mode & MM_MODE_INVALID && mode & MM_MODE_UNOWNED
                             ? MAP_FLAG_UNMAP
                             : 0);
        uint8_t root_level = arch_mm_max_level(mode) + 1;
        ptable_addr_t ptable_end =
                arch_mm_root_table_count(mode) * mm_entry_size(root_level);
        ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
        ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

        /*
         * TODO: replace with assertions that the max level will be greater
         * than 0 and less than 255 so wrapping will not be a problem and will
         * not lead to subsequent overflows.
         */
        if (root_level == 0 || root_level == 1) {
                return false;
        }

        /* Cap end to stay within the bounds of the page table. */
        if (end > ptable_end) {
                end = ptable_end;
        }

        /*
         * Do it in two steps to prevent leaving the table in a halfway updated
         * state. In such a two-step implementation, the table may be left with
         * extra internal tables, but no different mapping on failure.
         */
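        /*
         * The first, non-commit pass only allocates and links in any missing
         * subtables; block entries are written on the second pass once
         * MAP_FLAG_COMMIT is set, so a mid-way allocation failure cannot
         * leave a partially changed mapping behind.
         */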
        if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
            !mm_map_root(t, begin, end, attrs, root_level,
                         flags | MAP_FLAG_COMMIT)) {
                return false;
        }

        /* Invalidate the tlb. */
        if (!(mode & MM_MODE_NOINVALIDATE)) {
                mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
        }

        return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
                                   paddr_t pa_end, int mode)
{
        return mm_ptable_identity_update(t, pa_begin, pa_end, mode);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
                            paddr_t pa_end, int mode)
{
        return mm_ptable_identity_update(
                t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
                                    int max_level)
{
        uint64_t i;
        for (i = 0; i < MM_PTE_PER_PAGE; i++) {
                if (!arch_mm_pte_is_present(table->entries[i], level)) {
                        continue;
                }

                dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
                     table->entries[i]);

                if (arch_mm_pte_is_table(table->entries[i], level)) {
                        mm_dump_table_recursive(
                                mm_page_table_from_pa(arch_mm_table_from_pte(
                                        table->entries[i], level)),
                                level - 1, max_level);
                }
        }
}

/**
 * Write the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        int max_level = arch_mm_max_level(mode);
        uint8_t root_table_count = arch_mm_root_table_count(mode);
        uint8_t i;
        for (i = 0; i < root_table_count; ++i) {
                mm_dump_table_recursive(&tables[i], max_level, max_level);
        }
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
        struct mm_page_table *table =
                mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

        /*
         * Free the subtable. This is safe to do directly (rather than
         * using mm_free_page_pte) because we know by this point that it
         * doesn't have any subtables of its own.
         */
        hfree(table);

        /* Replace subtable with a single absent entry. */
        return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
        struct mm_page_table *table;
        uint64_t block_attrs;
        uint64_t table_attrs;
        uint64_t combined_attrs;
        paddr_t block_address;

        if (!arch_mm_is_block_allowed(level)) {
                return entry;
        }

        table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
        /*
         * Replace subtable with a single block, with equivalent
         * attributes.
         */
        block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
        table_attrs = arch_mm_pte_attrs(entry, level);
        combined_attrs =
                arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
        block_address = arch_mm_block_from_pte(table->entries[0], level - 1);
        /* Free the subtable. */
        hfree(table);
        /*
         * We can assume that the block is aligned properly
         * because all virtual addresses are aligned by
         * definition, and we have a 1-1 mapping from virtual to
         * physical addresses.
         */
        return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragment the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
        struct mm_page_table *table;
        uint64_t i;
        uint64_t attrs;
        bool identical_blocks_so_far = true;
        bool all_absent_so_far = true;

        if (!arch_mm_pte_is_table(entry, level)) {
                return entry;
        }

        table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

        /*
         * Check if all entries are blocks with the same flags or are all
         * absent.
         */
        attrs = arch_mm_pte_attrs(table->entries[0], level);
        for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
                /*
                 * First try to defrag the entry, in case it is a subtable.
                 */
                table->entries[i] =
                        mm_ptable_defrag_entry(table->entries[i], level - 1);

                if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
                        all_absent_so_far = false;
                }

                /*
                 * If the entry is a block, check that the flags are the same
                 * as what we have so far.
                 */
                if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
                    arch_mm_pte_attrs(table->entries[i], level) != attrs) {
                        identical_blocks_so_far = false;
                }
        }
        if (identical_blocks_so_far) {
                return mm_table_pte_to_block(entry, level);
        }
        if (all_absent_so_far) {
                return mm_table_pte_to_absent(entry, level);
        }
        return entry;
}
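
/*
 * For example (again assuming a 4KiB granule), a level 0 table whose 512
 * entries are 4KiB blocks with identical attributes (and therefore contiguous,
 * by the identity-mapping assumption) is folded back into a single 2MiB block
 * entry at level 1, and a fully absent table is replaced by a single absent
 * entry.
 */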

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t level = arch_mm_max_level(mode);
        uint8_t root_table_count = arch_mm_root_table_count(mode);
        uint8_t i;
        uint64_t j;

        /*
         * Loop through each entry in the table. If it points to another table,
         * check if that table can be replaced by a block or an absent entry.
         */
        for (i = 0; i < root_table_count; ++i) {
                for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        tables[i].entries[j] = mm_ptable_defrag_entry(
                                tables[i].entries[j], level);
                }
        }
}

/**
 * Determines if the given address is valid in the address space of the given
 * page table by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
                                   ptable_addr_t addr, uint8_t level)
{
        pte_t pte;
        ptable_addr_t va_level_end = mm_level_end(addr, level);

        /* It isn't mapped if it doesn't fit in the table. */
        if (addr >= va_level_end) {
                return false;
        }

        pte = table->entries[mm_index(addr, level)];

        if (!arch_mm_pte_is_valid(pte, level)) {
                return false;
        }

        if (arch_mm_pte_is_table(pte, level)) {
                return mm_is_mapped_recursive(
                        mm_page_table_from_pa(
                                arch_mm_table_from_pte(pte, level)),
                        addr, level - 1);
        }

        /* The entry is a valid block. */
        return true;
}

/**
 * Determines if the given address is valid in the address space of the given
 * page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
                                int mode)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t level = arch_mm_max_level(mode);
        size_t index;

        addr = mm_round_down_to_page(addr);
        index = mm_index(addr, level + 1);

        if (index >= arch_mm_root_table_count(mode)) {
                return false;
        }

        return mm_is_mapped_recursive(&tables[index], addr, level);
}
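
/*
 * Note that mm_index(addr, level + 1) above selects which of the (possibly
 * concatenated) root tables covers addr; any address beyond the last root
 * table is treated as unmapped.
 */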

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
        uint8_t i;
        size_t j;
        struct mm_page_table *tables;
        uint8_t root_table_count = arch_mm_root_table_count(mode);

        tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
        if (tables == NULL) {
                return false;
        }

        for (i = 0; i < root_table_count; i++) {
                for (j = 0; j < MM_PTE_PER_PAGE; j++) {
                        tables[i].entries[j] =
                                arch_mm_absent_pte(arch_mm_max_level(mode));
                }
        }

        /* TODO: halloc could return a virtual or physical address if mm not
         * enabled? */
        t->root = pa_init((uintpaddr_t)tables);

        return true;
}

/**
 * Frees all memory associated with the given page table.
 */
void mm_ptable_fini(struct mm_ptable *t, int mode)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t level = arch_mm_max_level(mode);
        uint8_t root_table_count = arch_mm_root_table_count(mode);
        uint8_t i;
        uint64_t j;

        for (i = 0; i < root_table_count; ++i) {
                for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        mm_free_page_pte(tables[i].entries[j], level);
                }
        }

        hfree(tables);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
                        int mode, ipaddr_t *ipa)
{
        bool success =
                mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

        if (success && ipa != NULL) {
                *ipa = ipa_from_pa(begin);
        }

        return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
        return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
{
        /* TODO: If we add pages dynamically, they must be included here too. */
        return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
               mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
                           mode) &&
               mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

774/**
Andrew Scull80871322018-08-06 12:04:09 +0100775 * Checks whether the given intermediate physical addess is mapped in the given
776 * page table of a VM.
777 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
        return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
        bool mapped = mm_vm_is_mapped(t, ipa, 0);

        if (mapped) {
                *pa = pa_init(ipa_addr(ipa));
        }

        return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
        if (mm_ptable_identity_map(&ptable, begin, end,
                                   mode | MM_MODE_STAGE1)) {
                return ptr_from_va(va_from_pa(begin));
        }

        return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
        return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
        dlog_nosync("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
                    pa_addr(layout_text_end()));
        dlog_nosync("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
                    pa_addr(layout_rodata_end()));
        dlog_nosync("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
                    pa_addr(layout_data_end()));

        if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
                dlog_nosync("Unable to allocate memory for page table.\n");
                return false;
        }

        /* Map page for uart. */
        /* TODO: We may not want to map this. */
        mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
                               pa_add(pa_init(PL011_BASE), PAGE_SIZE),
                               MM_MODE_R | MM_MODE_W | MM_MODE_D |
                                       MM_MODE_NOSYNC | MM_MODE_STAGE1);

        /* Map each section. */
        mm_identity_map(layout_text_begin(), layout_text_end(),
                        MM_MODE_X | MM_MODE_NOSYNC);

        mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
                        MM_MODE_R | MM_MODE_NOSYNC);

        mm_identity_map(layout_data_begin(), layout_data_end(),
                        MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

        return arch_mm_init(ptable.root, true);
}

bool mm_cpu_init(void)
{
        return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
        mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}