/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */
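
/*
 * Typical usage, as a minimal illustrative sketch (the device address below
 * is made up; real callers pass platform-specific values):
 *
 *	mm_init();
 *	void *dev = mm_identity_map(pa_init(0x9000000),
 *				    pa_add(pa_init(0x9000000), PAGE_SIZE),
 *				    MM_MODE_R | MM_MODE_W | MM_MODE_D);
 *	mm_unmap(pa_init(0x9000000),
 *		 pa_add(pa_init(0x9000000), PAGE_SIZE), 0);
 *
 * Per-VM (stage 2) tables are created with mm_ptable_init() and updated via
 * mm_vm_identity_map()/mm_vm_unmap().
 */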

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04
#define MAP_FLAG_NOBBM  0x08
#define MAP_FLAG_STAGE1 0x10

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
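
/*
 * For illustration, assuming the common 4 KiB granule (PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9, an architecture configuration rather than something
 * this file fixes): mm_entry_size(0) == 4 KiB, mm_entry_size(1) == 2 MiB and
 * mm_entry_size(2) == 1 GiB.
 */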

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}
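
/*
 * Illustrative example: mm_start_of_next_block(0x1234, 0x1000) == 0x2000,
 * i.e. the address is advanced and then rounded down to the block boundary.
 */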

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}
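
/*
 * For example, with the 4 KiB granule assumed above, a level 1 entry covers
 * 2 MiB and a level 1 table spans 1 GiB, so mm_level_end(0x40302010, 1)
 * returns the next 1 GiB boundary, 0x80000000 (illustrative values only).
 */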

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
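
/*
 * Illustration, again assuming a 4 KiB granule with four 9-bit levels: the
 * input address decomposes as [level 3 index | level 2 index | level 1 index |
 * level 0 index | 12-bit page offset], so mm_index(addr, 2) extracts bits
 * [38:30] of the address.
 */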

/**
 * Allocates `count` new, contiguous page tables.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
{
	size_t size_and_align = count * sizeof(struct mm_page_table);
	if (nosync) {
		return halloc_aligned_nosync(size_and_align, size_and_align);
	}

	return halloc_aligned(size_and_align, size_and_align);
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Replaces a page table entry with the given value. If both old and new
 * values are valid, it performs a break-before-make sequence where it first
 * writes an invalid value to the PTE, flushes the TLB, then writes the actual
 * new value. This is to prevent cases where CPUs have different 'valid' values
 * in their TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present, and if it hasn't been inhibited by the NOBBM flag.
	 */
	if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_valid(v, level) &&
	    arch_mm_pte_is_valid(new_pte, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level),
				  flags & MAP_FLAG_STAGE1);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags);

	return ntable;
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or
 * unmaps the given range instead if MAP_FLAG_UNMAP is set.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(begin, pte, level, flags);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do a
			 * break-before-make here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level);
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or
 * unmaps the given range instead if MAP_FLAG_UNMAP is set.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is
 * mapped or not mapped into the address space, according to the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
		    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
		    (mode & MM_MODE_INVALID && mode & MM_MODE_UNOWNED
			     ? MAP_FLAG_UNMAP
			     : 0);
	uint8_t root_level = arch_mm_max_level(mode) + 1;
	ptable_addr_t ptable_end =
		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * TODO: replace with assertions that the max level will be greater
	 * than 0 and less than 255 so wrapping will not be a problem and will
	 * not lead to subsequent overflows.
	 */
	if (root_level == 0 || root_level == 1) {
		return false;
	}

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state: first populate any missing subtables, then commit the new
	 * entries. On failure, the table may be left with extra internal
	 * tables, but the mappings themselves are unchanged.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MAP_FLAG_COMMIT)) {
		return false;
	}

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(
		t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Write the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	int max_level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);

	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));
	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(entry, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);
	/* Free the subtable. */
	hfree(table);
	/*
	 * We can assume that the block is aligned properly
	 * because all virtual addresses are aligned by
	 * definition, and we have a 1-1 mapping from virtual to
	 * physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragment the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0], level);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same as
		 * what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i], level) != attrs) {
			identical_blocks_so_far = false;
		}
	}
	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}
	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level);
		}
	}
}
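
/*
 * For instance (illustrative, assuming the 4 KiB granule): a table whose 512
 * level 0 entries are blocks with identical attributes, which under the 1-1
 * mapping means they cover one contiguous, aligned 2 MiB range, is folded by
 * the pass above into a single level 1 block entry; a table whose entries are
 * all absent is folded into a single absent entry.
 */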

/**
 * Determines if the given address is valid in the address space of the given
 * page table by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (!arch_mm_pte_is_valid(pte, level)) {
		return false;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(
				arch_mm_table_from_pte(pte, level)),
			addr, level - 1);
	}

	/* The entry is a valid block. */
	return true;
}

/**
 * Determines if the given address is valid in the address space of the given
 * page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	size_t index;

	addr = mm_round_down_to_page(addr);
	index = mm_index(addr, level + 1);

	if (index >= arch_mm_root_table_count(mode)) {
		return false;
	}

	return mm_is_mapped_recursive(&tables[index], addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = arch_mm_root_table_count(mode);

	tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(arch_mm_max_level(mode));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
void mm_ptable_fini(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level);
		}
	}

	hfree(tables);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range, with the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			   mode) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped, so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range, with
 * the architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog_nosync("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
		    pa_addr(layout_text_end()));
	dlog_nosync("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
		    pa_addr(layout_rodata_end()));
	dlog_nosync("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
		    pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog_nosync("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.root, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}