/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/assert.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to access
 * memory, and then a stage 2 mapping per VM. The design assumes that all page
 * tables contain only 1-1 (identity) mappings, aligned on the block
 * boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. If this assert fails, that assumption no longer "
	"holds and this code needs to be reworked.");

/* Keep macro alignment */
/* clang-format off */

#define MM_FLAG_COMMIT 0x01
#define MM_FLAG_UNMAP  0x02
#define MM_FLAG_STAGE1 0x04

/* clang-format on */

static struct mm_ptable ptable;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Gets the page table at the given physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
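
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096):
 *
 *   mm_round_down_to_page(0x12345) == 0x12000
 *   mm_round_up_to_page(0x12345)   == 0x13000
 *   mm_round_up_to_page(0x12000)   == 0x12000 (already aligned)
 */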
93
/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
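
/*
 * Illustrative sizes, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (the
 * values for a 4 KiB translation granule):
 *
 *   mm_entry_size(0) == 1 << 12 == 4 KiB (a page)
 *   mm_entry_size(1) == 1 << 21 == 2 MiB (a level 1 block)
 *   mm_entry_size(2) == 1 << 30 == 1 GiB (a level 2 block)
 */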
102
/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}
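
/*
 * Worked example (illustrative, using a 2 MiB block size, i.e. 0x200000):
 *
 *   mm_start_of_next_block(0x2a1000, 0x200000) == 0x400000
 *   mm_start_of_next_block(0x200000, 0x200000) == 0x400000
 *
 * An already-aligned address still advances to the next block, which is what
 * the mapping loops below rely on to make progress.
 */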
121
/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}
132
/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
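
/*
 * Worked example (illustrative, again assuming PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9), with addr == 0x40201000:
 *
 *   mm_index(addr, 0) == (addr >> 12) & 0x1ff == 1
 *   mm_index(addr, 1) == (addr >> 21) & 0x1ff == 1
 *   mm_index(addr, 2) == (addr >> 30) & 0x1ff == 1
 *   mm_level_end(addr, 0) == 0x40400000 (end of the containing 2 MiB range)
 *   mm_level_end(addr, 1) == 0x80000000 (end of the containing 1 GiB range)
 */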
143
/**
 * Allocates the given number of page tables. If more than one is requested,
 * they are allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Initialises the given page table.
 */
static bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This prevents different CPUs from holding different 'valid' entries for the
 * same address in their TLBs, which could otherwise cause problems such as
 * loss of cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level) &&
	    arch_mm_pte_is_valid(new_pte, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}
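
/*
 * The break-before-make sequence above, as a timeline (illustrative):
 *
 *   1. *pte = absent    - no CPU can form a new TLB entry for the range
 *   2. invalidate TLB   - stale entries for the range are removed
 *   3. *pte = new_pte   - the new mapping becomes visible
 *
 * Skipping step 1 or 2 could leave two CPUs translating the same address
 * through different (old and new) TLB entries at the same time.
 */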
291
/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool);

	return ntable;
}
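
/*
 * Illustrative effect of the above (assuming a 4 KiB granule): splitting a
 * 2 MiB block PTE at level 1 yields a level 0 table whose 512 entries map the
 * same range with the same attributes:
 *
 *   before: pte -> 2 MiB block at 0x40200000
 *   after:  pte -> table { 0x40200000, 0x40201000, ..., 0x403ff000 }
 *
 * Here `inc` == mm_entry_size(0) == 0x1000, stepping the block address from
 * one 4 KiB entry to the next.
 */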
349
/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}
365
/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do
			 * break-before-makes here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level, ppool);
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}
457
/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}
482
/**
 * Updates the given table such that the given physical address range is
 * mapped into, or unmapped from, the address space with the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert the API constraint of mm_max_level() that isn't encoded in
	 * the types, so the static analyzer can rely on it.
	 */
	assert(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. On failure, the table may be left with extra internal tables,
	 * but the mappings will be unchanged.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MM_FLAG_COMMIT, ppool)) {
		return false;
	}

	/* Invalidate the TLB. */
	if ((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) {
		mm_invalidate_tlb(begin, end, flags);
	}

	return true;
}
527
/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}
553
/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}
568
/**
 * Given that all entries in the table referenced by the given PTE have
 * identical attributes, returns the single entry with which the table PTE can
 * be replaced. Note that the table PTE will no longer be valid after calling
 * this function as the table may have been freed.
 *
 * If the table is freed, the memory is freed directly rather than by calling
 * `mm_free_page_pte()` as the table is known to have no subtables.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level,
				struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		/* Free the table and return an absent entry. */
		mpool_free(ppool, table);
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	/* Free the table and return a block. */
	mpool_free(ppool, table);
	return arch_mm_block_pte(level, block_address, combined_attrs);
}
610
/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level,
				    struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent. It assumes addresses are contiguous due to identity mapping.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/* First try to defrag the entry, in case it is a subtable. */
		table->entries[i] = mm_ptable_defrag_entry(table->entries[i],
							   level - 1, ppool);

		/*
		 * If the entry isn't a block or has different attributes then
		 * it isn't possible to defragment it. Note that the entries
		 * live one level below this table, so their attributes are
		 * read at level - 1, matching mm_merge_table_pte().
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i], level - 1) != attrs) {
			return entry;
		}
	}

	return mm_merge_table_pte(entry, level, ppool);
}
650
/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
			     struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level, ppool);
		}
	}
}
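
/*
 * Illustrative defrag (assuming a 4 KiB granule): a level 0 table whose 512
 * page entries cover a contiguous 2 MiB range with identical attributes is
 * merged back into a single 2 MiB block PTE at level 1 and the now-unused
 * level 0 table is returned to the pool.
 */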
675
/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry in the range has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The entire range was covered with uniform attributes. */
	return got_attrs;
}
728
/**
 * Gets the attributes applied to the given range of addresses in the stage-2
 * table.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
			    ptable_addr_t end, uint64_t *attrs)
{
	int flags = 0;
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}
771
bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
{
	return mm_ptable_init(t, 0, ppool);
}

void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_fini(t, 0, ppool);
}
781
/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa, struct mpool *ppool)
{
	int flags = 0;
	bool success = mm_ptable_identity_update(
		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}
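
/*
 * Usage sketch (illustrative only; the table, pool and address range are made
 * up for the example):
 *
 *   ipaddr_t ipa;
 *
 *   if (!mm_vm_identity_map(&vm_ptable, pa_init(0x08000000),
 *                           pa_init(0x08010000), MM_MODE_R | MM_MODE_W,
 *                           &ipa, &ppool)) {
 *           (error path: e.g. the page pool was exhausted)
 *   }
 *
 * On success, ipa_addr(ipa) == 0x08000000 because all mappings are 1-1.
 */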
801
/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	return mm_ptable_identity_update(
		t, begin, end,
		arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
					     MM_MODE_SHARED),
		MM_FLAG_UNMAP, ppool);
}
815
/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), ppool) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			   ppool) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), ppool);
}
827
/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(struct mm_ptable *t)
{
	mm_ptable_dump(t, 0);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_defrag(t, 0, ppool);
}
843
/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    int *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs);
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}
863
/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
{
	if (mm_ptable_identity_update(&ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode),
				      MM_FLAG_STAGE1, ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
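
/*
 * Usage sketch (illustrative only; the device address range is made up):
 *
 *   void *base = mm_identity_map(pa_init(0x09000000), pa_init(0x09001000),
 *                                MM_MODE_R | MM_MODE_W, &ppool);
 *
 * On success, `base` points at the start of the range, since the hypervisor's
 * stage 1 table is also an identity mapping; on failure (e.g. the pool was
 * exhausted) it is NULL.
 */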
879
/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	return mm_ptable_identity_update(
		&ptable, begin, end,
		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
					     MM_MODE_SHARED),
		MM_FLAG_STAGE1 | MM_FLAG_UNMAP, ppool);
}
892
/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Let the console driver map pages for itself. */
	plat_console_mm_init(ppool);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(), MM_MODE_X,
			ppool);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(), MM_MODE_R,
			ppool);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W, ppool);

	return arch_mm_init(ptable.root, true);
}
925
bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mpool *ppool)
{
	mm_ptable_defrag(&ptable, MM_FLAG_STAGE1, ppool);
}
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +0100937}