/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

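/*
 * Worked example (a sketch, assuming a 4 KiB granule, i.e. PAGE_BITS == 12
 * and PAGE_LEVEL_BITS == 9; these are typical values, not a guarantee made
 * by this file):
 *
 *	mm_entry_size(0) == 1 << 12 == 4 KiB  (a single page)
 *	mm_entry_size(1) == 1 << 21 == 2 MiB  (a level-1 block)
 *	mm_entry_size(2) == 1 << 30 == 1 GiB  (a level-2 block)
 */
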
/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

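/*
 * Example with arbitrary values: for block_size == 0x1000,
 * mm_start_of_next_block(0x1234, 0x1000) == 0x2000. An already aligned
 * address still advances, e.g. mm_start_of_next_block(0x2000, 0x1000) ==
 * 0x3000, which is what lets the traversal loops below make progress.
 */
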
/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

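/*
 * Worked example (a sketch, again assuming PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9): for addr == 0x200000 (2 MiB),
 *
 *	mm_index(addr, 1) == (0x200000 >> 21) & 0x1ff == 1
 *	mm_index(addr, 0) == (0x200000 >> 12) & 0x1ff == 0
 *	mm_level_end(addr, 0) == 0x400000, since a level-0 table spans
 *	512 * 4 KiB == 2 MiB and the one containing addr ends at 4 MiB.
 */
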
/**
 * Allocates `count` page tables; when more than one is requested they are
 * allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags,
			      uint16_t id)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(id, ipa_init(begin),
						ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}

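/*
 * Worked example (a sketch with illustrative, not architecture-mandated,
 * values): with 4 root tables and a maximum level of 2, the encodable space
 * ends at 4 * mm_entry_size(3) == 4 * (1 << 39) == 2 TiB, assuming
 * PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 as before.
 */
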
/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
		    struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);
	t->id = id;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool,
			     uint16_t id)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
				  id);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

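/*
 * The break-before-make steps above, for a valid -> valid replacement, are
 * (a restatement of what mm_replace_entry already does, not extra
 * behaviour):
 *
 *	*pte = arch_mm_absent_pte(level);  - new walks cannot cache the old PTE
 *	mm_invalidate_tlb(...);            - stale cached walks are discarded
 *	*pte = new_pte;                    - only the new value can be cached
 *
 * Writing the new value directly could leave two CPUs holding conflicting
 * translations for the same address in their TLBs.
 */
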
/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool,
						   uint16_t id)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool, id);

	return ntable;
}

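/*
 * Example of the block-splitting case (a sketch, assuming PAGE_BITS == 12
 * and PAGE_LEVEL_BITS == 9): splitting a 2 MiB level-1 block at 0x200000
 * produces a level-0 table whose 512 entries are 4 KiB blocks at 0x200000,
 * 0x201000, ..., each inheriting the original block's attributes, so the
 * effective translation is unchanged until an individual entry is updated.
 */
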
/**
 * Updates the page table at the given level to map the given address range to a
 * physical range using the provided (architecture-specific) attributes. Or if
 * MM_FLAG_UNMAP is set, unmap the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool,
			 uint16_t id)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool, id);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool, id);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool, id)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes. Or if
 * MM_FLAG_UNMAP is set, unmap the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool, t->id)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if MM_FLAG_COMMIT is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, uint64_t attrs, int flags,
				   struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of mm_max_level(),
	 * that isn't encoded in the types, to the static analyzer.
	 */
	CHECK(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes to succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
				       paddr_t pa_end, uint64_t attrs,
				       int flags, struct mpool *ppool)
{
	flags &= ~MM_FLAG_COMMIT;
	return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
				     flags | MM_FLAG_COMMIT, ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which it can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
				   uint8_t level, int flags,
				   struct mpool *ppool, uint16_t id)
{
	struct mm_page_table *table;
	uint64_t i;
	bool mergeable;
	bool base_present;
	uint64_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
			       flags, ppool, id);

	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry meaning the table can be merged into a
	 * block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
				       level - 1, flags, ppool, id);

		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(base_addr, entry, new_entry, level, flags,
				 ppool, id);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
			     struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(block_addr,
					       &(tables[i].entries[j]), level,
					       flags, ppool, t->id);
			block_addr = mm_start_of_next_block(
				block_addr, mm_entry_size(level));
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry has the same attributes as the rest. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The whole range had consistent attributes. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the stage-2
 * table.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
			    ptable_addr_t end, uint64_t *attrs)
{
	int flags = 0;
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool)
{
	return mm_ptable_init(t, id, 0, ppool);
}

void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_fini(t, 0, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static int mm_mode_to_flags(uint32_t mode)
{
	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		return MM_FLAG_UNMAP;
	}

	return 0;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(t, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	mm_ptable_identity_commit(t, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool)
{
	int flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(t, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(t, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

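/*
 * Typical two-phase use of the pair above (a sketch; `t`, `begin`, `end`,
 * `mode` and `ppool` stand for the caller's values):
 *
 *	if (!mm_vm_identity_prepare(t, begin, end, mode, ppool)) {
 *		return false;  - nothing has changed on this path
 *	}
 *	... other fallible preparation ...
 *	mm_vm_identity_commit(t, begin, end, mode, ppool, NULL);  - cannot fail
 *
 * This lets a caller combine several updates into one atomic operation:
 * prepare everything first, then commit only once failure is impossible.
 */
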
/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(t, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(struct mm_ptable *t)
{
	mm_ptable_dump(t, 0);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_defrag(t, 0, ppool);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs);
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}

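/*
 * Example use (a sketch; `vm_ptable`, `begin` and `end` are hypothetical
 * caller values, and it assumes the MM_MODE_INVALID mode bit declared in
 * hf/mm.h): validating that a range is currently mapped for a VM before
 * operating on it.
 *
 *	uint32_t mode;
 *	if (!mm_vm_get_mode(vm_ptable, begin, end, &mode) ||
 *	    (mode & MM_MODE_INVALID)) {
 *		return false;
 *	}
 */
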
static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}

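/*
 * Typical use of the stage-1 lock with the mapping functions below (a
 * sketch; `dev_begin`, `dev_end` and `ppool` are hypothetical values):
 *
 *	struct mm_stage1_locked locked = mm_lock_stage1();
 *	void *va = mm_identity_map(locked, dev_begin, dev_end,
 *				   MM_MODE_R | MM_MODE_W, ppool);
 *	mm_unlock_stage1(&locked);
 *
 * Holding the lock for the whole update keeps concurrent modifications of
 * the hypervisor table serialised.
 */
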
/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode), flags,
				      ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

1052/**
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001053 * Defragments the hypervisor page table.
1054 */
1055void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
1056{
1057 mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
1058}
1059
/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();

	dlog_info("text: %#x - %#x\n", pa_addr(layout_text_begin()),
		  pa_addr(layout_text_end()));
	dlog_info("rodata: %#x - %#x\n", pa_addr(layout_rodata_begin()),
		  pa_addr(layout_rodata_end()));
	dlog_info("data: %#x - %#x\n", pa_addr(layout_data_begin()),
		  pa_addr(layout_data_end()));

	/* ASID 0 is reserved for use by the hypervisor. */
	if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, ppool)) {
		dlog_error("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Let console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(),
			MM_MODE_X, ppool);

	mm_identity_map(stage1_locked, layout_rodata_begin(),
			layout_rodata_end(), MM_MODE_R, ppool);

	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W, ppool);

	return arch_mm_init(ptable.root);
}