/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"

#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
        sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
        "Currently, the same code manages the stage 1 and stage 2 page tables "
        "which only works if the virtual and intermediate physical addresses "
        "are the same size. It looks like that assumption might not be holding "
        "so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
        mm_stage2_invalidate = true;
}

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
        return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
        return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
        return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
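
/*
 * Worked examples (illustrative only, assuming the usual 4KiB page size,
 * i.e. PAGE_SIZE == 0x1000):
 *
 *      mm_round_down_to_page(0x12345) == 0x12000
 *      mm_round_up_to_page(0x12345)   == 0x13000
 *      mm_round_up_to_page(0x12000)   == 0x12000 (already aligned)
 */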

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
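
/*
 * For instance (a sketch, assuming a 4KiB granule where PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9):
 *
 *      mm_entry_size(0) == 1 << 12 == 4KiB (a single page)
 *      mm_entry_size(1) == 1 << 21 == 2MiB
 *      mm_entry_size(2) == 1 << 30 == 1GiB
 */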

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
                                            size_t block_size)
{
        return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
        return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}
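
/*
 * For example (illustrative values, with 2MiB blocks so block_size ==
 * 0x200000):
 *
 *      mm_start_of_next_block(0x340000, 0x200000) == 0x400000
 *      mm_start_of_next_block(0x400000, 0x200000) == 0x600000
 *
 * Note that an already-aligned address still advances to the following block.
 */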

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
        size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

        return ((addr >> offset) + 1) << offset;
}
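
/*
 * For instance (a sketch with PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9): a
 * level 1 table spans 1GiB, so for any address within the first GiB,
 * mm_level_end(addr, 1) == 0x40000000, the exclusive end of that table's
 * range.
 */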

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
        ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

        return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
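
/*
 * For example (illustrative, 4KiB granule): for addr == 0x40201000, each
 * level consumes PAGE_LEVEL_BITS == 9 bits of the address:
 *
 *      mm_index(addr, 0) == (addr >> 12) & 511 == 1
 *      mm_index(addr, 1) == (addr >> 21) & 511 == 1
 *      mm_index(addr, 2) == (addr >> 30) & 511 == 1
 */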

/**
 * Allocates `count` new page tables; when more than one is requested they are
 * allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
                                                  struct mpool *ppool)
{
        if (count == 1) {
                return mpool_alloc(ppool);
        }

        return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
        return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
                                        : arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
        return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
                                        : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags,
                              uint16_t id)
{
        if (flags & MM_FLAG_STAGE1) {
                arch_mm_invalidate_stage1_range(id, va_init(begin),
                                                va_init(end));
        } else {
                arch_mm_invalidate_stage2_range(id, ipa_init(begin),
                                                ipa_init(end));
        }
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
        struct mm_page_table *table;
        uint64_t i;

        if (!arch_mm_pte_is_table(pte, level)) {
                return;
        }

        /* Recursively free any subtables. */
        table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
        for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
                mm_free_page_pte(table->entries[i], level - 1, ppool);
        }

        /* Free the table itself. */
        mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
        return mm_root_table_count(flags) *
               mm_entry_size(mm_max_level(flags) + 1);
}
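
/*
 * As a concrete sketch (hypothetical configuration, 4KiB granule as above):
 * with a single root table whose maximum level is 2, the address space ends
 * at 1 * mm_entry_size(3) == 1 << 39 == 512GiB; with four concatenated root
 * tables it would be 4 * 512GiB == 2TiB.
 */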

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
                    struct mpool *ppool)
{
        uint8_t i;
        size_t j;
        struct mm_page_table *tables;
        uint8_t root_table_count = mm_root_table_count(flags);

        tables = mm_alloc_page_tables(root_table_count, ppool);
        if (tables == NULL) {
                return false;
        }

        for (i = 0; i < root_table_count; i++) {
                for (j = 0; j < MM_PTE_PER_PAGE; j++) {
                        tables[i].entries[j] =
                                arch_mm_absent_pte(mm_max_level(flags));
                }
        }

        /*
         * TODO: halloc could return a virtual or physical address if mm not
         * enabled?
         */
        t->root = pa_init((uintpaddr_t)tables);
        t->id = id;
        return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t level = mm_max_level(flags);
        uint8_t root_table_count = mm_root_table_count(flags);
        uint8_t i;
        uint64_t j;

        for (i = 0; i < root_table_count; ++i) {
                for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        mm_free_page_pte(tables[i].entries[j], level, ppool);
                }
        }

        mpool_add_chunk(ppool, tables,
                        sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This prevents cases where CPUs have different 'valid' values for the same
 * entry in their TLBs, which may cause problems, for example, with cache
 * coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
                             uint8_t level, int flags, struct mpool *ppool,
                             uint16_t id)
{
        pte_t v = *pte;

        /*
         * We need to do the break-before-make sequence if both values are
         * present and the TLB is being invalidated.
         */
        if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
            arch_mm_pte_is_valid(v, level)) {
                *pte = arch_mm_absent_pte(level);
                mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
                                  id);
        }

        /* Assign the new pte. */
        *pte = new_pte;

        /* Free pages that aren't in use anymore. */
        mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
                                                   pte_t *pte, uint8_t level,
                                                   int flags,
                                                   struct mpool *ppool,
                                                   uint16_t id)
{
        struct mm_page_table *ntable;
        pte_t v = *pte;
        pte_t new_pte;
        size_t i;
        size_t inc;
        uint8_t level_below = level - 1;

        /* Just return pointer to table if it's already populated. */
        if (arch_mm_pte_is_table(v, level)) {
                return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
        }

        /* Allocate a new table. */
        ntable = mm_alloc_page_tables(1, ppool);
        if (ntable == NULL) {
                dlog_error("Failed to allocate memory for page table\n");
                return NULL;
        }

        /* Determine template for new pte and its increment. */
        if (arch_mm_pte_is_block(v, level)) {
                inc = mm_entry_size(level_below);
                new_pte = arch_mm_block_pte(level_below,
                                            arch_mm_block_from_pte(v, level),
                                            arch_mm_pte_attrs(v, level));
        } else {
                inc = 0;
                new_pte = arch_mm_absent_pte(level_below);
        }

        /* Initialise entries in the new table. */
        for (i = 0; i < MM_PTE_PER_PAGE; i++) {
                ntable->entries[i] = new_pte;
                new_pte += inc;
        }

        /* Ensure initialisation is visible before updating the pte. */
        atomic_thread_fence(memory_order_release);

        /* Replace the pte entry, doing a break-before-make if needed. */
        mm_replace_entry(begin, pte,
                         arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
                         level, flags, ppool, id);

        return ntable;
}
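
/*
 * To illustrate the split performed above (a sketch, 4KiB granule): a level 1
 * entry mapping a 2MiB block at physical address 0x40000000 is replaced by a
 * table of 512 level 0 entries mapping 4KiB blocks at 0x40000000, 0x40001000,
 * ..., each inheriting the original attributes, since inc ==
 * mm_entry_size(0) == 0x1000.
 */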

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
                         uint64_t attrs, struct mm_page_table *table,
                         uint8_t level, int flags, struct mpool *ppool,
                         uint16_t id)
{
        pte_t *pte = &table->entries[mm_index(begin, level)];
        ptable_addr_t level_end = mm_level_end(begin, level);
        size_t entry_size = mm_entry_size(level);
        bool commit = flags & MM_FLAG_COMMIT;
        bool unmap = flags & MM_FLAG_UNMAP;

        /* Cap end so that we don't go over the current level max. */
        if (end > level_end) {
                end = level_end;
        }

        /* Fill each entry in the table. */
        while (begin < end) {
                if (unmap ? !arch_mm_pte_is_present(*pte, level)
                          : arch_mm_pte_is_block(*pte, level) &&
                                    arch_mm_pte_attrs(*pte, level) == attrs) {
                        /*
                         * If the entry is already mapped with the right
                         * attributes, or already absent in the case of
                         * unmapping, no need to do anything; carry on to the
                         * next entry.
                         */
                } else if ((end - begin) >= entry_size &&
                           (unmap || arch_mm_is_block_allowed(level)) &&
                           (begin & (entry_size - 1)) == 0) {
                        /*
                         * If the entire entry is within the region we want to
                         * map, map/unmap the whole entry.
                         */
                        if (commit) {
                                pte_t new_pte =
                                        unmap ? arch_mm_absent_pte(level)
                                              : arch_mm_block_pte(level, pa,
                                                                  attrs);
                                mm_replace_entry(begin, pte, new_pte, level,
                                                 flags, ppool, id);
                        }
                } else {
                        /*
                         * If the entry is already a subtable get it; otherwise
                         * replace it with an equivalent subtable and get that.
                         */
                        struct mm_page_table *nt = mm_populate_table_pte(
                                begin, pte, level, flags, ppool, id);
                        if (nt == NULL) {
                                return false;
                        }

                        /*
                         * Recurse to map/unmap the appropriate entries within
                         * the subtable.
                         */
                        if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
                                          flags, ppool, id)) {
                                return false;
                        }
                }

                begin = mm_start_of_next_block(begin, entry_size);
                pa = mm_pa_start_of_next_block(pa, entry_size);
                pte++;
        }

        return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
                        ptable_addr_t end, uint64_t attrs, uint8_t root_level,
                        int flags, struct mpool *ppool)
{
        size_t root_table_size = mm_entry_size(root_level);
        struct mm_page_table *table =
                &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

        while (begin < end) {
                if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
                                  root_level - 1, flags, ppool, t->id)) {
                        return false;
                }
                begin = mm_start_of_next_block(begin, root_table_size);
                table++;
        }

        return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if MM_FLAG_COMMIT is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
                                   paddr_t pa_end, uint64_t attrs, int flags,
                                   struct mpool *ppool)
{
        uint8_t root_level = mm_max_level(flags) + 1;
        ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
        ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
        ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

        /*
         * Assert to communicate to the static analyzer the API constraint of
         * mm_max_level() that isn't encoded in the types.
         */
        assert(root_level >= 2);

        /* Cap end to stay within the bounds of the page table. */
        if (end > ptable_end) {
                end = ptable_end;
        }

        if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool)) {
                return false;
        }

        /*
         * All TLB invalidations must be complete already if any entries were
         * replaced by mm_replace_entry. Sync all page table writes so that
         * code following this can use them.
         */
        arch_mm_sync_table_writes();

        return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes to succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
                                       paddr_t pa_end, uint64_t attrs,
                                       int flags, struct mpool *ppool)
{
        flags &= ~MM_FLAG_COMMIT;
        return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory, resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
                                      paddr_t pa_end, uint64_t attrs, int flags,
                                      struct mpool *ppool)
{
        CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
                                     flags | MM_FLAG_COMMIT, ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
                                      paddr_t pa_end, uint64_t attrs, int flags,
                                      struct mpool *ppool)
{
        if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
                                        ppool)) {
                return false;
        }

        mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);

        return true;
}
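
/*
 * A sketch of how the two-phase API above generalises (hypothetical ranges
 * a_begin..a_end and b_begin..b_end): to update several ranges atomically,
 * prepare them all before committing any, so that no commit can fail
 * part-way through:
 *
 *      if (!mm_ptable_identity_prepare(t, a_begin, a_end, attrs, flags,
 *                                      ppool) ||
 *          !mm_ptable_identity_prepare(t, b_begin, b_end, attrs, flags,
 *                                      ppool)) {
 *              return false;
 *      }
 *      mm_ptable_identity_commit(t, a_begin, a_end, attrs, flags, ppool);
 *      mm_ptable_identity_commit(t, b_begin, b_end, attrs, flags, ppool);
 */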

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
                                    int max_level)
{
        uint64_t i;

        for (i = 0; i < MM_PTE_PER_PAGE; i++) {
                if (!arch_mm_pte_is_present(table->entries[i], level)) {
                        continue;
                }

                dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
                     table->entries[i]);

                if (arch_mm_pte_is_table(table->entries[i], level)) {
                        mm_dump_table_recursive(
                                mm_page_table_from_pa(arch_mm_table_from_pte(
                                        table->entries[i], level)),
                                level - 1, max_level);
                }
        }
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t max_level = mm_max_level(flags);
        uint8_t root_table_count = mm_root_table_count(flags);
        uint8_t i;

        for (i = 0; i < root_table_count; ++i) {
                mm_dump_table_recursive(&tables[i], max_level, max_level);
        }
}

/**
 * Given that the table's PTE entries all have identical attributes, returns
 * the single entry with which the table can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
{
        struct mm_page_table *table;
        uint64_t block_attrs;
        uint64_t table_attrs;
        uint64_t combined_attrs;
        paddr_t block_address;

        table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

        if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
                return arch_mm_absent_pte(level);
        }

        /* Might not be possible to merge the table into a single block. */
        if (!arch_mm_is_block_allowed(level)) {
                return table_pte;
        }

        /* Replace table with a single block, with equivalent attributes. */
        block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
        table_attrs = arch_mm_pte_attrs(table_pte, level);
        combined_attrs =
                arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
        block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

        return arch_mm_block_pte(level, block_address, combined_attrs);
}
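
/*
 * For example (a sketch, 4KiB granule): a level 1 entry pointing at a table
 * of 512 level 0 entries that identity-map 0x40000000..0x40200000 with
 * identical attributes merges back into a single 2MiB block PTE for
 * 0x40000000; this is the inverse of the split illustrated after
 * mm_populate_table_pte().
 */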

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
                                   uint8_t level, int flags,
                                   struct mpool *ppool, uint16_t id)
{
        struct mm_page_table *table;
        uint64_t i;
        bool mergeable;
        bool base_present;
        uint64_t base_attrs;
        pte_t new_entry;

        if (!arch_mm_pte_is_table(*entry, level)) {
                return;
        }

        table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

        /* Defrag the first entry in the table and use it as the base entry. */
        static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

        mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
                               flags, ppool, id);

        base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
        base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

        /*
         * Defrag the remaining entries in the table and check whether they are
         * compatible with the base entry, meaning the table can be merged into
         * a block entry. It assumes addresses are contiguous due to identity
         * mapping.
         */
        mergeable = true;
        for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
                bool present;
                ptable_addr_t block_addr =
                        base_addr + (i * mm_entry_size(level - 1));

                mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
                                       level - 1, flags, ppool, id);

                present = arch_mm_pte_is_present(table->entries[i], level - 1);

                if (present != base_present) {
                        mergeable = false;
                        continue;
                }

                if (!present) {
                        continue;
                }

                if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
                        mergeable = false;
                        continue;
                }

                if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
                    base_attrs) {
                        mergeable = false;
                        continue;
                }
        }

        if (!mergeable) {
                return;
        }

        new_entry = mm_merge_table_pte(*entry, level);
        if (*entry != new_entry) {
                mm_replace_entry(base_addr, entry, new_entry, level, flags,
                                 ppool, id);
        }
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
                             struct mpool *ppool)
{
        struct mm_page_table *tables = mm_page_table_from_pa(t->root);
        uint8_t level = mm_max_level(flags);
        uint8_t root_table_count = mm_root_table_count(flags);
        uint8_t i;
        uint64_t j;
        ptable_addr_t block_addr = 0;

        /*
         * Loop through each entry in the table. If it points to another table,
         * check if that table can be replaced by a block or an absent entry.
         */
        for (i = 0; i < root_table_count; ++i) {
                for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        mm_ptable_defrag_entry(block_addr,
                                               &(tables[i].entries[j]), level,
                                               flags, ppool, t->id);
                        block_addr = mm_start_of_next_block(
                                block_addr, mm_entry_size(level));
                }
        }

        arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
                                      ptable_addr_t begin, ptable_addr_t end,
                                      uint8_t level, bool got_attrs,
                                      uint64_t *attrs)
{
        pte_t *pte = &table->entries[mm_index(begin, level)];
        ptable_addr_t level_end = mm_level_end(begin, level);
        size_t entry_size = mm_entry_size(level);

        /* Cap end so that we don't go over the current level max. */
        if (end > level_end) {
                end = level_end;
        }

        /* Check the attributes of each entry in the range. */
        while (begin < end) {
                if (arch_mm_pte_is_table(*pte, level)) {
                        if (!mm_ptable_get_attrs_level(
                                    mm_page_table_from_pa(
                                            arch_mm_table_from_pte(*pte,
                                                                   level)),
                                    begin, end, level - 1, got_attrs, attrs)) {
                                return false;
                        }
                        got_attrs = true;
                } else {
                        if (!got_attrs) {
                                *attrs = arch_mm_pte_attrs(*pte, level);
                                got_attrs = true;
                        } else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
                                return false;
                        }
                }

                begin = mm_start_of_next_block(begin, entry_size);
                pte++;
        }

        /* The whole range was checked and the attributes are consistent. */
        return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
                         ptable_addr_t end, uint64_t *attrs, int flags)
{
        uint8_t max_level = mm_max_level(flags);
        uint8_t root_level = max_level + 1;
        size_t root_table_size = mm_entry_size(root_level);
        ptable_addr_t ptable_end =
                mm_root_table_count(flags) * mm_entry_size(root_level);
        struct mm_page_table *table;
        bool got_attrs = false;

        begin = mm_round_down_to_page(begin);
        end = mm_round_up_to_page(end);

        /* Fail if the addresses are out of range. */
        if (end > ptable_end) {
                return false;
        }

        table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
        while (begin < end) {
                if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
                                               got_attrs, attrs)) {
                        return false;
                }

                got_attrs = true;
                begin = mm_start_of_next_block(begin, root_table_size);
                table++;
        }

        return got_attrs;
}

/**
 * Initialises the given stage-2 page table for a VM with the given ID.
 */
bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool)
{
        return mm_ptable_init(t, id, 0, ppool);
}

/**
 * Frees all memory associated with the given stage-2 page table.
 */
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
        mm_ptable_fini(t, 0, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static int mm_mode_to_flags(uint32_t mode)
{
        if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
                return MM_FLAG_UNMAP;
        }

        return 0;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
                         uint32_t mode, struct mpool *ppool)
{
        int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

        return mm_ptable_identity_prepare(t, begin, end,
                                          arch_mm_mode_to_stage1_attrs(mode),
                                          flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
                         uint32_t mode, struct mpool *ppool)
{
        int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

        mm_ptable_identity_commit(t, begin, end,
                                  arch_mm_mode_to_stage1_attrs(mode), flags,
                                  ppool);
        return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
                            uint32_t mode, struct mpool *ppool)
{
        int flags = mm_mode_to_flags(mode);

        return mm_ptable_identity_prepare(t, begin, end,
                                          arch_mm_mode_to_stage2_attrs(mode),
                                          flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
                           uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        int flags = mm_mode_to_flags(mode);

        mm_ptable_identity_commit(t, begin, end,
                                  arch_mm_mode_to_stage2_attrs(mode), flags,
                                  ppool);

        if (ipa != NULL) {
                *ipa = ipa_from_pa(begin);
        }
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
                        uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        int flags = mm_mode_to_flags(mode);
        bool success = mm_ptable_identity_update(
                t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
                ppool);

        if (success && ipa != NULL) {
                *ipa = ipa_from_pa(begin);
        }

        return success;
}
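
/*
 * Illustrative call (a sketch; `vm->ptable` and the addresses are
 * hypothetical): identity-map a 2MiB region read/write into a VM's stage-2
 * table and retrieve its IPA:
 *
 *      ipaddr_t ipa;
 *
 *      if (mm_vm_identity_map(&vm->ptable, pa_init(0x40000000),
 *                             pa_init(0x40200000), MM_MODE_R | MM_MODE_W,
 *                             ppool, &ipa)) {
 *              ...on success, ipa_addr(ipa) == 0x40000000 since the mapping
 *              is an identity mapping...
 *      }
 */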

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
                 struct mpool *ppool)
{
        uint32_t mode = MM_MODE_UNMAPPED_MASK;

        return mm_vm_identity_map(t, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(struct mm_ptable *t)
{
        mm_ptable_dump(t, 0);
}

/**
 * Defragments a stage 1 page table.
 */
void mm_stage1_defrag(struct mm_ptable *t, struct mpool *ppool)
{
        mm_ptable_defrag(t, MM_FLAG_STAGE1, ppool);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
{
        mm_ptable_defrag(t, 0, ppool);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
                    uint32_t *mode)
{
        uint64_t attrs;
        bool ret;

        ret = mm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs, 0);
        if (ret) {
                *mode = arch_mm_stage2_attrs_to_mode(attrs);
        }

        return ret;
}
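
/*
 * Typical usage (a sketch; `vm->ptable` and the range are hypothetical):
 * check that a VM range is mapped readable and writable before operating on
 * it:
 *
 *      uint32_t mode;
 *
 *      if (mm_vm_get_mode(&vm->ptable, ipa_init(0x8000000),
 *                         ipa_init(0x8010000), &mode) &&
 *          (mode & (MM_MODE_R | MM_MODE_W)) == (MM_MODE_R | MM_MODE_W)) {
 *              ...the whole range is mapped read/write...
 *      }
 */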

/**
 * Gets the mode of the given range of virtual addresses if they are mapped
 * with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_get_mode(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
                 uint32_t *mode)
{
        uint64_t attrs;
        bool ret;

        ret = mm_get_attrs(t, va_addr(begin), va_addr(end), &attrs,
                           MM_FLAG_STAGE1);
        if (ret) {
                *mode = arch_mm_stage1_attrs_to_mode(attrs);
        }

        return ret;
}

/**
 * Gets a locked view of the hypervisor page table without taking the lock;
 * only for use before locking is enabled or while the lock is already held.
 */
static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
        return (struct mm_stage1_locked){.ptable = &ptable};
}

/**
 * Gets a locked view of the given page table without taking any lock.
 */
struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
{
        return (struct mm_stage1_locked){.ptable = ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
        sl_lock(&ptable_lock);
        return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
        CHECK(lock->ptable == &ptable);
        sl_unlock(&ptable_lock);
        lock->ptable = NULL;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
                      paddr_t end, uint32_t mode, struct mpool *ppool)
{
        int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

        if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
                                      arch_mm_mode_to_stage1_attrs(mode), flags,
                                      ppool)) {
                return ptr_from_va(va_from_pa(begin));
        }

        return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
              struct mpool *ppool)
{
        uint32_t mode = MM_MODE_UNMAPPED_MASK;

        return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
        mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
        /* Locking is not enabled yet, so fake it. */
        struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();

        dlog_info("text: %#x - %#x\n", pa_addr(layout_text_begin()),
                  pa_addr(layout_text_end()));
        dlog_info("rodata: %#x - %#x\n", pa_addr(layout_rodata_begin()),
                  pa_addr(layout_rodata_end()));
        dlog_info("data: %#x - %#x\n", pa_addr(layout_data_begin()),
                  pa_addr(layout_data_end()));
        dlog_info("stacks: %#x - %#x\n", pa_addr(layout_stacks_begin()),
                  pa_addr(layout_stacks_end()));

        /* ASID 0 is reserved for use by the hypervisor. */
        if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, ppool)) {
                dlog_error("Unable to allocate memory for page table.\n");
                return false;
        }

        /* Initialise arch_mm before calling the mapping routines below. */
        if (!arch_mm_init(ptable.root)) {
                return false;
        }

        /* Let the console driver map pages for itself. */
        plat_console_mm_init(stage1_locked, ppool);

        /* Map each section. */
        mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(),
                        MM_MODE_X, ppool);

        mm_identity_map(stage1_locked, layout_rodata_begin(),
                        layout_rodata_end(), MM_MODE_R, ppool);

        mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
                        MM_MODE_R | MM_MODE_W, ppool);

        /* Arch-specific stack mapping. */
        arch_stack_mm_init(stage1_locked, ppool);

        return true;
}