/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"

#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on block boundaries.
 */
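
/*
 * Illustrative only: a rough sketch of how a VM's stage-2 table is typically
 * driven using the API defined below. The wiring shown here is an assumption
 * (the real callers live elsewhere in Hafnium), but the signatures are the
 * ones defined in this file:
 *
 *	struct mm_ptable vm_ptable;
 *	ipaddr_t ipa;
 *
 *	mm_vm_init(&vm_ptable, vm_id, ppool);
 *	mm_vm_identity_map(&vm_ptable, pa_begin, pa_end,
 *			   MM_MODE_R | MM_MODE_W, ppool, &ipa);
 *	mm_vm_defrag(&vm_ptable, ppool, false);
 *
 * Because all mappings are identity (1-1), `ipa` comes back equal to the
 * physical address `pa_begin`.
 */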

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
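
/*
 * Worked example, assuming PAGE_SIZE == 4096 (PAGE_BITS == 12); the real
 * value is arch-defined:
 *
 *	mm_round_down_to_page(0x1234) == 0x1000
 *	mm_round_up_to_page(0x1234)   == 0x2000
 *	mm_round_up_to_page(0x1000)   == 0x1000  (already aligned)
 */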

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
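
/*
 * For illustration, with a 4 KiB granule (PAGE_BITS == 12, PAGE_LEVEL_BITS ==
 * 9 -- assumed values, the real ones are arch-defined):
 *
 *	mm_entry_size(0) == 1 << 12 == 4 KiB
 *	mm_entry_size(1) == 1 << 21 == 2 MiB
 *	mm_entry_size(2) == 1 << 30 == 1 GiB
 */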

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}
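
/*
 * E.g. with block_size == 0x1000 (illustrative values only):
 * mm_start_of_next_block(0x12345, 0x1000) == 0x13000, and an already aligned
 * 0x12000 still advances to 0x13000 rather than staying put.
 */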

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}
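
/*
 * Worked example (same assumed constants as above): at level 1 each table
 * spans 1 GiB, so for any addr below 1 GiB, mm_level_end(addr, 1) ==
 * 0x40000000, i.e. the first address no longer covered by that table.
 */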

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
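
/*
 * E.g. (same assumed constants): addr 0x40201000 is 1 GiB + 2 MiB + 4 KiB, so
 * mm_index(addr, 2) == 1, mm_index(addr, 1) == 1 and mm_index(addr, 0) == 1:
 * the entry one slot in at every level.
 */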

/**
 * Allocates `count` page tables; when more than one is requested they are
 * allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags,
			      uint16_t id, bool non_secure)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}
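
/*
 * For illustration (assumed constants as above): with one root table and a
 * maximum level of 2, the address space ends at
 * 1 * mm_entry_size(3) == UINT64_C(1) << 39 == 512 GiB.
 */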

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
		    struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);
	t->id = id;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If the old value is valid
 * and TLB invalidation is enabled, it performs a break-before-make sequence:
 * it first writes an invalid value to the PTE, invalidates the TLB for the
 * affected range, and only then writes the actual new value. This prevents
 * cases where CPUs have different 'valid' values for the same address in
 * their TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool,
			     uint16_t id, bool non_secure)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if the old entry is
	 * valid and the TLB is being invalidated.
	 */
	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
				  id, non_secure);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool,
						   uint16_t id, bool non_secure)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool, id, non_secure);

	return ntable;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool,
			 uint16_t id)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 *
			 * Attribute bit 57 is passed through as the non-secure
			 * flag used for TLB maintenance.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool, id,
						 (attrs & (1ULL << 57)) != 0);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool, id,
				(attrs & (1ULL << 57)) != 0);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool, id)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool, t->id)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if MM_FLAG_COMMIT is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, uint64_t attrs, int flags,
				   struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of
	 * mm_max_level(), that isn't encoded in the types, to the static
	 * analyzer.
	 */
	assert(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that
	 * code following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
				       paddr_t pa_end, uint64_t attrs,
				       int flags, struct mpool *ppool)
{
	flags &= ~MM_FLAG_COMMIT;
	return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
				     flags | MM_FLAG_COMMIT, ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%lx: %lx\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which the table can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}
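
/*
 * For illustration (assuming 512 entries per table): a level-1 table whose
 * 512 level-0 entries are present blocks with identical attributes collapses
 * into a single level-1 block PTE covering the same 2 MiB, while a table
 * whose first entry is absent collapses into an absent PTE.
 */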

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
				   uint8_t level, int flags,
				   struct mpool *ppool, uint16_t id,
				   bool non_secure)
{
	struct mm_page_table *table;
	uint64_t i;
	bool mergeable;
	bool base_present;
	uint64_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
			       flags, ppool, id, non_secure);

	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry, meaning the table can be merged into
	 * a block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
				       level - 1, flags, ppool, id, non_secure);

		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(base_addr, entry, new_entry, level, flags,
				 ppool, id, non_secure);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
			     struct mpool *ppool, bool non_secure)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(block_addr,
					       &(tables[i].entries[j]), level,
					       flags, ppool, t->id, non_secure);
			block_addr = mm_start_of_next_block(
				block_addr, mm_entry_size(level));
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry in the range has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The whole range has matching attributes. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
			 ptable_addr_t end, uint64_t *attrs, int flags)
{
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool)
{
	return mm_ptable_init(t, id, 0, ppool);
}

void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_fini(t, 0, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static int mm_mode_to_flags(uint32_t mode)
{
	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		return MM_FLAG_UNMAP;
	}

	return 0;
}
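
/*
 * E.g. a plain read/write mapping such as MM_MODE_R | MM_MODE_W yields 0,
 * while a mode with all of the MM_MODE_UNMAPPED_MASK bits set yields
 * MM_FLAG_UNMAP, as used by mm_unmap() and mm_vm_unmap() below.
 */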

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(t, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	mm_ptable_identity_commit(t, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool)
{
	int flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(t, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(t, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(t, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(struct mm_ptable *t)
{
	mm_ptable_dump(t, 0);
}

/**
 * Defragments a stage-1 page table.
 */
void mm_stage1_defrag(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_defrag(t, MM_FLAG_STAGE1, ppool, false);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool, bool non_secure)
{
	mm_ptable_defrag(t, 0, ppool, non_secure);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs, 0);
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}

/**
 * Gets the mode of the given range of virtual addresses if they are mapped
 * with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_get_mode(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
		 uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_get_attrs(t, va_addr(begin), va_addr(end), &attrs,
			   MM_FLAG_STAGE1);
	if (ret) {
		*mode = arch_mm_stage1_attrs_to_mode(attrs);
	}

	return ret;
}

static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
{
	return (struct mm_stage1_locked){.ptable = ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode), flags,
				      ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
	mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool, false);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();

	dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
		  pa_addr(layout_text_end()));
	dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
		  pa_addr(layout_rodata_end()));
	dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
		  pa_addr(layout_data_end()));
	dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
		  pa_addr(layout_stacks_end()));

	/* ASID 0 is reserved for use by the hypervisor. */
	if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, ppool)) {
		dlog_error("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Initialise arch_mm before calling the mapping routines below. */
	if (!arch_mm_init(ptable.root)) {
		return false;
	}

	/* Let the console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
			      layout_text_end(), MM_MODE_X, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
			      layout_rodata_end(), MM_MODE_R, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
			      layout_data_end(), MM_MODE_R | MM_MODE_W,
			      ppool) != NULL);

	/* Arch-specific stack mapping. */
	CHECK(arch_stack_mm_init(stage1_locked, ppool));

	return true;
}