/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/std.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to access
 * memory, and then a stage 2 mapping per VM. The design assumes that all page
 * tables contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
        sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
        "Currently, the same code manages the stage 1 and stage 2 page tables "
        "which only works if the virtual and intermediate physical addresses "
        "are the same size. It looks like that assumption might not be holding "
        "so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
        mm_stage2_invalidate = true;
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
        return align_down(addr, PAGE_SIZE);
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
        return align_up(addr, PAGE_SIZE);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level. See also Arm ARM, table D8-15:
 * - `level == 4`: 256 TiB (1 << 48)
 * - `level == 3`: 512 GiB (1 << 39)
 * - `level == 2`: 1 GiB (1 << 30)
 * - `level == 1`: 2 MiB (1 << 21)
 * - `level == 0`: 4 KiB (1 << 12)
 */
static size_t mm_entry_size(mm_level_t level)
{
        assert(level <= 4);
        return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

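/*
 * For example, assuming the usual 4 KiB translation granule (PAGE_BITS == 12
 * and PAGE_LEVEL_BITS == 9), the formula above reproduces the table in the
 * comment:
 *
 *   mm_entry_size(0) == UINT64_C(1) << (12 + 0 * 9) == 4 KiB
 *   mm_entry_size(1) == UINT64_C(1) << (12 + 1 * 9) == 2 MiB
 *   mm_entry_size(2) == UINT64_C(1) << (12 + 2 * 9) == 1 GiB
 */
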
/**
 * Get the start address of the range mapped by the next block of the given
 * level.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
                                            mm_level_t level)
{
        assert(level <= 4);
        return align_up(addr + 1, mm_entry_size(level));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
{
        size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

        return ((addr >> offset) + 1) << offset;
}

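/*
 * For example, assuming a 4 KiB granule (PAGE_BITS == 12, PAGE_LEVEL_BITS ==
 * 9), a level 1 table covers a 1 GiB aligned region, so
 * mm_level_end(0x40321000, 1) == 0x80000000: the exclusive end of the 1 GiB
 * region whose level 1 table contains the address.
 */
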
/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level. See also Arm ARM, table D8-14:
 * - `level == 4`: bits[51:48]
 * - `level == 3`: bits[47:39]
 * - `level == 2`: bits[38:30]
 * - `level == 1`: bits[29:21]
 * - `level == 0`: bits[20:12]
 */
static size_t mm_index(ptable_addr_t addr, mm_level_t level)
{
        ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

        return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

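/*
 * For example, with a 4 KiB granule (PAGE_BITS == 12, PAGE_LEVEL_BITS == 9):
 *
 *   mm_index(0x80123456, 0) == 0x123  (bits[20:12])
 *   mm_index(0x80123456, 2) == 0x2    (bits[38:30])
 */
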
/**
 * Allocates a new page table.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
                                                  struct mpool *ppool)
{
        if (count == 1) {
                return mpool_alloc(ppool);
        }

        return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the root level of the given page table.
 */
static mm_level_t mm_root_level(const struct mm_ptable *ptable)
{
        return ptable->stage1 ? arch_mm_stage1_root_level()
                              : arch_mm_stage2_root_level();
}

/**
 * Returns the number of root-level tables of the given page table.
 */
static uint8_t mm_root_table_count(const struct mm_ptable *ptable)
{
        return ptable->stage1 ? arch_mm_stage1_root_table_count()
                              : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(const struct mm_ptable *ptable,
                              ptable_addr_t begin, ptable_addr_t end,
                              bool non_secure)
{
        if (ptable->stage1) {
                arch_mm_invalidate_stage1_range(ptable->id, va_init(begin),
                                                va_init(end));
        } else {
                arch_mm_invalidate_stage2_range(ptable->id, ipa_init(begin),
                                                ipa_init(end), non_secure);
        }
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
{
        struct mm_page_table *table;

        if (!arch_mm_pte_is_table(pte, level)) {
                return;
        }

        /* Recursively free any subtables. */
        table = arch_mm_table_from_pte(pte, level);
        for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
                mm_free_page_pte(table->entries[i], level - 1, ppool);
        }

        /* Free the table itself. */
        mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in the given page table.
 * It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(const struct mm_ptable *ptable)
{
        return mm_root_table_count(ptable) *
               mm_entry_size(mm_root_level(ptable));
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id, bool stage1,
                    struct mpool *ppool)
{
        struct mm_page_table *root_tables;
        uint8_t root_table_count = stage1 ? arch_mm_stage1_root_table_count()
                                          : arch_mm_stage2_root_table_count();
        mm_level_t root_level = stage1 ? arch_mm_stage1_root_level()
                                       : arch_mm_stage2_root_level();

        root_tables = mm_alloc_page_tables(root_table_count, ppool);
        if (root_tables == NULL) {
                return false;
        }

        for (size_t i = 0; i < root_table_count; i++) {
                for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
                        root_tables[i].entries[j] =
                                arch_mm_absent_pte(root_level - 1);
                }
        }

        /*
         * TODO: halloc could return a virtual or physical address if mm not
         * enabled?
         */
        ptable->id = id;
        ptable->root_tables = root_tables;
        ptable->stage1 = stage1;
        return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
        struct mm_page_table *root_tables = ptable->root_tables;
        mm_level_t root_level = mm_root_level(ptable);
        uint8_t root_table_count = mm_root_table_count(ptable);

        for (size_t i = 0; i < root_table_count; ++i) {
                for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        mm_free_page_pte(root_tables[i].entries[j],
                                         root_level - 1, ppool);
                }
        }

        mpool_add_chunk(ppool, root_tables,
                        sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues for example in cache coherency.
 */
static void mm_replace_entry(const struct mm_ptable *ptable,
                             ptable_addr_t begin, pte_t *pte, pte_t new_pte,
                             mm_level_t level, bool non_secure,
                             struct mpool *ppool)
{
        pte_t v = *pte;

        /*
         * We need to do the break-before-make sequence if both values are
         * present and the TLB is being invalidated.
         */
        if ((ptable->stage1 || mm_stage2_invalidate) &&
            arch_mm_pte_is_valid(v, level)) {
                *pte = arch_mm_absent_pte(level);
                mm_invalidate_tlb(ptable, begin, begin + mm_entry_size(level),
                                  non_secure);
        }

        /* Assign the new pte. */
        *pte = new_pte;

        /* Free pages that aren't in use anymore. */
        mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(struct mm_ptable *ptable,
                                                   ptable_addr_t begin,
                                                   pte_t *pte, mm_level_t level,
                                                   bool non_secure,
                                                   struct mpool *ppool)
{
        struct mm_page_table *ntable;
        pte_t v = *pte;
        pte_t new_pte;
        size_t inc;
        mm_level_t level_below = level - 1;

        /* Just return pointer to table if it's already populated. */
        if (arch_mm_pte_is_table(v, level)) {
                return arch_mm_table_from_pte(v, level);
        }

        /* Allocate a new table. */
        ntable = mm_alloc_page_tables(1, ppool);
        if (ntable == NULL) {
                dlog_error("Failed to allocate memory for page table\n");
                return NULL;
        }

        /* Determine template for new pte and its increment. */
        if (arch_mm_pte_is_block(v, level)) {
                inc = mm_entry_size(level_below);
                new_pte = arch_mm_block_pte(level_below,
                                            arch_mm_block_from_pte(v, level),
                                            arch_mm_pte_attrs(v, level));
        } else {
                inc = 0;
                new_pte = arch_mm_absent_pte(level_below);
        }

        /* Initialise entries in the new table. */
        for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
                ntable->entries[i] = new_pte;
                new_pte += inc;
        }

        /* Ensure initialisation is visible before updating the pte. */
        atomic_thread_fence(memory_order_release);

        /* Replace the pte entry, doing a break-before-make if needed. */
        mm_replace_entry(ptable, begin, pte,
                         arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
                         level, non_secure, ppool);

        return ntable;
}

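/*
 * For example, splitting a valid 1 GiB block at level 2 produces a new level 1
 * table whose 512 entries are 2 MiB blocks carrying the same attributes and
 * covering the same physical range, so the effective mapping is unchanged by
 * the split (sizes here assume the usual 4 KiB granule).
 */
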
/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes or,
 * if `flags.unmap` is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(struct mm_ptable *ptable, ptable_addr_t begin,
                         ptable_addr_t end, mm_attr_t attrs,
                         struct mm_page_table *child_table, mm_level_t level,
                         struct mm_flags flags, struct mpool *ppool)
{
        pte_t *pte = &child_table->entries[mm_index(begin, level)];
        ptable_addr_t level_end = mm_level_end(begin, level);
        size_t entry_size = mm_entry_size(level);
        bool commit = flags.commit;
        bool unmap = flags.unmap;
        bool non_secure = ((attrs & (1ULL << 57)) != 0);

        /* Cap end so that we don't go over the current level max. */
        if (end > level_end) {
                end = level_end;
        }

        /* Fill each entry in the table. */
        while (begin < end) {
                if (unmap ? !arch_mm_pte_is_present(*pte, level)
                          : arch_mm_pte_is_block(*pte, level) &&
                                    arch_mm_pte_attrs(*pte, level) == attrs) {
                        /*
                         * If the entry is already mapped with the right
                         * attributes, or already absent in the case of
                         * unmapping, no need to do anything; carry on to the
                         * next entry.
                         */
                } else if ((end - begin) >= entry_size &&
                           (unmap || arch_mm_is_block_allowed(level)) &&
                           is_aligned(begin, entry_size)) {
                        /*
                         * If the entire entry is within the region we want to
                         * map, map/unmap the whole entry.
                         */
                        if (commit) {
                                pte_t new_pte =
                                        unmap ? arch_mm_absent_pte(level)
                                              : arch_mm_block_pte(
                                                        level, pa_init(begin),
                                                        attrs);
                                mm_replace_entry(ptable, begin, pte, new_pte,
                                                 level, non_secure, ppool);
                        }
                } else {
                        /*
                         * If the entry is already a subtable get it; otherwise
                         * replace it with an equivalent subtable and get that.
                         */
                        struct mm_page_table *nt = mm_populate_table_pte(
                                ptable, begin, pte, level, non_secure, ppool);
                        if (nt == NULL) {
                                return false;
                        }

                        /*
                         * Recurse to map/unmap the appropriate entries within
                         * the subtable.
                         */
                        if (!mm_map_level(ptable, begin, end, attrs, nt,
                                          level - 1, flags, ppool)) {
                                return false;
                        }
                }

                begin = mm_start_of_next_block(begin, level);
                pte++;
        }

        return true;
}

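/*
 * As an illustration of the walk above, assuming a 4 KiB granule and that
 * blocks are permitted at level 1, mapping [0x0, 0x205000) once the walk
 * reaches a level 1 table proceeds as follows: the first entry covers
 * [0x0, 0x200000), which is fully contained in the range and suitably
 * aligned, so it is written as a single 2 MiB block; the second entry covers
 * [0x200000, 0x400000), which is only partially covered, so it is expanded
 * into a level 0 subtable and the recursive call maps the remaining five
 * 4 KiB pages individually.
 */
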
/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes.
 *
 * Flags:
 * - `flags.unmap`: unmap the given range instead of mapping it.
 * - `flags.commit`: the change is only committed if this flag is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
                                   paddr_t pa_end, mm_attr_t attrs,
                                   struct mm_flags flags, struct mpool *ppool)
{
        mm_level_t root_level = mm_root_level(ptable);
        ptable_addr_t ptable_end = mm_ptable_addr_space_end(ptable);
        ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
        ptable_addr_t begin = mm_round_down_to_page(pa_addr(pa_begin));
        struct mm_page_table *root_table =
                &ptable->root_tables[mm_index(begin, root_level)];

        /*
         * Assert condition to communicate the API constraint of
         * mm_root_level(), that isn't encoded in the types, to the static
         * analyzer.
         */
        assert(root_level >= 3);

        /* Cap end to stay within the bounds of the page table. */
        if (end > ptable_end) {
                dlog_verbose(
                        "ptable_map: input range end falls outside of ptable "
                        "address space (%#016lx > %#016lx), capping to ptable "
                        "address space end\n",
                        end, ptable_end);
                end = ptable_end;
        }

        if (begin >= end) {
                dlog_verbose(
                        "ptable_map: input range is backwards (%#016lx >= "
                        "%#016lx), request will have no effect\n",
                        begin, end);
        } else if (pa_addr(pa_begin) >= pa_addr(pa_end)) {
                dlog_verbose(
                        "ptable_map: input range was backwards (%#016lx >= "
                        "%#016lx), but due to rounding the range %#016lx to "
                        "%#016lx will be mapped\n",
                        pa_addr(pa_begin), pa_addr(pa_end), begin, end);
        }

        while (begin < end) {
                if (!mm_map_level(ptable, begin, end, attrs, root_table,
                                  root_level - 1, flags, ppool)) {
                        return false;
                }
                begin = mm_start_of_next_block(begin, root_level);
                root_table++;
        }

        /*
         * All TLB invalidations must be complete already if any entries were
         * replaced by mm_replace_entry. Sync all page table writes so that code
         * following this can use them.
         */
        arch_mm_sync_table_writes();

        return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
                                       paddr_t pa_begin, paddr_t pa_end,
                                       mm_attr_t attrs, struct mm_flags flags,
                                       struct mpool *ppool)
{
        flags.commit = false;
        return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
                                      ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory, resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the atomicity
 * of higher level memory operations, any detected violations result in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *ptable,
                                      paddr_t pa_begin, paddr_t pa_end,
                                      mm_attr_t attrs, struct mm_flags flags,
                                      struct mpool *ppool)
{
        flags.commit = true;
        CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
                                     ppool));
}

/**
 * Updates the given table such that the given physical address range is
 * mapped or not mapped into the address space with the provided
 * (architecture-specific) attributes.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
                                      paddr_t pa_begin, paddr_t pa_end,
                                      mm_attr_t attrs, struct mm_flags flags,
                                      struct mpool *ppool)
{
        if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
                                        ppool)) {
                return false;
        }

        mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
                                  ppool);

        return true;
}

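/*
 * A sketch of the intended calling pattern, assuming hypothetical ranges
 * `a_begin..a_end` and `b_begin..b_end` that must be updated atomically as a
 * pair, and a page table pointer `t`: prepare both ranges first, and only if
 * both preparations succeed perform the commits, which are then guaranteed
 * not to fail.
 *
 *   if (mm_ptable_identity_prepare(t, a_begin, a_end, attrs, flags, ppool) &&
 *       mm_ptable_identity_prepare(t, b_begin, b_end, attrs, flags, ppool)) {
 *           mm_ptable_identity_commit(t, a_begin, a_end, attrs, flags, ppool);
 *           mm_ptable_identity_commit(t, b_begin, b_end, attrs, flags, ppool);
 *   }
 */
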
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
                            uint32_t indent);

static void mm_dump_block_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
        mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
        paddr_t addr = arch_mm_block_from_pte(entry, level);

        if (arch_mm_pte_is_valid(entry, level)) {
                if (level == 0) {
                        dlog("page {\n");
                } else {
                        dlog("block {\n");
                }
        } else {
                dlog("invalid_block {\n");
        }

        indent += 1;
        {
                dlog_indent(indent, ".addr = %#016lx\n", pa_addr(addr));
                dlog_indent(indent, ".attrs = %#016lx\n", attrs);
        }
        indent -= 1;
        dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
        dlog("table {\n");
        indent += 1;
        {
                mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
                const struct mm_page_table *child_table =
                        arch_mm_table_from_pte(entry, level);
                paddr_t addr = pa_init((uintpaddr_t)child_table);

                dlog_indent(indent, ".pte = %#016lx,\n", entry);
                dlog_indent(indent, ".attrs = %#016lx,\n", attrs);
                dlog_indent(indent, ".addr = %#016lx,\n", pa_addr(addr));
                dlog_indent(indent, ".entries = ");
                mm_dump_entries(child_table->entries, level - 1, indent);
                dlog(",\n");
        }
        indent -= 1;
        dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
        switch (arch_mm_pte_type(entry, level)) {
        case PTE_TYPE_ABSENT:
                dlog("absent {}");
                break;
        case PTE_TYPE_INVALID_BLOCK:
        case PTE_TYPE_VALID_BLOCK: {
                mm_dump_block_entry(entry, level, indent);
                break;
        }
        case PTE_TYPE_TABLE: {
                mm_dump_table_entry(entry, level, indent);
                break;
        }
        }
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
                            uint32_t indent)
{
        dlog("{\n");
        indent += 1;

        for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
                pte_t entry = entries[i];

                if (arch_mm_pte_is_absent(entry, level)) {
                        continue;
                }

                dlog_indent(indent, "[level = %u, index = %zu] = ", level, i);
                mm_dump_entry(entry, level, indent);
                dlog(",\n");
        }

        indent -= 1;
        dlog_indent(indent, "}");
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable)
{
        struct mm_page_table *root_tables = ptable->root_tables;
        mm_level_t root_level = mm_root_level(ptable);
        uint8_t root_table_count = mm_root_table_count(ptable);
        uint32_t indent = 0;

        dlog_indent(indent, "mm_ptable {\n");
        indent += 1;
        {
                dlog_indent(indent, ".stage = %s,\n",
                            ptable->stage1 ? "stage1" : "stage2");
                dlog_indent(indent, ".id = %hu,\n", ptable->id);
                dlog_indent(indent, ".root_tables = {\n");

                indent += 1;
                {
                        for (size_t i = 0; i < root_table_count; ++i) {
                                dlog_indent(
                                        indent,
                                        "[level = %u, index = %zu].entries = ",
                                        root_level, i);
                                mm_dump_entries(root_tables[i].entries,
                                                root_level - 1, indent);
                                dlog(",\n");
                        }
                }
                indent -= 1;
                dlog_indent(indent, "},\n");
        }
        indent -= 1;
        dlog_indent(indent, "}\n");
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which it can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
{
        struct mm_page_table *table;
        mm_attr_t block_attrs;
        mm_attr_t table_attrs;
        mm_attr_t combined_attrs;
        paddr_t block_address;

        table = arch_mm_table_from_pte(table_pte, level);

        if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
                return arch_mm_absent_pte(level);
        }

        /* Might not be possible to merge the table into a single block. */
        if (!arch_mm_is_block_allowed(level)) {
                return table_pte;
        }

        /* Replace table with a single block, with equivalent attributes. */
        block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
        table_attrs = arch_mm_pte_attrs(table_pte, level);
        combined_attrs =
                arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
        block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

        return arch_mm_block_pte(level, block_address, combined_attrs);
}

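/*
 * For example, a level 1 table PTE whose 512 level 0 entries map a contiguous,
 * identically-attributed 2 MiB region can be merged into a single 2 MiB block
 * PTE at level 1 (assuming the usual 4 KiB granule and that blocks are
 * allowed at that level).
 */
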
/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(struct mm_ptable *ptable,
                                   ptable_addr_t base_addr, pte_t *entry,
                                   mm_level_t level, bool non_secure,
                                   struct mpool *ppool)
{
        struct mm_page_table *child_table;
        bool mergeable;
        bool base_present;
        mm_attr_t base_attrs;
        pte_t new_entry;

        if (!arch_mm_pte_is_table(*entry, level)) {
                return;
        }

        child_table = arch_mm_table_from_pte(*entry, level);

        /* Defrag the first entry in the table and use it as the base entry. */
        static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

        mm_ptable_defrag_entry(ptable, base_addr, &(child_table->entries[0]),
                               level - 1, non_secure, ppool);

        base_present =
                arch_mm_pte_is_present(child_table->entries[0], level - 1);
        base_attrs = arch_mm_pte_attrs(child_table->entries[0], level - 1);

        /*
         * Defrag the remaining entries in the table and check whether they are
         * compatible with the base entry meaning the table can be merged into a
         * block entry. It assumes addresses are contiguous due to identity
         * mapping.
         */
        mergeable = true;
        for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
                bool present;
                ptable_addr_t block_addr =
                        base_addr + (i * mm_entry_size(level - 1));

                mm_ptable_defrag_entry(ptable, block_addr,
                                       &(child_table->entries[i]), level - 1,
                                       non_secure, ppool);

                present = arch_mm_pte_is_present(child_table->entries[i],
                                                 level - 1);

                if (present != base_present) {
                        mergeable = false;
                        continue;
                }

                if (!present) {
                        continue;
                }

                if (!arch_mm_pte_is_block(child_table->entries[i], level - 1)) {
                        mergeable = false;
                        continue;
                }

                if (arch_mm_pte_attrs(child_table->entries[i], level - 1) !=
                    base_attrs) {
                        mergeable = false;
                        continue;
                }
        }

        if (!mergeable) {
                return;
        }

        new_entry = mm_merge_table_pte(*entry, level);
        if (*entry != new_entry) {
                mm_replace_entry(ptable, base_addr, entry, (uintptr_t)new_entry,
                                 level, non_secure, ppool);
        }
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, bool non_secure,
                             struct mpool *ppool)
{
        struct mm_page_table *root_tables = ptable->root_tables;
        mm_level_t root_level = mm_root_level(ptable);
        uint8_t root_table_count = mm_root_table_count(ptable);
        ptable_addr_t block_addr = 0;

        /*
         * Loop through each entry in the table. If it points to another table,
         * check if that table can be replaced by a block or an absent entry.
         */
        for (size_t i = 0; i < root_table_count; ++i) {
                for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
                        mm_ptable_defrag_entry(
                                ptable, block_addr, &root_tables[i].entries[j],
                                root_level - 1, non_secure, ppool);
                        block_addr = mm_start_of_next_block(block_addr,
                                                            root_level - 1);
                }
        }

        arch_mm_sync_table_writes();
}

struct mm_get_attrs_state {
        /**
         * The attributes the range is mapped with.
         * Only valid if `got_attrs` is true.
         */
        mm_attr_t attrs;
        /**
         * The address of the first page that does not match the attributes of
         * the pages before it in the range.
         * Only valid if `got_mismatch` is true.
         */
        ptable_addr_t mismatch;
        bool got_attrs : 1;
        bool got_mismatch : 1;
};

/**
 * Gets the attributes applied to the given range of addresses at the given
 * level.
 *
 * `state.got_attrs` is passed as false until `state.attrs` contains the
 * attributes of the memory region, at which point it is passed as true.
 *
 * Returns the updated state: `state.attrs` is only valid if `state.got_attrs`
 * is set, and `state.got_mismatch` is set if the whole range is not mapped
 * with the same attributes.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static struct mm_get_attrs_state mm_ptable_get_attrs_level(
        const struct mm_page_table *table, ptable_addr_t begin,
        ptable_addr_t end, mm_level_t level, struct mm_get_attrs_state state)
{
        const pte_t *pte = &table->entries[mm_index(begin, level)];
        ptable_addr_t level_end = mm_level_end(begin, level);

        /* Cap end so that we don't go over the current level max. */
        if (end > level_end) {
                end = level_end;
        }

        /* Check each entry in the range. */
        while (begin < end && !state.got_mismatch) {
                switch (arch_mm_pte_type(*pte, level)) {
                case PTE_TYPE_TABLE: {
                        const struct mm_page_table *child_table =
                                arch_mm_table_from_pte(*pte, level);
                        state = mm_ptable_get_attrs_level(
                                child_table, begin, end, level - 1, state);
                        break;
                }

                case PTE_TYPE_ABSENT:
                case PTE_TYPE_INVALID_BLOCK:
                case PTE_TYPE_VALID_BLOCK: {
                        mm_attr_t block_attrs = arch_mm_pte_attrs(*pte, level);

                        if (state.got_attrs && block_attrs != state.attrs) {
                                state.mismatch = begin;
                                state.got_mismatch = true;
                                continue;
                        }

                        state.got_attrs = true;
                        state.attrs = block_attrs;
                        break;
                }
                }

                begin = mm_start_of_next_block(begin, level);
                pte++;
        }

        return state;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * Returns the resulting walk state: `state.attrs` is only valid if
 * `state.got_attrs` is set, and `state.got_mismatch` is set if the whole range
 * is not mapped with the same attributes.
 */
static struct mm_get_attrs_state mm_get_attrs(const struct mm_ptable *ptable,
                                              ptable_addr_t begin,
                                              ptable_addr_t end)
{
        mm_level_t root_level = mm_root_level(ptable);
        ptable_addr_t ptable_end = mm_ptable_addr_space_end(ptable);
        struct mm_page_table *root_table;
        struct mm_get_attrs_state state = {0};

        if (begin >= end) {
                dlog_verbose(
                        "mm_get: input range is backwards (%#016lx >= "
                        "%#016lx)\n",
                        begin, end);
        }

        begin = mm_round_down_to_page(begin);
        end = mm_round_up_to_page(end);

        /* Fail if the addresses are out of range. */
        if (end > ptable_end) {
                return state;
        }

        root_table = &ptable->root_tables[mm_index(begin, root_level)];
        while (begin < end && !state.got_mismatch) {
                state = mm_ptable_get_attrs_level(root_table, begin, end,
                                                  root_level - 1, state);

                begin = mm_start_of_next_block(begin, root_level);
                root_table++;
        }

        return state;
}

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool)
{
        return mm_ptable_init(ptable, id, false, ppool);
}

void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
        mm_ptable_fini(ptable, ppool);
}

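/*
 * A minimal usage sketch for the pair above, assuming a caller that owns an
 * initialised `struct mpool` named `ppool` and a VM identifier `vm_id`:
 *
 *   struct mm_ptable vm_ptable;
 *
 *   if (!mm_vm_init(&vm_ptable, vm_id, ppool)) {
 *           return false;
 *   }
 *   ...map and unmap ranges, e.g. with mm_vm_identity_map()...
 *   mm_vm_fini(&vm_ptable, ppool);
 */
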
/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static struct mm_flags mm_mode_to_flags(mm_mode_t mode)
{
        struct mm_flags flags = {0};

        if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
                flags.unmap = true;
        }

        return flags;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                         mm_mode_t mode, struct mpool *ppool)
{
        struct mm_flags flags = mm_mode_to_flags(mode);

        assert(ptable->stage1);
        return mm_ptable_identity_prepare(ptable, begin, end,
                                          arch_mm_mode_to_stage1_attrs(mode),
                                          flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                         mm_mode_t mode, struct mpool *ppool)
{
        struct mm_flags flags = mm_mode_to_flags(mode);

        assert(ptable->stage1);
        mm_ptable_identity_commit(ptable, begin, end,
                                  arch_mm_mode_to_stage1_attrs(mode), flags,
                                  ppool);
        return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
                            paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
        struct mm_flags flags = mm_mode_to_flags(mode);

        return mm_ptable_identity_prepare(ptable, begin, end,
                                          arch_mm_mode_to_stage2_attrs(mode),
                                          flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                           mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        struct mm_flags flags = mm_mode_to_flags(mode);

        mm_ptable_identity_commit(ptable, begin, end,
                                  arch_mm_mode_to_stage2_attrs(mode), flags,
                                  ppool);

        if (ipa != NULL) {
                *ipa = ipa_from_pa(begin);
        }
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                        mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        struct mm_flags flags = mm_mode_to_flags(mode);
        bool success = mm_ptable_identity_update(
                ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
                ppool);

        if (success && ipa != NULL) {
                *ipa = ipa_from_pa(begin);
        }

        return success;
}

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                 struct mpool *ppool)
{
        mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

        return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
}

1096/**
Andrew Scullda3df7f2019-01-05 17:49:27 +00001097 * Writes the given VM page table to the debug log.
1098 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001099void mm_vm_dump(const struct mm_ptable *ptable)
Andrew Scullda3df7f2019-01-05 17:49:27 +00001100{
Karl Meakin0f506a12025-02-08 23:28:45 +00001101 mm_ptable_dump(ptable);
Andrew Scullda3df7f2019-01-05 17:49:27 +00001102}
1103
1104/**
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001105 * Defragments a stage-1 page table.
1106 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001107void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool)
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001108{
Karl Meakin0f506a12025-02-08 23:28:45 +00001109 assert(ptable->stage1);
1110 mm_ptable_defrag(ptable, false, ppool);
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001111}
1112
1113/**
Andrew Scullda3df7f2019-01-05 17:49:27 +00001114 * Defragments the VM page table.
1115 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001116void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
1117 bool non_secure)
Andrew Scullda3df7f2019-01-05 17:49:27 +00001118{
Karl Meakin0f506a12025-02-08 23:28:45 +00001119 mm_ptable_defrag(ptable, non_secure, ppool);
Andrew Scullda3df7f2019-01-05 17:49:27 +00001120}
1121
1122/**
Fuad Tabba9dc276f2020-07-16 09:29:32 +01001123 * Gets the mode of the given range of intermediate physical addresses if they
Andrew Scull81e85092018-12-12 12:56:20 +00001124 * are mapped with the same mode.
1125 *
1126 * Returns true if the range is mapped with the same mode and false otherwise.
Andrew Scull80871322018-08-06 12:04:09 +01001127 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001128bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001129 ipaddr_t end, mm_mode_t *mode)
Andrew Scull80871322018-08-06 12:04:09 +01001130{
Karl Meakind127a452025-02-18 16:25:45 +00001131 struct mm_get_attrs_state ret;
1132 bool success;
Andrew Scull81e85092018-12-12 12:56:20 +00001133
Karl Meakind127a452025-02-18 16:25:45 +00001134 ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end));
1135 success = ret.got_attrs && !ret.got_mismatch;
1136
1137 if (success && mode != NULL) {
1138 *mode = arch_mm_stage2_attrs_to_mode(ret.attrs);
Andrew Scull81e85092018-12-12 12:56:20 +00001139 }
1140
Karl Meakind127a452025-02-18 16:25:45 +00001141 return success;
Andrew Scull80871322018-08-06 12:04:09 +01001142}
1143
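/**
 * Gets the mode of an initial portion of the given range of intermediate
 * physical addresses, up to the first address at which the mode changes.
 *
 * Returns true if a mode could be determined for the start of the range. On
 * success, if `end_ret` is non-NULL it is set to the address at which the mode
 * stops applying, or to `end` if the whole range shares the same mode.
 */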
Karl Meakinb2b5ff72025-02-19 15:47:56 +00001144bool mm_vm_get_mode_partial(const struct mm_ptable *ptable, ipaddr_t begin,
1145 ipaddr_t end, mm_mode_t *mode, ipaddr_t *end_ret)
1146{
1147 struct mm_get_attrs_state ret;
1148 bool success;
1149
1150 ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end));
1151 success = ret.got_attrs;
1152
1153 if (success && mode != NULL) {
1154 *mode = arch_mm_stage2_attrs_to_mode(ret.attrs);
1155 }
1156
1157 if (success && end_ret != NULL) {
1158 *end_ret = ret.mismatch ? ipa_init(ret.mismatch) : end;
1159 }
1160
1161 return success;
1162}
1163
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001164/**
1165 * Gets the mode of the given range of virtual addresses if they
1166 * are mapped with the same mode.
1167 *
1168 * Returns true if the range is mapped with the same mode and false otherwise.
1169 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001170bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001171 mm_mode_t *mode)
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001172{
Karl Meakind127a452025-02-18 16:25:45 +00001173 struct mm_get_attrs_state ret;
1174 bool success;
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001175
Karl Meakin0f506a12025-02-08 23:28:45 +00001176 assert(ptable->stage1);
Karl Meakind127a452025-02-18 16:25:45 +00001177
1178 ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end));
1179 success = ret.got_attrs && !ret.got_mismatch;
1180
1181 if (success && mode != NULL) {
1182 *mode = arch_mm_stage1_attrs_to_mode(ret.attrs);
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001183 }
1184
Karl Meakind127a452025-02-18 16:25:45 +00001185 return success;
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001186}
1187
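/**
 * Gets the mode of an initial portion of the given range of virtual addresses,
 * up to the first address at which the mode changes. Only valid for stage-1
 * page tables.
 *
 * Returns true if a mode could be determined for the start of the range. On
 * success, if `end_ret` is non-NULL it is set to the address at which the mode
 * stops applying, or to `end` if the whole range shares the same mode.
 */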
Karl Meakinb2b5ff72025-02-19 15:47:56 +00001188bool mm_get_mode_partial(const struct mm_ptable *ptable, vaddr_t begin,
1189 vaddr_t end, mm_mode_t *mode, vaddr_t *end_ret)
1190{
1191 struct mm_get_attrs_state ret;
1192 bool success;
1193
1194 assert(ptable->stage1);
1195
1196 ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end));
1197 success = ret.got_attrs;
1198
1199 if (success && mode != NULL) {
1200 *mode = arch_mm_stage1_attrs_to_mode(ret.attrs);
1201 }
1202
1203 if (success && end_ret != NULL) {
1204 *end_ret = ret.mismatch ? va_init(ret.mismatch) : end;
1205 }
1206
1207 return success;
1208}
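
/*
 * Example: walking a stage-1 virtual address range in chunks that share a
 * mode, using `mm_get_mode_partial`. This is an illustrative sketch; `table`,
 * `begin` and `end` are hypothetical locals owned by the caller:
 *
 *	vaddr_t chunk = begin;
 *
 *	while (va_addr(chunk) < va_addr(end)) {
 *		mm_mode_t mode;
 *		vaddr_t chunk_end;
 *
 *		if (!mm_get_mode_partial(&table, chunk, end, &mode,
 *					 &chunk_end)) {
 *			break;
 *		}
 *		// [chunk, chunk_end) is mapped with `mode`; handle it here.
 *		chunk = chunk_end;
 *	}
 */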
1209
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001210static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
1211{
1212 return (struct mm_stage1_locked){.ptable = &ptable};
1213}
1214
Raghu Krishnamurthyd3ab8c32021-02-10 19:11:30 -08001215struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
1216{
1217 return (struct mm_stage1_locked){.ptable = ptable};
1218}
1219
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001220struct mm_stage1_locked mm_lock_stage1(void)
1221{
1222 sl_lock(&ptable_lock);
1223 return mm_stage1_lock_unsafe();
1224}
1225
1226void mm_unlock_stage1(struct mm_stage1_locked *lock)
1227{
Andrew Scull877ae4b2019-07-02 12:52:33 +01001228 CHECK(lock->ptable == &ptable);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001229 sl_unlock(&ptable_lock);
1230 lock->ptable = NULL;
1231}
1232
Andrew Scull80871322018-08-06 12:04:09 +01001233/**
Andrew Scull80871322018-08-06 12:04:09 +01001234 * Updates the hypervisor page table such that the given physical address range
1235 * is mapped into the address space at the corresponding address range in the
1236 * architecture-agnostic mode provided.
1237 */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001238void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001239 paddr_t end, mm_mode_t mode, struct mpool *ppool)
Andrew Scull80871322018-08-06 12:04:09 +01001240{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001241 struct mm_flags flags = mm_mode_to_flags(mode);
Karl Meakin07a69ab2025-02-07 14:53:19 +00001242
Karl Meakin0f506a12025-02-08 23:28:45 +00001243 assert(stage1_locked.ptable->stage1);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001244 if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
Andrew Scull73b89542019-11-20 17:31:26 +00001245 arch_mm_mode_to_stage1_attrs(mode), flags,
1246 ppool)) {
Andrew Scull4e5f8142018-10-12 14:37:19 +01001247 return ptr_from_va(va_from_pa(begin));
Andrew Scull80871322018-08-06 12:04:09 +01001248 }
1249
1250 return NULL;
1251}
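
/*
 * Example: mapping a physical range into the hypervisor's own address space
 * and removing the mapping again. This is an illustrative sketch;
 * `range_begin`, `range_end` and `pool` are hypothetical and the mode is
 * chosen arbitrarily:
 *
 *	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
 *	void *va = mm_identity_map(stage1_locked, range_begin, range_end,
 *				   MM_MODE_R | MM_MODE_W, &pool);
 *
 *	if (va != NULL) {
 *		// Access the memory through `va`, then remove the mapping.
 *		mm_unmap(stage1_locked, range_begin, range_end, &pool);
 *	}
 *	mm_unlock_stage1(&stage1_locked);
 */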
1252
1253/**
1254 * Updates the hypervisor page table such that the given physical address range is
1255 * not mapped in the address space.
1256 */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001257bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
1258 struct mpool *ppool)
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001259{
Karl Meakin07a69ab2025-02-07 14:53:19 +00001260 mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
Andrew Scull73b89542019-11-20 17:31:26 +00001261
1262 return mm_identity_map(stage1_locked, begin, end, mode, ppool);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001263}
1264
1265/**
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001266 * Defragments the hypervisor page table.
1267 */
1268void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
1269{
Karl Meakin0f506a12025-02-08 23:28:45 +00001270 assert(stage1_locked.ptable->stage1);
1271 mm_ptable_defrag(stage1_locked.ptable, false, ppool);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001272}
1273
1274/**
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001275 * Initialises memory management for the hypervisor itself.
1276 */
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +00001277bool mm_init(struct mpool *ppool)
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001278{
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001279	/* Locking is not enabled yet so fake it. */
1280 struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
1281
Karl Meakine8937d92024-03-19 16:04:25 +00001282 dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001283 pa_addr(layout_text_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001284 dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001285 pa_addr(layout_rodata_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001286 dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001287 pa_addr(layout_data_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001288 dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001289 pa_addr(layout_stacks_end()));
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001290
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -08001291 /* ASID 0 is reserved for use by the hypervisor. */
Karl Meakin0f506a12025-02-08 23:28:45 +00001292 if (!mm_ptable_init(&ptable, 0, true, ppool)) {
Andrew Walbran17eebf92020-02-05 16:35:49 +00001293 dlog_error("Unable to allocate memory for page table.\n");
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001294 return false;
1295 }
1296
Arunachalam Ganapathy0f0f7062022-01-26 17:09:53 +00001297	/* Initialise arch_mm before calling the mapping routines below. */
Karl Meakine1aeb1d2025-02-08 00:35:14 +00001298 if (!arch_mm_init(&ptable)) {
Arunachalam Ganapathy0f0f7062022-01-26 17:09:53 +00001299 return false;
1300 }
1301
Andrew Walbran48699362019-05-20 14:38:00 +01001302	/* Let the console driver map pages for itself. */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001303 plat_console_mm_init(stage1_locked, ppool);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001304
1305 /* Map each section. */
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001306 CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
1307 layout_text_end(), MM_MODE_X, ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001308
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001309 CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
1310 layout_rodata_end(), MM_MODE_R, ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001311
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001312 CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
1313 layout_data_end(), MM_MODE_R | MM_MODE_W,
1314 ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001315
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001316 /* Arch-specific stack mapping. */
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001317 CHECK(arch_stack_mm_init(stage1_locked, ppool));
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001318
Arunachalam Ganapathy0f0f7062022-01-26 17:09:53 +00001319 return true;
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001320}