/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"

#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

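/*
 * The hypervisor's own (stage-1) page table and the spinlock protecting it.
 */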
static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
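 * For example, assuming 4 KiB pages (PAGE_BITS == 12) and 9 address bits per
 * level (PAGE_LEVEL_BITS == 9), a level 0 entry covers 4 KiB, a level 1 entry
 * 2 MiB and a level 2 entry 1 GiB.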
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Allocates the given number of page tables, which are contiguous if more than
 * one is requested.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags,
			      uint16_t id, bool non_secure)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
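 * For example, assuming 4 KiB pages, 9 address bits per level, a maximum level
 * of 2 and a single root table, this is mm_entry_size(3), i.e. 512 GiB.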
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, uint16_t id, int flags,
		    struct mpool *ppool)
{
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (size_t i = 0; i < root_table_count; i++) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	ptable->root = pa_init((uintpaddr_t)tables);
	ptable->id = id;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable, int flags,
			   struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool,
			     uint16_t id, bool non_secure)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
				  id, non_secure);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool,
						   uint16_t id, bool non_secure)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool, id, non_secure);

	return ntable;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool,
			 uint16_t id)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
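			/*
			 * The final argument tells mm_replace_entry() whether
			 * the mapping being changed is non-secure, so that the
			 * TLB is invalidated for the right security state; bit
			 * 57 of the attributes is assumed here to be the
			 * architecture-specific attribute bit recording that
			 * state.
			 */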
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool, id,
						 (attrs & (1ULL << 57)) != 0);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool, id,
				(attrs & (1ULL << 57)) != 0);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool, id)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *ptable, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool, ptable->id)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if MM_FLAG_COMMIT is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
				   paddr_t pa_end, uint64_t attrs, int flags,
				   struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of mm_max_level(),
	 * that isn't encoded in the types, to the static analyzer.
	 */
	assert(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(ptable, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes to succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
				       paddr_t pa_begin, paddr_t pa_end,
				       uint64_t attrs, int flags,
				       struct mpool *ppool)
{
	flags &= ~MM_FLAG_COMMIT;
	return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				      ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs,
				     flags | MM_FLAG_COMMIT, ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
				  ppool);

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_recursive(const struct mm_page_table *ptable,
				    uint8_t level, int max_level)
{
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(ptable->entries[i], level)) {
			continue;
		}

		dlog("%*s%lx: %lx\n", 4 * (max_level - level), "", i,
		     ptable->entries[i]);

		if (arch_mm_pte_is_table(ptable->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					ptable->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given that the table's PTE entries all have identical attributes, returns
 * the single entry with which the table can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
				   uint8_t level, int flags,
				   struct mpool *ppool, uint16_t id,
				   bool non_secure)
{
	struct mm_page_table *table;
	bool mergeable;
	bool base_present;
	uint64_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
			       flags, ppool, id, non_secure);

	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry, meaning the table can be merged into
	 * a block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
				       level - 1, flags, ppool, id, non_secure);

		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(base_addr, entry, (uintptr_t)new_entry, level,
				 flags, ppool, id, non_secure);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, int flags,
			     struct mpool *ppool, bool non_secure)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(
				block_addr, &(tables[i].entries[j]), level,
				flags, ppool, ptable->id, non_secure);
			block_addr = mm_start_of_next_block(
				block_addr, mm_entry_size(level));
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(const struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	const pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry is owned. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The entry is a valid block. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, uint64_t *attrs, int flags)
{
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

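/**
 * Initialises the given stage-2 page table for a VM; `id` identifies the table
 * when invalidating the TLB.
 */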
bool mm_vm_init(struct mm_ptable *ptable, uint16_t id, struct mpool *ppool)
{
	return mm_ptable_init(ptable, id, 0, ppool);
}

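/**
 * Frees all memory associated with the given VM page table.
 */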
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_fini(ptable, 0, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static int mm_mode_to_flags(uint32_t mode)
{
	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		return MM_FLAG_UNMAP;
	}

	return 0;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, uint32_t mode, struct mpool *ppool)
{
	int flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(const struct mm_ptable *ptable)
{
	mm_ptable_dump(ptable, 0);
}

/**
 * Defragments a stage-1 page table.
 */
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_defrag(ptable, MM_FLAG_STAGE1, ppool, false);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure)
{
	mm_ptable_defrag(ptable, 0, ppool, non_secure);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
		    ipaddr_t end, uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs, 0);
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}

/**
 * Gets the mode of the given range of virtual addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs,
			   MM_FLAG_STAGE1);
	if (ret) {
		*mode = arch_mm_stage1_attrs_to_mode(attrs);
	}

	return ret;
}

static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

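/**
 * Wraps the given page table in a stage-1 locked handle without acquiring any
 * lock; the caller is responsible for ensuring exclusive access.
 */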
struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
{
	return (struct mm_stage1_locked){.ptable = ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool)
{
	int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode);

	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode), flags,
				      ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
	mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool, false);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet, so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();

	dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
		  pa_addr(layout_text_end()));
	dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
		  pa_addr(layout_rodata_end()));
	dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
		  pa_addr(layout_data_end()));
	dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
		  pa_addr(layout_stacks_end()));

	/* ASID 0 is reserved for use by the hypervisor. */
	if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, ppool)) {
		dlog_error("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Initialise arch_mm before calling the mapping routines below. */
	if (!arch_mm_init(ptable.root)) {
		return false;
	}

	/* Let console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
			      layout_text_end(), MM_MODE_X, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
			      layout_rodata_end(), MM_MODE_R, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
			      layout_data_end(), MM_MODE_R | MM_MODE_W,
			      ppool) != NULL);

	/* Arch-specific stack mapping. */
	CHECK(arch_stack_mm_init(stage1_locked, ppool));

	return true;
}