/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to
 * access memory, and then a stage 2 mapping per VM. The design assumes that
 * all page tables contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

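/* The hypervisor's stage-1 page table and the lock that protects it. */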
static struct mm_ptable ptable;
static struct spinlock ptable_lock;

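/*
 * Whether stage-2 page table modifications use break-before-make and TLB
 * invalidation; off until mm_vm_enable_invalidation() is called.
 */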
static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
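 *
 * For example, assuming a 4 KiB granule (PAGE_BITS == 12 and PAGE_LEVEL_BITS
 * == 9), a level 1 entry covers 1 << (12 + 9) bytes, i.e. 2 MiB.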
 */
static size_t mm_entry_size(mm_level_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, mm_level_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Allocates `count` page tables; if more than one is requested, they are
 * allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static mm_level_t mm_max_level(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_max_level()
			    : arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_root_table_count()
			    : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      struct mm_flags flags, bool non_secure,
			      mm_asid_t id)
{
	if (flags.stage1) {
		arch_mm_invalidate_stage1_range(id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
{
	struct mm_page_table *table;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
		    struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (size_t i = 0; i < root_table_count; i++) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	ptable->root = pa_init((uintpaddr_t)tables);
	ptable->id = id;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable,
			   struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	mm_level_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new
 * values are valid, it performs a break-before-make sequence where it first
 * writes an invalid value to the PTE, flushes the TLB, then writes the actual
 * new value. This is to prevent cases where CPUs have different 'valid'
 * values in their TLBs, which may result in issues, for example with cache
 * coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     mm_level_t level, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool, mm_asid_t id)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if ((flags.stage1 || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags,
				  non_secure, id);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, mm_level_t level,
						   struct mm_flags flags,
						   bool non_secure,
						   struct mpool *ppool,
						   mm_asid_t id)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t inc;
	mm_level_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, non_secure, ppool, id);

	return ntable;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if `flags.unmap` is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 mm_attr_t attrs, struct mm_page_table *table,
			 mm_level_t level, struct mm_flags flags,
			 struct mpool *ppool, mm_asid_t id)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags.commit;
	bool unmap = flags.unmap;
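	/* Bit 57 of the attributes marks the mapping as non-secure. */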
	bool non_secure = ((attrs & (1ULL << 57)) != 0);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, non_secure, ppool, id);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(begin, pte, level, flags,
						      non_secure, ppool, id);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool, id)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if `flags.unmap` is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *ptable, ptable_addr_t begin,
			ptable_addr_t end, mm_attr_t attrs,
			mm_level_t root_level, struct mm_flags flags,
			struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool, ptable->id)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if `flags.commit` is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
				   paddr_t pa_end, mm_attr_t attrs,
				   struct mm_flags flags, struct mpool *ppool)
{
	mm_level_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of mm_max_level(),
	 * that isn't encoded in the types, to the static analyzer.
	 */
	assert(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(ptable, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
				       paddr_t pa_begin, paddr_t pa_end,
				       mm_attr_t attrs, struct mm_flags flags,
				       struct mpool *ppool)
{
	flags.commit = false;
	return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				      ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory, resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	flags.commit = true;
	CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				     ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
				  ppool);

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_recursive(const struct mm_page_table *ptable,
				    mm_level_t level, uint8_t depth)
{
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		pte_t entry = ptable->entries[i];

		if (arch_mm_pte_is_absent(entry, level)) {
			continue;
		}

		dlog("%*s[level=%u, index=%zu]: ", depth * 2, "", level, i);

		switch (arch_mm_pte_type(entry, level)) {
		case PTE_TYPE_ABSENT: {
			/* Do nothing */
			break;
		}
		case PTE_TYPE_INVALID_BLOCK: {
			dlog("INVALID_BLOCK(%#lx)\n", entry);
			break;
		}
		case PTE_TYPE_VALID_BLOCK: {
			dlog("VALID_BLOCK(%#lx)\n", entry);
			break;
		}
		case PTE_TYPE_TABLE: {
			const struct mm_page_table *child_table =
				mm_page_table_from_pa(
					arch_mm_table_from_pte(entry, level));

			dlog("TABLE(%#lx)\n", entry);

			mm_dump_table_recursive(child_table, level - 1,
						depth + 1);
			break;
		}
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable,
			   struct mm_flags flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	mm_level_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single block (or absent) entry with which the table can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
{
	struct mm_page_table *table;
	mm_attr_t block_attrs;
	mm_attr_t table_attrs;
	mm_attr_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
				   mm_level_t level, struct mm_flags flags,
				   bool non_secure, struct mpool *ppool,
				   mm_asid_t id)
{
	struct mm_page_table *table;
	bool mergeable;
	bool base_present;
	mm_attr_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
			       flags, non_secure, ppool, id);

	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry meaning the table can be merged into a
	 * block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
				       level - 1, flags, non_secure, ppool, id);

		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(base_addr, entry, (uintptr_t)new_entry, level,
				 flags, non_secure, ppool, id);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	mm_level_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(
				block_addr, &(tables[i].entries[j]), level,
				flags, non_secure, ppool, ptable->id);
			block_addr = mm_start_of_next_block(
				block_addr, mm_entry_size(level));
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * the attributes of the memory region, at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(const struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      mm_level_t level, bool got_attrs,
				      mm_attr_t *attrs)
{
	const pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry in the range has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The whole range had consistent attributes, if any were found. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t *attrs,
			 struct mm_flags flags)
{
	mm_level_t max_level = mm_max_level(flags);
	mm_level_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

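/**
 * Initialises the given page table as a stage-2 table for the VM with the
 * given ASID.
 */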
bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool)
{
	return mm_ptable_init(ptable, id, (struct mm_flags){0}, ppool);
}

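/**
 * Frees all memory associated with the given VM page table.
 */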
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_fini(ptable, (struct mm_flags){0}, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static struct mm_flags mm_mode_to_flags(mm_mode_t mode)
{
	struct mm_flags flags = {0};

	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		flags.unmap = true;
	}

	return flags;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			   mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}
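
/*
 * A minimal usage sketch of the prepare/commit pair, assuming a stage-2 table
 * `t`, a mode `mode` and a pool `ppool` with enough pages (hypothetical
 * names):
 *
 *	if (!mm_vm_identity_prepare(&t, begin, end, mode, ppool)) {
 *		return false;
 *	}
 *	mm_vm_identity_commit(&t, begin, end, mode, ppool, &ipa);
 *
 * Prepare may fail but leaves the address space unchanged; once it succeeds,
 * the commit for the same mapping cannot fail.
 */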

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(const struct mm_ptable *ptable)
{
	mm_ptable_dump(ptable, (struct mm_flags){0});
}

/**
 * Defragments a stage-1 page table.
 */
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_defrag(ptable, (struct mm_flags){.stage1 = true}, false,
			 ppool);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure)
{
	mm_ptable_defrag(ptable, (struct mm_flags){0}, non_secure, ppool);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
		    ipaddr_t end, mm_mode_t *mode)
{
	mm_attr_t attrs;
	bool ret;

	ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs,
			   (struct mm_flags){0});
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}

/**
 * Gets the mode of the given range of virtual addresses if they are mapped
 * with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 mm_mode_t *mode)
{
	mm_attr_t attrs;
	bool ret;

	ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs,
			   (struct mm_flags){.stage1 = true});
	if (ret) {
		*mode = arch_mm_stage1_attrs_to_mode(attrs);
	}

	return ret;
}

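/**
 * Gets a handle on the hypervisor's stage-1 page table without acquiring
 * `ptable_lock`; only used where locking is not yet enabled or the lock is
 * already held.
 */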
static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
{
	return (struct mm_stage1_locked){.ptable = ptable};
}

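/**
 * Locks the hypervisor's stage-1 page table and returns a handle to it.
 */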
struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

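/**
 * Unlocks the hypervisor's stage-1 page table and invalidates the handle.
 */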
void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode), flags,
				      ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
	mm_ptable_defrag(stage1_locked.ptable,
			 (struct mm_flags){.stage1 = true}, false, ppool);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
	struct mm_flags flags = {.stage1 = true};

	dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
		  pa_addr(layout_text_end()));
	dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
		  pa_addr(layout_rodata_end()));
	dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
		  pa_addr(layout_data_end()));
	dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
		  pa_addr(layout_stacks_end()));

	/* ASID 0 is reserved for use by the hypervisor. */
	if (!mm_ptable_init(&ptable, 0, flags, ppool)) {
		dlog_error("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Initialise arch_mm before calling the mapping routines below. */
	if (!arch_mm_init(ptable.root)) {
		return false;
	}

	/* Let console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
			      layout_text_end(), MM_MODE_X, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
			      layout_rodata_end(), MM_MODE_R, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
			      layout_data_end(), MM_MODE_R | MM_MODE_W,
			      ppool) != NULL);

	/* Arch-specific stack mapping. */
	CHECK(arch_stack_mm_init(stage1_locked, ppool));

	return true;
}