/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"
#include "hf/std.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Get the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return align_down(addr, PAGE_SIZE);
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return align_up(addr, PAGE_SIZE);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(mm_level_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
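
/*
 * Worked example (illustrative only): assuming the common 4 KiB granule
 * configuration where PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, an entry
 * covers 4 KiB at level 0 (1 << 12), 2 MiB at level 1 (1 << 21) and 1 GiB at
 * level 2 (1 << 30). Other configurations give different sizes; the formula
 * above is what counts.
 */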

/**
 * Gets the address of the start of the next block of the given level.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    mm_level_t level)
{
	return align_up(addr + 1, mm_entry_size(level));
}
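
/*
 * Illustrative only: with a 2 MiB level-1 entry size, both 0x200000 (already
 * aligned) and 0x3ff000 advance to 0x400000. Because of the `+ 1`, the
 * function never returns the address it was given.
 */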

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}
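
/*
 * Illustrative only: assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, a
 * level-1 table spans 1 GiB (shift of 12 + 2 * 9 = 30 bits), so
 * mm_level_end(0x40200000, 1) == 0x80000000, the first address beyond the
 * 1 GiB region containing the input.
 */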

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, mm_level_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
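
/*
 * Illustrative only: assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, this
 * extracts the 9-bit slice of the address that indexes the table at the given
 * level: bits [20:12] at level 0, bits [29:21] at level 1, and so on.
 */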

/**
 * Allocates a new page table.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}
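
/*
 * Note (assumption): when more than one table is requested, e.g. for
 * concatenated root tables, they are allocated contiguously, with `count`
 * passed as both the number of tables and the alignment.
 */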

/**
 * Returns the maximum level in the page table given the flags.
 */
static mm_level_t mm_max_level(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_max_level()
			    : arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_root_table_count()
			    : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(const struct mm_ptable *ptable,
			      ptable_addr_t begin, ptable_addr_t end,
			      struct mm_flags flags, bool non_secure)
{
	if (flags.stage1) {
		arch_mm_invalidate_stage1_range(ptable->id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ptable->id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
{
	struct mm_page_table *table;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}
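
/*
 * Illustrative only: the result is the number of root tables multiplied by
 * the span of a single root table. For instance, if one root table spans
 * 512 GiB, four concatenated root tables give an address space end of 2 TiB.
 */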

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
		    struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (size_t i = 0; i < root_table_count; i++) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	ptable->root = pa_init((uintpaddr_t)tables);
	ptable->id = id;
	return true;
}

/**
232 * Frees all memory associated with the give page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable,
			   struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	mm_level_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues for example in cache coherency.
 */
static void mm_replace_entry(const struct mm_ptable *ptable,
			     ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     mm_level_t level, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if ((flags.stage1 || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(ptable, begin, begin + mm_entry_size(level),
				  flags, non_secure);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(
	const struct mm_ptable *ptable, ptable_addr_t begin, pte_t *pte,
	mm_level_t level, struct mm_flags flags, bool non_secure,
	struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t inc;
	mm_level_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(ptable, begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, non_secure, ppool);

	return ntable;
}

/**
 * Updates the page table at the given level to map the given address range to a
 * physical range using the provided (architecture-specific) attributes. Or if
 * `flags.unmap` is set, unmap the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t attrs,
			 struct mm_page_table *child_table, mm_level_t level,
			 struct mm_flags flags, struct mpool *ppool)
{
	pte_t *pte = &child_table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags.commit;
	bool unmap = flags.unmap;
	bool non_secure = ((attrs & (1ULL << 57)) != 0);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   is_aligned(begin, entry_size)) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(
							level, pa_init(begin),
							attrs);
				mm_replace_entry(ptable, begin, pte, new_pte,
						 level, flags, non_secure,
						 ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(ptable, begin, pte, level,
						      flags, non_secure, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(ptable, begin, end, attrs, nt,
					  level - 1, flags, ppool)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, level);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes. Or if
 * `flags.unmap` is set, unmap the given range instead.
 */
static bool mm_map_root(struct mm_ptable *ptable, ptable_addr_t begin,
			ptable_addr_t end, mm_attr_t attrs,
			mm_level_t root_level, struct mm_flags flags,
			struct mpool *ppool)
{
	struct mm_page_table *child_table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(ptable, begin, end, attrs, child_table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_level);
		child_table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if `flags.commit` is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
				   paddr_t pa_end, mm_attr_t attrs,
				   struct mm_flags flags, struct mpool *ppool)
{
	mm_level_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of mm_max_level(),
	 * that isn't encoded in the types, to the static analyzer.
	 */
	assert(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(ptable, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes to succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
				       paddr_t pa_begin, paddr_t pa_end,
				       mm_attr_t attrs, struct mm_flags flags,
				       struct mpool *ppool)
{
	flags.commit = false;
	return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				      ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the atomicity
 * of higher level memory operations, any detected violations result in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	flags.commit = true;
	CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				     ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
				  ppool);

	return true;
}

static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent);

static void mm_dump_block_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
	paddr_t addr = arch_mm_block_from_pte(entry, level);

	if (arch_mm_pte_is_valid(entry, level)) {
		if (level == 0) {
			dlog("page {\n");
		} else {
			dlog("block {\n");
		}
	} else {
		dlog("invalid_block {\n");
	}

	indent += 1;
	{
		dlog_indent(indent, ".addr = %#016lx\n", pa_addr(addr));
		dlog_indent(indent, ".attrs = %#016lx\n", attrs);
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	dlog("table {\n");
	indent += 1;
	{
		mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
		paddr_t addr = arch_mm_table_from_pte(entry, level);
		const struct mm_page_table *child_table =
			mm_page_table_from_pa(addr);

		dlog_indent(indent, ".pte = %#016lx,\n", entry);
		dlog_indent(indent, ".attrs = %#016lx,\n", attrs);
		dlog_indent(indent, ".addr = %#016lx,\n", pa_addr(addr));
		dlog_indent(indent, ".entries = ");
		mm_dump_entries(child_table->entries, level - 1, indent);
		dlog(",\n");
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	switch (arch_mm_pte_type(entry, level)) {
	case PTE_TYPE_ABSENT:
		dlog("absent {}");
		break;
	case PTE_TYPE_INVALID_BLOCK:
	case PTE_TYPE_VALID_BLOCK: {
		mm_dump_block_entry(entry, level, indent);
		break;
	}
	case PTE_TYPE_TABLE: {
		mm_dump_table_entry(entry, level, indent);
		break;
	}
	}
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent)
{
	dlog("{\n");
	indent += 1;

	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		pte_t entry = entries[i];

		if (arch_mm_pte_is_absent(entry, level)) {
			continue;
		}

		dlog_indent(indent, "[level = %u, index = %zu] = ", level, i);
		mm_dump_entry(entry, level, indent);
		dlog(",\n");
	}

	indent -= 1;
	dlog_indent(indent, "}");
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable,
			   struct mm_flags flags)
{
	struct mm_page_table *root_tables = mm_page_table_from_pa(ptable->root);
	mm_level_t root_level = mm_max_level(flags) + 1;
	uint8_t root_table_count = mm_root_table_count(flags);
	uint32_t indent = 0;

	dlog_indent(indent, "mm_ptable {\n");
	indent += 1;
	{
		dlog_indent(indent, ".stage = %s,\n",
			    flags.stage1 ? "stage1" : "stage2");
		dlog_indent(indent, ".id = %hu,\n", ptable->id);
		dlog_indent(indent, ".root_tables = {\n");

		indent += 1;
		{
			for (size_t i = 0; i < root_table_count; ++i) {
				dlog_indent(
					indent,
					"[level = %u, index = %zu].entries = ",
					root_level, i);
				mm_dump_entries(root_tables[i].entries,
						root_level - 1, indent);
				dlog(",\n");
			}
		}
		indent -= 1;
		dlog_indent(indent, "},\n");
	}
	indent -= 1;
	dlog_indent(indent, "}\n");
}

/**
 * Given that the table's PTE entries all have identical attributes, returns
 * the single entry with which the table entry can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
{
	struct mm_page_table *table;
	mm_attr_t block_attrs;
	mm_attr_t table_attrs;
	mm_attr_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(struct mm_ptable *ptable,
				   ptable_addr_t base_addr, pte_t *entry,
				   mm_level_t level, struct mm_flags flags,
				   bool non_secure, struct mpool *ppool)
{
	struct mm_page_table *child_table;
	bool mergeable;
	bool base_present;
	mm_attr_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	child_table =
		mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(ptable, base_addr, &(child_table->entries[0]),
			       level - 1, flags, non_secure, ppool);

	base_present =
		arch_mm_pte_is_present(child_table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(child_table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry meaning the table can be merged into a
	 * block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(ptable, block_addr,
				       &(child_table->entries[i]), level - 1,
				       flags, non_secure, ppool);

		present = arch_mm_pte_is_present(child_table->entries[i],
						 level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(child_table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(child_table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(ptable, base_addr, entry, (uintptr_t)new_entry,
				 level, flags, non_secure, ppool);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(ptable->root);
	mm_level_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(ptable, block_addr,
					       &(tables[i].entries[j]), level,
					       flags, non_secure, ppool);
			block_addr = mm_start_of_next_block(block_addr, level);
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(const struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      mm_level_t level, bool got_attrs,
				      mm_attr_t *attrs)
{
	const pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry is owned. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, level);
		pte++;
	}

	/* The entry is a valid block. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t *attrs,
			 struct mm_flags flags)
{
	mm_level_t max_level = mm_max_level(flags);
	mm_level_t root_level = max_level + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(
		ptable->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_level);
		table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool)
{
	return mm_ptable_init(ptable, id, (struct mm_flags){0}, ppool);
}

void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_fini(ptable, (struct mm_flags){0}, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static struct mm_flags mm_mode_to_flags(mm_mode_t mode)
{
	struct mm_flags flags = {0};

	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		flags.unmap = true;
	}

	return flags;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			   mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}
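
/*
 * Usage sketch (illustrative only, not part of the API): callers typically
 * pair a mapping update with a defrag, per the note above. The mode constants
 * MM_MODE_R and MM_MODE_W are assumed to come from "hf/mm.h". On success the
 * range is accessible to the VM at `ipa`.
 *
 *	ipaddr_t ipa;
 *
 *	if (mm_vm_identity_map(&vm_ptable, begin, end, MM_MODE_R | MM_MODE_W,
 *			       ppool, &ipa)) {
 *		...
 *	}
 *	mm_vm_defrag(&vm_ptable, ppool, false);
 */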

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
1066 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001067void mm_vm_dump(const struct mm_ptable *ptable)
Andrew Scullda3df7f2019-01-05 17:49:27 +00001068{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001069 mm_ptable_dump(ptable, (struct mm_flags){0});
Andrew Scullda3df7f2019-01-05 17:49:27 +00001070}
1071
1072/**
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001073 * Defragments a stage1 page table.
1074 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001075void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool)
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001076{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001077 mm_ptable_defrag(ptable, (struct mm_flags){.stage1 = true}, false,
1078 ppool);
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001079}
1080
1081/**
Andrew Scullda3df7f2019-01-05 17:49:27 +00001082 * Defragments the VM page table.
1083 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001084void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
1085 bool non_secure)
Andrew Scullda3df7f2019-01-05 17:49:27 +00001086{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001087 mm_ptable_defrag(ptable, (struct mm_flags){0}, non_secure, ppool);
Andrew Scullda3df7f2019-01-05 17:49:27 +00001088}
1089
1090/**
Fuad Tabba9dc276f2020-07-16 09:29:32 +01001091 * Gets the mode of the given range of intermediate physical addresses if they
Andrew Scull81e85092018-12-12 12:56:20 +00001092 * are mapped with the same mode.
1093 *
1094 * Returns true if the range is mapped with the same mode and false otherwise.
Andrew Scull80871322018-08-06 12:04:09 +01001095 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001096bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001097 ipaddr_t end, mm_mode_t *mode)
Andrew Scull80871322018-08-06 12:04:09 +01001098{
Karl Meakin07a69ab2025-02-07 14:53:19 +00001099 mm_attr_t attrs;
Andrew Scull81e85092018-12-12 12:56:20 +00001100 bool ret;
1101
Karl Meakin1fd4b822025-02-01 17:13:47 +00001102 ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs,
1103 (struct mm_flags){0});
Andrew Scull81e85092018-12-12 12:56:20 +00001104 if (ret) {
1105 *mode = arch_mm_stage2_attrs_to_mode(attrs);
1106 }
1107
1108 return ret;
Andrew Scull80871322018-08-06 12:04:09 +01001109}
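
/*
 * A minimal sketch of checking a VM's view of a range before acting on it:
 * the whole range must be mapped with one uniform mode, and that mode must
 * grant the required access. The example_* name and the R/W check are
 * hypothetical, chosen only for illustration.
 */
static bool example_vm_range_is_rw(const struct mm_ptable *vm_ptable,
				   ipaddr_t begin, ipaddr_t end)
{
	mm_mode_t mode;

	if (!mm_vm_get_mode(vm_ptable, begin, end, &mode)) {
		/* The range is not mapped with a single uniform mode. */
		return false;
	}

	return (mode & (MM_MODE_R | MM_MODE_W)) == (MM_MODE_R | MM_MODE_W);
}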
1110
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001111/**
1112 * Gets the mode of the given range of virtual addresses if they
1113 * are mapped with the same mode.
1114 *
1115 * Returns true if the range is mapped with the same mode and false otherwise.
1116 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001117bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001118 mm_mode_t *mode)
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001119{
Karl Meakin07a69ab2025-02-07 14:53:19 +00001120 mm_attr_t attrs;
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001121 bool ret;
1122
Karl Meakind64aaf82025-02-08 01:12:55 +00001123 ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs,
Karl Meakin1fd4b822025-02-01 17:13:47 +00001124 (struct mm_flags){.stage1 = true});
Raghu Krishnamurthy2323d722021-02-12 22:55:38 -08001125 if (ret) {
1126 *mode = arch_mm_stage1_attrs_to_mode(attrs);
1127 }
1128
1129 return ret;
1130}
1131
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001132static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
1133{
1134 return (struct mm_stage1_locked){.ptable = &ptable};
1135}
1136
Raghu Krishnamurthyd3ab8c32021-02-10 19:11:30 -08001137struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
1138{
1139 return (struct mm_stage1_locked){.ptable = ptable};
1140}
1141
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001142struct mm_stage1_locked mm_lock_stage1(void)
1143{
1144 sl_lock(&ptable_lock);
1145 return mm_stage1_lock_unsafe();
1146}
1147
1148void mm_unlock_stage1(struct mm_stage1_locked *lock)
1149{
Andrew Scull877ae4b2019-07-02 12:52:33 +01001150 CHECK(lock->ptable == &ptable);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001151 sl_unlock(&ptable_lock);
1152 lock->ptable = NULL;
1153}
1154
Andrew Scull80871322018-08-06 12:04:09 +01001155/**
Andrew Scull80871322018-08-06 12:04:09 +01001156 * Updates the hypervisor page table such that the given physical address range
1157 * is mapped into the address space at the corresponding address range in the
1158 * architecture-agnostic mode provided.
1159 */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001160void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001161 paddr_t end, mm_mode_t mode, struct mpool *ppool)
Andrew Scull80871322018-08-06 12:04:09 +01001162{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001163 struct mm_flags flags = mm_mode_to_flags(mode);
Karl Meakin07a69ab2025-02-07 14:53:19 +00001164
Karl Meakin1fd4b822025-02-01 17:13:47 +00001165 flags.stage1 = true;
Andrew Scull73b89542019-11-20 17:31:26 +00001166
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001167 if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
Andrew Scull73b89542019-11-20 17:31:26 +00001168 arch_mm_mode_to_stage1_attrs(mode), flags,
1169 ppool)) {
Andrew Scull4e5f8142018-10-12 14:37:19 +01001170 return ptr_from_va(va_from_pa(begin));
Andrew Scull80871322018-08-06 12:04:09 +01001171 }
1172
1173 return NULL;
1174}
1175
1176/**
1177 * Updates the hypervisor table such that the given physical address range is
1178 * not mapped in the address space.
1179 */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001180bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
1181 struct mpool *ppool)
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001182{
Karl Meakin07a69ab2025-02-07 14:53:19 +00001183 mm_mode_t mode = MM_MODE_UNMAPPED_MASK;
Andrew Scull73b89542019-11-20 17:31:26 +00001184
1185 return mm_identity_map(stage1_locked, begin, end, mode, ppool);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001186}
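
/*
 * A minimal sketch of the stage-1 usage pattern: take the lock, map, use the
 * returned VA, unmap, release the lock. The example_* name and the device
 * address range are hypothetical.
 */
static void example_stage1_map_then_unmap(struct mpool *ppool)
{
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	paddr_t begin = pa_init(0x09000000);
	paddr_t end = pa_add(begin, PAGE_SIZE);
	void *va;

	va = mm_identity_map(stage1_locked, begin, end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
	if (va != NULL) {
		/* ... access the device through `va` ... */
		CHECK(mm_unmap(stage1_locked, begin, end, ppool));
	}

	mm_unlock_stage1(&stage1_locked);
}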
1187
1188/**
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001189 * Defragments the hypervisor page table.
1190 */
1191void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
1192{
Karl Meakin1fd4b822025-02-01 17:13:47 +00001193 mm_ptable_defrag(stage1_locked.ptable,
1194 (struct mm_flags){.stage1 = true}, false, ppool);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001195}
1196
1197/**
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001198 * Initialises memory management for the hypervisor itself.
1199 */
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +00001200bool mm_init(struct mpool *ppool)
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001201{
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001202	/* Locking is not enabled yet, so fake it. */
1203 struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
Karl Meakin07a69ab2025-02-07 14:53:19 +00001204 struct mm_flags flags = {.stage1 = true};
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001205
Karl Meakine8937d92024-03-19 16:04:25 +00001206 dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001207 pa_addr(layout_text_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001208 dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001209 pa_addr(layout_rodata_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001210 dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
Andrew Walbran17eebf92020-02-05 16:35:49 +00001211 pa_addr(layout_data_end()));
Karl Meakine8937d92024-03-19 16:04:25 +00001212 dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001213 pa_addr(layout_stacks_end()));
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001214
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -08001215 /* ASID 0 is reserved for use by the hypervisor. */
Karl Meakin07a69ab2025-02-07 14:53:19 +00001216 if (!mm_ptable_init(&ptable, 0, flags, ppool)) {
Andrew Walbran17eebf92020-02-05 16:35:49 +00001217 dlog_error("Unable to allocate memory for page table.\n");
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001218 return false;
1219 }
1220
Arunachalam Ganapathy0f0f7062022-01-26 17:09:53 +00001221	/* Initialise arch_mm before calling the mapping routines below. */
1222 if (!arch_mm_init(ptable.root)) {
1223 return false;
1224 }
1225
Andrew Walbran48699362019-05-20 14:38:00 +01001226 /* Let console driver map pages for itself. */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001227 plat_console_mm_init(stage1_locked, ppool);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001228
1229 /* Map each section. */
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001230 CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
1231 layout_text_end(), MM_MODE_X, ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001232
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001233 CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
1234 layout_rodata_end(), MM_MODE_R, ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001235
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001236 CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
1237 layout_data_end(), MM_MODE_R | MM_MODE_W,
1238 ppool) != NULL);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001239
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001240 /* Arch-specific stack mapping. */
Raghu Krishnamurthy472a8822022-10-04 21:28:59 -07001241 CHECK(arch_stack_mm_init(stage1_locked, ppool));
Maksims Svecovs134b8f92022-03-04 15:14:09 +00001242
Arunachalam Ganapathy0f0f7062022-01-26 17:09:53 +00001243 return true;
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001244}
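
/*
 * A minimal sketch of how boot code is expected to drive mm_init(): build a
 * page pool from a statically allocated buffer and hand it over. The buffer
 * size and the example_* names are hypothetical.
 */
static struct mm_page_table example_ptable_buf[16];

static bool example_boot_mm_setup(void)
{
	static struct mpool example_ppool;

	mpool_init(&example_ppool, sizeof(struct mm_page_table));
	mpool_add_chunk(&example_ppool, example_ptable_buf,
			sizeof(example_ptable_buf));

	return mm_init(&example_ppool);
}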