/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"
#include "hf/std.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return align_down(addr, PAGE_SIZE);
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return align_up(addr, PAGE_SIZE);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level. See also Arm ARM, table D8-15:
 * - `level == 4`: 256 TiB (1 << 48)
 * - `level == 3`: 512 GiB (1 << 39)
 * - `level == 2`: 1 GiB (1 << 30)
 * - `level == 1`: 2 MiB (1 << 21)
 * - `level == 0`: 4 KiB (1 << 12)
 */
static size_t mm_entry_size(mm_level_t level)
{
	assert(level <= 4);
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

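/*
 * A worked example of the formula above, assuming the usual 4 KiB granule
 * where PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9:
 *
 *	mm_entry_size(0) == 1 << (12 + 0 * 9) == 4 KiB
 *	mm_entry_size(1) == 1 << (12 + 1 * 9) == 2 MiB
 *	mm_entry_size(2) == 1 << (12 + 2 * 9) == 1 GiB
 */
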
/**
 * Gets the start address of the range mapped by the next block of the given
 * level.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    mm_level_t level)
{
	assert(level <= 4);
	return align_up(addr + 1, mm_entry_size(level));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

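/*
 * E.g., assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, for level 1 the
 * offset is 30, so mm_level_end(0x80043000, 1) == 0xc0000000: the exclusive
 * end of the 1 GiB region covered by the level-1 table containing that
 * address.
 */
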
/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level. See also Arm ARM, table D8-14:
 * - `level == 4`: bits[51:48]
 * - `level == 3`: bits[47:39]
 * - `level == 2`: bits[38:30]
 * - `level == 1`: bits[29:21]
 * - `level == 0`: bits[20:12]
 */
static size_t mm_index(ptable_addr_t addr, mm_level_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

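/*
 * Worked example (again assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9):
 * for addr == 0x80043000, mm_index(addr, 0) extracts bits[20:12] == 0x43 and
 * mm_index(addr, 1) extracts bits[29:21] == 0x0, matching the bit ranges
 * listed above.
 */
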
/**
 * Allocates a new page table.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the root level of the page table given the flags.
 */
static mm_level_t mm_root_level(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_root_level()
			    : arch_mm_stage2_root_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(struct mm_flags flags)
{
	return flags.stage1 ? arch_mm_stage1_root_table_count()
			    : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(const struct mm_ptable *ptable,
			      ptable_addr_t begin, ptable_addr_t end,
			      struct mm_flags flags, bool non_secure)
{
	if (flags.stage1) {
		arch_mm_invalidate_stage1_range(ptable->id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ptable->id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
{
	struct mm_page_table *table;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = arch_mm_table_from_pte(pte, level);
	for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags)
{
	return mm_root_table_count(flags) * mm_entry_size(mm_root_level(flags));
}

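/*
 * For instance, a stage-2 configuration with 4 root tables at root level 3
 * would span 4 * mm_entry_size(3) == 4 * 512 GiB == 2 TiB of IPA space
 * (illustrative figures only; the arch layer chooses the real values).
 */
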
/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
		    struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *root_tables;
	uint8_t root_table_count = mm_root_table_count(flags);
	mm_level_t root_level = mm_root_level(flags);

	root_tables = mm_alloc_page_tables(root_table_count, ppool);
	if (root_tables == NULL) {
		return false;
	}

	for (size_t i = 0; i < root_table_count; i++) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
			root_tables[i].entries[j] =
				arch_mm_absent_pte(root_level - 1);
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	ptable->root_tables = root_tables;
	ptable->id = id;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable,
			   struct mm_flags flags, struct mpool *ppool)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);

	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(root_tables[i].entries[j],
					 root_level - 1, ppool);
		}
	}

	mpool_add_chunk(ppool, root_tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues such as loss of cache coherency.
 */
static void mm_replace_entry(const struct mm_ptable *ptable,
			     ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     mm_level_t level, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if ((flags.stage1 || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(ptable, begin, begin + mm_entry_size(level),
				  flags, non_secure);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(
	const struct mm_ptable *ptable, ptable_addr_t begin, pte_t *pte,
	mm_level_t level, struct mm_flags flags, bool non_secure,
	struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t inc;
	mm_level_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return arch_mm_table_from_pte(v, level);
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(ptable, begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, non_secure, ppool);

	return ntable;
}

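/*
 * For example, when mm_populate_table_pte() splits a 2 MiB block at level 1
 * (with the usual 4 KiB granule), the new level-0 table gets 512 page entries
 * whose output addresses step by inc == mm_entry_size(0) == 4 KiB, so the
 * identity mapping and attributes of the original block are preserved exactly.
 */
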
/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if `flags.unmap` is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t attrs,
			 struct mm_page_table *child_table, mm_level_t level,
			 struct mm_flags flags, struct mpool *ppool)
{
	pte_t *pte = &child_table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags.commit;
	bool unmap = flags.unmap;
	bool non_secure = ((attrs & (1ULL << 57)) != 0);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   is_aligned(begin, entry_size)) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(
							level, pa_init(begin),
							attrs);
				mm_replace_entry(ptable, begin, pte, new_pte,
						 level, flags, non_secure,
						 ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(ptable, begin, pte, level,
						      flags, non_secure, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(ptable, begin, end, attrs, nt,
					  level - 1, flags, ppool)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, level);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes.
 *
 * Flags:
 * - `flags.unmap`: unmap the given range instead of mapping it.
 * - `flags.commit`: the change is only committed if this flag is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
				   paddr_t pa_end, mm_attr_t attrs,
				   struct mm_flags flags, struct mpool *ppool)
{
	mm_level_t root_level = mm_root_level(flags);
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = mm_round_down_to_page(pa_addr(pa_begin));
	struct mm_page_table *root_table =
		&ptable->root_tables[mm_index(begin, root_level)];

	/*
	 * Assert to communicate to the static analyzer the API constraint of
	 * mm_root_level() that isn't encoded in the types.
	 */
	assert(root_level >= 3);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	while (begin < end) {
		if (!mm_map_level(ptable, begin, end, attrs, root_table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_level);
		root_table++;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will ensure that the
 * corresponding calls to commit the changes succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
				       paddr_t pa_begin, paddr_t pa_end,
				       mm_attr_t attrs, struct mm_flags flags,
				       struct mpool *ppool)
{
	flags.commit = false;
	return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				      ppool);
}

499 * Commits the given address mapping to the page table assuming the operation
500 * cannot fail. `mm_ptable_identity_prepare` must used correctly before this to
501 * ensure this condition.
502 *
503 * Without the table being properly prepared, the commit may only partially
504 * complete if it runs out of memory resulting in an inconsistent state that
505 * isn't handled.
506 *
507 * Since the non-failure assumtion is used in the reasoning about the atomicity
508 * of higher level memory operations, any detected violations result in a panic.
509 *
510 * TODO: remove ppool argument to be sure no changes are made.
511 */
Karl Meakind64aaf82025-02-08 01:12:55 +0000512static void mm_ptable_identity_commit(struct mm_ptable *ptable,
513 paddr_t pa_begin, paddr_t pa_end,
Karl Meakin07a69ab2025-02-07 14:53:19 +0000514 mm_attr_t attrs, struct mm_flags flags,
Andrew Scull4e83cef2019-11-19 14:17:54 +0000515 struct mpool *ppool)
516{
Karl Meakin1fd4b822025-02-01 17:13:47 +0000517 flags.commit = true;
518 CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
519 ppool));
Andrew Scull4e83cef2019-11-19 14:17:54 +0000520}
521
/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
				  ppool);

	return true;
}

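/*
 * The prepare/commit split above is what makes updates atomic. A sketch of
 * the pattern (error handling elided):
 *
 *	if (!mm_ptable_identity_prepare(t, begin, end, attrs, flags, ppool)) {
 *		return false;	// Nothing was changed in the tables.
 *	}
 *	mm_ptable_identity_commit(t, begin, end, attrs, flags, ppool);
 *	// Cannot fail: every subtable the commit needs already exists.
 */
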
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent);

static void mm_dump_block_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
	paddr_t addr = arch_mm_block_from_pte(entry, level);

	if (arch_mm_pte_is_valid(entry, level)) {
		if (level == 0) {
			dlog("page {\n");
		} else {
			dlog("block {\n");
		}
	} else {
		dlog("invalid_block {\n");
	}

	indent += 1;
	{
		dlog_indent(indent, ".addr = %#016lx\n", pa_addr(addr));
		dlog_indent(indent, ".attrs = %#016lx\n", attrs);
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	dlog("table {\n");
	indent += 1;
	{
		mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
		const struct mm_page_table *child_table =
			arch_mm_table_from_pte(entry, level);
		paddr_t addr = pa_init((uintpaddr_t)child_table);

		dlog_indent(indent, ".pte = %#016lx,\n", entry);
		dlog_indent(indent, ".attrs = %#016lx,\n", attrs);
		dlog_indent(indent, ".addr = %#016lx,\n", pa_addr(addr));
		dlog_indent(indent, ".entries = ");
		mm_dump_entries(child_table->entries, level - 1, indent);
		dlog(",\n");
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	switch (arch_mm_pte_type(entry, level)) {
	case PTE_TYPE_ABSENT:
		dlog("absent {}");
		break;
	case PTE_TYPE_INVALID_BLOCK:
	case PTE_TYPE_VALID_BLOCK: {
		mm_dump_block_entry(entry, level, indent);
		break;
	}
	case PTE_TYPE_TABLE: {
		mm_dump_table_entry(entry, level, indent);
		break;
	}
	}
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent)
{
	dlog("{\n");
	indent += 1;

	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		pte_t entry = entries[i];

		if (arch_mm_pte_is_absent(entry, level)) {
			continue;
		}

		dlog_indent(indent, "[level = %u, index = %zu] = ", level, i);
		mm_dump_entry(entry, level, indent);
		dlog(",\n");
	}

	indent -= 1;
	dlog_indent(indent, "}");
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable,
			   struct mm_flags flags)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint32_t indent = 0;

	dlog_indent(indent, "mm_ptable {\n");
	indent += 1;
	{
		dlog_indent(indent, ".stage = %s,\n",
			    flags.stage1 ? "stage1" : "stage2");
		dlog_indent(indent, ".id = %hu,\n", ptable->id);
		dlog_indent(indent, ".root_tables = {\n");

		indent += 1;
		{
			for (size_t i = 0; i < root_table_count; ++i) {
				dlog_indent(
					indent,
					"[level = %u, index = %zu].entries = ",
					root_level, i);
				mm_dump_entries(root_tables[i].entries,
						root_level - 1, indent);
				dlog(",\n");
			}
		}
		indent -= 1;
		dlog_indent(indent, "},\n");
	}
	indent -= 1;
	dlog_indent(indent, "}\n");
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which it can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
{
	struct mm_page_table *table;
	mm_attr_t block_attrs;
	mm_attr_t table_attrs;
	mm_attr_t combined_attrs;
	paddr_t block_address;

	table = arch_mm_table_from_pte(table_pte, level);

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(struct mm_ptable *ptable,
				   ptable_addr_t base_addr, pte_t *entry,
				   mm_level_t level, struct mm_flags flags,
				   bool non_secure, struct mpool *ppool)
{
	struct mm_page_table *child_table;
	bool mergeable;
	bool base_present;
	mm_attr_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	child_table = arch_mm_table_from_pte(*entry, level);

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(ptable, base_addr, &(child_table->entries[0]),
			       level - 1, flags, non_secure, ppool);

	base_present =
		arch_mm_pte_is_present(child_table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(child_table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry meaning the table can be merged into a
	 * block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(ptable, block_addr,
				       &(child_table->entries[i]), level - 1,
				       flags, non_secure, ppool);

		present = arch_mm_pte_is_present(child_table->entries[i],
						 level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(child_table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(child_table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(ptable, base_addr, entry, (uintptr_t)new_entry,
				 level, flags, non_secure, ppool);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, struct mm_flags flags,
			     bool non_secure, struct mpool *ppool)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(
				ptable, block_addr, &root_tables[i].entries[j],
				root_level - 1, flags, non_secure, ppool);
			block_addr = mm_start_of_next_block(block_addr,
							    root_level - 1);
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_ptable_get_attrs_level(const struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      mm_level_t level, bool got_attrs,
				      mm_attr_t *attrs)
{
	const pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    arch_mm_table_from_pte(*pte, level), begin,
				    end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, level);
		pte++;
	}

	/* The whole range has the same attributes. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the page
 * tables.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t *attrs,
			 struct mm_flags flags)
{
	mm_level_t root_level = mm_root_level(flags);
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	struct mm_page_table *root_table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	root_table = &ptable->root_tables[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(root_table, begin, end,
					       root_level - 1, got_attrs,
					       attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_level);
		root_table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool)
{
	return mm_ptable_init(ptable, id, (struct mm_flags){0}, ppool);
}

void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_fini(ptable, (struct mm_flags){0}, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static struct mm_flags mm_mode_to_flags(mm_mode_t mode)
{
	struct mm_flags flags = {0};

	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		flags.unmap = true;
	}

	return flags;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage1_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_identity_prepare` must be called before this for the same mapping.
 */
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage1_attrs(mode), flags,
				  ppool);
	return ptr_from_va(va_from_pa(begin));
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(ptable, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			   mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(ptable, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	struct mm_flags flags = mm_mode_to_flags(mode);
	bool success = mm_ptable_identity_update(
		ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

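/*
 * A minimal usage sketch (illustrative only; `vm_ptable`, `dev_begin`,
 * `dev_end` and `local_pool` are hypothetical names), assuming MM_MODE_R/W/D
 * are the usual mode bits:
 *
 *	ipaddr_t ipa;
 *	if (mm_vm_identity_map(&vm_ptable, dev_begin, dev_end,
 *			       MM_MODE_R | MM_MODE_W | MM_MODE_D,
 *			       &local_pool, &ipa)) {
 *		// The device is now accessible to the VM at `ipa`.
 *	}
 *	mm_vm_defrag(&vm_ptable, &local_pool, false);
 */
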
/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(const struct mm_ptable *ptable)
{
	mm_ptable_dump(ptable, (struct mm_flags){0});
}

/**
 * Defragments a stage-1 page table.
 */
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool)
{
	mm_ptable_defrag(ptable, (struct mm_flags){.stage1 = true}, false,
			 ppool);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure)
{
	mm_ptable_defrag(ptable, (struct mm_flags){0}, non_secure, ppool);
}

1078/**
Fuad Tabba9dc276f2020-07-16 09:29:32 +01001079 * Gets the mode of the given range of intermediate physical addresses if they
Andrew Scull81e85092018-12-12 12:56:20 +00001080 * are mapped with the same mode.
1081 *
1082 * Returns true if the range is mapped with the same mode and false otherwise.
Andrew Scull80871322018-08-06 12:04:09 +01001083 */
Karl Meakind64aaf82025-02-08 01:12:55 +00001084bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
Karl Meakin07a69ab2025-02-07 14:53:19 +00001085 ipaddr_t end, mm_mode_t *mode)
Andrew Scull80871322018-08-06 12:04:09 +01001086{
Karl Meakin07a69ab2025-02-07 14:53:19 +00001087 mm_attr_t attrs;
Andrew Scull81e85092018-12-12 12:56:20 +00001088 bool ret;
1089
Karl Meakin1fd4b822025-02-01 17:13:47 +00001090 ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs,
1091 (struct mm_flags){0});
Andrew Scull81e85092018-12-12 12:56:20 +00001092 if (ret) {
1093 *mode = arch_mm_stage2_attrs_to_mode(attrs);
1094 }
1095
1096 return ret;
Andrew Scull80871322018-08-06 12:04:09 +01001097}
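
/*
 * Illustrative sketch (not part of this file): before acting on a range, a
 * caller can check that the whole range is uniformly mapped with the access
 * it needs. The `vm_ptable` name and the addresses are hypothetical.
 *
 *	mm_mode_t mode;
 *
 *	if (mm_vm_get_mode(&vm_ptable, ipa_init(0x40000000),
 *			   ipa_init(0x40200000), &mode) &&
 *	    (mode & MM_MODE_R) != 0) {
 *		// Every page in the range is readable by the VM.
 *	}
 */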

/**
 * Gets the mode of the given range of virtual addresses if they are mapped
 * with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 mm_mode_t *mode)
{
	mm_attr_t attrs;
	bool ret;

	ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs,
			   (struct mm_flags){.stage1 = true});
	if (ret) {
		*mode = arch_mm_stage1_attrs_to_mode(attrs);
	}

	return ret;
}

static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable)
{
	return (struct mm_stage1_locked){.ptable = ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}
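
/*
 * Illustrative sketch (not part of this file): stage-1 updates to the
 * hypervisor's own table are bracketed by the lock/unlock pair. The MMIO
 * range and the `ppool` name below are hypothetical.
 *
 *	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
 *	void *base = mm_identity_map(stage1_locked, pa_init(0x9000000),
 *				     pa_init(0x9001000),
 *				     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
 *
 *	mm_unlock_stage1(&stage1_locked);
 */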

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_flags flags = mm_mode_to_flags(mode);

	flags.stage1 = true;

	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode), flags,
				      ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
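
/*
 * Illustrative sketch (not part of this file): because stage-1 mappings are
 * identity mappings, the pointer returned above can be dereferenced directly.
 * The buffer address and the `ppool` name are hypothetical.
 *
 *	uint8_t *buf = mm_identity_map(stage1_locked, pa_init(0x48000000),
 *				       pa_init(0x48001000),
 *				       MM_MODE_R | MM_MODE_W, ppool);
 *
 *	if (buf != NULL) {
 *		buf[0] = 0xaa;
 *	}
 */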

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	return mm_identity_map(stage1_locked, begin, end, mode, ppool);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
	mm_ptable_defrag(stage1_locked.ptable,
			 (struct mm_flags){.stage1 = true}, false, ppool);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet, so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();
	struct mm_flags flags = {.stage1 = true};

	dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()),
		  pa_addr(layout_text_end()));
	dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()),
		  pa_addr(layout_rodata_end()));
	dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()),
		  pa_addr(layout_data_end()));
	dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()),
		  pa_addr(layout_stacks_end()));

	/* ASID 0 is reserved for use by the hypervisor. */
	if (!mm_ptable_init(&ptable, 0, flags, ppool)) {
		dlog_error("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Initialise arch_mm before calling the mapping routines below. */
	if (!arch_mm_init(&ptable)) {
		return false;
	}

	/* Let the console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	CHECK(mm_identity_map(stage1_locked, layout_text_begin(),
			      layout_text_end(), MM_MODE_X, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(),
			      layout_rodata_end(), MM_MODE_R, ppool) != NULL);

	CHECK(mm_identity_map(stage1_locked, layout_data_begin(),
			      layout_data_end(), MM_MODE_R | MM_MODE_W,
			      ppool) != NULL);

	/* Arch-specific stack mapping. */
	CHECK(arch_stack_mm_init(stage1_locked, ppool));

	return true;
}
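
/*
 * Illustrative sketch (not part of this file): boot code is expected to seed
 * a memory pool with page-aligned memory before calling mm_init(). The buffer
 * size below is hypothetical.
 *
 *	static alignas(PAGE_SIZE) char ptable_buf[PAGE_SIZE * 64];
 *	static struct mpool ppool;
 *
 *	mpool_init(&ppool, sizeof(struct mm_page_table));
 *	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
 *	if (!mm_init(&ppool)) {
 *		panic("mm_init failed");
 *	}
 */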