/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/init.h"
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"
#include "hf/std.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to
 * access memory, and a stage 2 mapping per VM. The design assumes that all
 * page tables contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}
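
/*
 * Note: until mm_vm_enable_invalidation() is called, stage-2 updates skip the
 * break-before-make sequence in mm_replace_entry(). Presumably this is only
 * safe while the stage-2 tables are not yet in use by any running vCPU, e.g.
 * during early boot.
 */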

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return align_down(addr, PAGE_SIZE);
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return align_up(addr, PAGE_SIZE);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level. See also Arm ARM, table D8-15:
 * - `level == 4`: 256 TiB (1 << 48)
 * - `level == 3`: 512 GiB (1 << 39)
 * - `level == 2`: 1 GiB (1 << 30)
 * - `level == 1`: 2 MiB (1 << 21)
 * - `level == 0`: 4 KiB (1 << 12)
 */
static size_t mm_entry_size(mm_level_t level)
{
	assert(level <= 4);
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Get the start address of the range mapped by the next block of the given
 * level.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    mm_level_t level)
{
	assert(level <= 4);
	return align_up(addr + 1, mm_entry_size(level));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, mm_level_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}
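
/*
 * Worked example for mm_level_end() (a sketch, assuming the usual 4 KiB
 * granule where PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9): for
 * addr == 0x40801000 and level == 1, offset == 12 + 2 * 9 == 30, so the
 * result is ((0x40801000 >> 30) + 1) << 30 == 0x80000000, i.e. the end of
 * the 1 GiB region covered by the level 1 table containing the address.
 */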

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level. See also Arm ARM, table D8-14:
 * - `level == 4`: bits[51:48]
 * - `level == 3`: bits[47:39]
 * - `level == 2`: bits[38:30]
 * - `level == 1`: bits[29:21]
 * - `level == 0`: bits[20:12]
 */
static size_t mm_index(ptable_addr_t addr, mm_level_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
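
/*
 * Worked example for mm_index() (same 4 KiB granule assumption): for
 * addr == 0x40801000, mm_index(addr, 2) == (addr >> 30) & 0x1ff == 1 and
 * mm_index(addr, 1) == (addr >> 21) & 0x1ff == 4, i.e. the address falls in
 * the second 1 GiB entry of its level 2 table and the fifth 2 MiB entry of
 * its level 1 table.
 */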

/**
 * Allocates `count` new page tables. When more than one is requested, the
 * tables are allocated contiguously and aligned to `count` pages.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}
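
/*
 * A sketch of why mm_alloc_page_tables() asks for contiguity and alignment:
 * the multiple root tables of a concatenated translation table (as used for
 * stage 2 on Arm) must form one physically contiguous, naturally aligned
 * block, since the hardware indexes them as a single larger table.
 */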

/**
 * Returns the root level of the given page table.
 */
static mm_level_t mm_root_level(const struct mm_ptable *ptable)
{
	return ptable->stage1 ? arch_mm_stage1_root_level()
			      : arch_mm_stage2_root_level();
}

/**
 * Returns the number of root-level tables in the given page table.
 */
static uint8_t mm_root_table_count(const struct mm_ptable *ptable)
{
	return ptable->stage1 ? arch_mm_stage1_root_table_count()
			      : arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(const struct mm_ptable *ptable,
			      ptable_addr_t begin, ptable_addr_t end,
			      bool non_secure)
{
	if (ptable->stage1) {
		arch_mm_invalidate_stage1_range(ptable->id, va_init(begin),
						va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ptable->id, ipa_init(begin),
						ipa_init(end), non_secure);
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_free_page_pte(pte_t pte, mm_level_t level, struct mpool *ppool)
{
	struct mm_page_table *table;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = arch_mm_table_from_pte(pte, level);
	for (size_t i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in the given page table.
 * It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(const struct mm_ptable *ptable)
{
	return mm_root_table_count(ptable) *
	       mm_entry_size(mm_root_level(ptable));
}
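
/*
 * Worked example (a sketch; the actual values are architecture-dependent):
 * with a stage 2 configuration of 4 concatenated root tables at root level 3,
 * mm_ptable_addr_space_end() == 4 * mm_entry_size(3) == 4 * 512 GiB == 2 TiB,
 * i.e. a 41-bit intermediate physical address space.
 */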

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id, bool stage1,
		    struct mpool *ppool)
{
	struct mm_page_table *root_tables;
	uint8_t root_table_count = stage1 ? arch_mm_stage1_root_table_count()
					  : arch_mm_stage2_root_table_count();
	mm_level_t root_level = stage1 ? arch_mm_stage1_root_level()
				       : arch_mm_stage2_root_level();

	root_tables = mm_alloc_page_tables(root_table_count, ppool);
	if (root_tables == NULL) {
		return false;
	}

	for (size_t i = 0; i < root_table_count; i++) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; j++) {
			root_tables[i].entries[j] =
				arch_mm_absent_pte(root_level - 1);
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	ptable->id = id;
	ptable->root_tables = root_tables;
	ptable->stage1 = stage1;
	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(const struct mm_ptable *ptable, struct mpool *ppool)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(ptable);
	uint8_t root_table_count = mm_root_table_count(ptable);

	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(root_tables[i].entries[j],
					 root_level - 1, ppool);
		}
	}

	mpool_add_chunk(ppool, root_tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(const struct mm_ptable *ptable,
			     ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     mm_level_t level, bool non_secure,
			     struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if ((ptable->stage1 || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(ptable, begin, begin + mm_entry_size(level),
				  non_secure);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(struct mm_ptable *ptable,
						   ptable_addr_t begin,
						   pte_t *pte, mm_level_t level,
						   bool non_secure,
						   struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t inc;
	mm_level_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return arch_mm_table_from_pte(v, level);
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(ptable, begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, non_secure, ppool);

	return ntable;
}
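
/*
 * Illustrative sketch of mm_populate_table_pte() splitting a block (assuming
 * a 4 KiB granule): a 2 MiB block at level 1 is replaced by a table PTE
 * pointing at a new level 0 table of 512 page entries whose addresses step by
 * 4 KiB (`inc`) and whose attributes copy the original block, so the mapping
 * is unchanged but can now be modified at page granularity.
 */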

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if `flags.unmap` is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static bool mm_map_level(struct mm_ptable *ptable, ptable_addr_t begin,
			 ptable_addr_t end, mm_attr_t attrs,
			 struct mm_page_table *child_table, mm_level_t level,
			 struct mm_flags flags, struct mpool *ppool)
{
	pte_t *pte = &child_table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags.commit;
	bool unmap = flags.unmap;
	/*
	 * Bit 57 is one of the software-defined PTE attribute bits
	 * (bits[58:55] in the VMSAv8 descriptor format); here it appears to
	 * mark the mapping as non-secure.
	 */
	bool non_secure = ((attrs & (1ULL << 57)) != 0);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   is_aligned(begin, entry_size)) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(
							level, pa_init(begin),
							attrs);
				mm_replace_entry(ptable, begin, pte, new_pte,
						 level, non_secure, ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				ptable, begin, pte, level, non_secure, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(ptable, begin, end, attrs, nt,
					  level - 1, flags, ppool)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, level);
		pte++;
	}

	return true;
}
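
/*
 * Illustrative trace of mm_map_level() (a sketch, assuming a 4 KiB granule
 * and blocks allowed at level 1): mapping [0x0, 0x300000) starting at level 1
 * commits a 2 MiB block for [0x0, 0x200000), then, since the remaining 1 MiB
 * is smaller than the 2 MiB entry size, populates a subtable and recurses to
 * level 0 to map the remaining 256 pages individually.
 */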

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes.
 *
 * Flags:
 * - `flags.unmap`: unmap the given range instead of mapping it.
 * - `flags.commit`: the change is only committed if this flag is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *ptable, paddr_t pa_begin,
				   paddr_t pa_end, mm_attr_t attrs,
				   struct mm_flags flags, struct mpool *ppool)
{
	mm_level_t root_level = mm_root_level(ptable);
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(ptable);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = mm_round_down_to_page(pa_addr(pa_begin));
	struct mm_page_table *root_table =
		&ptable->root_tables[mm_index(begin, root_level)];

	/*
	 * Assert to communicate to the static analyzer an API constraint of
	 * mm_root_level() that is not encoded in the types.
	 */
	assert(root_level >= 3);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		dlog_verbose(
			"ptable_map: input range end falls outside of ptable "
			"address space (%#016lx > %#016lx), capping to ptable "
			"address space end\n",
			end, ptable_end);
		end = ptable_end;
	}

	if (begin >= end) {
		dlog_verbose(
			"ptable_map: input range is backwards (%#016lx >= "
			"%#016lx), request will have no effect\n",
			begin, end);
	} else if (pa_addr(pa_begin) >= pa_addr(pa_end)) {
		dlog_verbose(
			"ptable_map: input range was backwards (%#016lx >= "
			"%#016lx), but due to rounding the range %#016lx to "
			"%#016lx will be mapped\n",
			pa_addr(pa_begin), pa_addr(pa_end), begin, end);
	}

	while (begin < end) {
		if (!mm_map_level(ptable, begin, end, attrs, root_table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_level);
		root_table++;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that code
	 * following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *ptable,
				       paddr_t pa_begin, paddr_t pa_end,
				       mm_attr_t attrs, struct mm_flags flags,
				       struct mpool *ppool)
{
	flags.commit = false;
	return mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				      ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory, resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher-level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	flags.commit = true;
	CHECK(mm_ptable_identity_map(ptable, pa_begin, pa_end, attrs, flags,
				     ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *ptable,
				      paddr_t pa_begin, paddr_t pa_end,
				      mm_attr_t attrs, struct mm_flags flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(ptable, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(ptable, pa_begin, pa_end, attrs, flags,
				  ppool);

	return true;
}

static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent);

static void mm_dump_block_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
	paddr_t addr = arch_mm_block_from_pte(entry, level);

	if (arch_mm_pte_is_valid(entry, level)) {
		if (level == 0) {
			dlog("page {\n");
		} else {
			dlog("block {\n");
		}
	} else {
		dlog("invalid_block {\n");
	}

	indent += 1;
	{
		dlog_indent(indent, ".addr = %#016lx\n", pa_addr(addr));
		dlog_indent(indent, ".attrs = %#016lx\n", attrs);
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_table_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	dlog("table {\n");
	indent += 1;
	{
		mm_attr_t attrs = arch_mm_pte_attrs(entry, level);
		const struct mm_page_table *child_table =
			arch_mm_table_from_pte(entry, level);
		paddr_t addr = pa_init((uintpaddr_t)child_table);

		dlog_indent(indent, ".pte = %#016lx,\n", entry);
		dlog_indent(indent, ".attrs = %#016lx,\n", attrs);
		dlog_indent(indent, ".addr = %#016lx,\n", pa_addr(addr));
		dlog_indent(indent, ".entries = ");
		mm_dump_entries(child_table->entries, level - 1, indent);
		dlog(",\n");
	}
	indent -= 1;
	dlog_indent(indent, "}");
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entry(pte_t entry, mm_level_t level, uint32_t indent)
{
	switch (arch_mm_pte_type(entry, level)) {
	case PTE_TYPE_ABSENT:
		dlog("absent {}");
		break;
	case PTE_TYPE_INVALID_BLOCK:
	case PTE_TYPE_VALID_BLOCK: {
		mm_dump_block_entry(entry, level, indent);
		break;
	}
	case PTE_TYPE_TABLE: {
		mm_dump_table_entry(entry, level, indent);
		break;
	}
	}
}

// NOLINTNEXTLINE(misc-no-recursion)
static void mm_dump_entries(const pte_t *entries, mm_level_t level,
			    uint32_t indent)
{
	dlog("{\n");
	indent += 1;

	for (size_t i = 0; i < MM_PTE_PER_PAGE; i++) {
		pte_t entry = entries[i];

		if (arch_mm_pte_is_absent(entry, level)) {
			continue;
		}

		dlog_indent(indent, "[level = %u, index = %zu] = ", level, i);
		mm_dump_entry(entry, level, indent);
		dlog(",\n");
	}

	indent -= 1;
	dlog_indent(indent, "}");
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(const struct mm_ptable *ptable)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(ptable);
	uint8_t root_table_count = mm_root_table_count(ptable);
	uint32_t indent = 0;

	dlog_indent(indent, "mm_ptable {\n");
	indent += 1;
	{
		dlog_indent(indent, ".stage = %s,\n",
			    ptable->stage1 ? "stage1" : "stage2");
		dlog_indent(indent, ".id = %hu,\n", ptable->id);
		dlog_indent(indent, ".root_tables = {\n");

		indent += 1;
		{
			for (size_t i = 0; i < root_table_count; ++i) {
				dlog_indent(
					indent,
					"[level = %u, index = %zu].entries = ",
					root_level, i);
				mm_dump_entries(root_tables[i].entries,
						root_level - 1, indent);
				dlog(",\n");
			}
		}
		indent -= 1;
		dlog_indent(indent, "},\n");
	}
	indent -= 1;
	dlog_indent(indent, "}\n");
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which it can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, mm_level_t level)
{
	struct mm_page_table *table;
	mm_attr_t block_attrs;
	mm_attr_t table_attrs;
	mm_attr_t combined_attrs;
	paddr_t block_address;

	table = arch_mm_table_from_pte(table_pte, level);

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}
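
/*
 * Illustrative sketch of mm_merge_table_pte() (assuming a 4 KiB granule): a
 * level 1 table PTE whose level 0 table maps 512 contiguous 4 KiB pages with
 * identical attributes is replaced by a single 2 MiB block PTE at level 1;
 * if the first entry is absent, the whole table collapses to an absent PTE.
 */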

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
// NOLINTNEXTLINE(misc-no-recursion)
static void mm_ptable_defrag_entry(struct mm_ptable *ptable,
				   ptable_addr_t base_addr, pte_t *entry,
				   mm_level_t level, bool non_secure,
				   struct mpool *ppool)
{
	struct mm_page_table *child_table;
	bool mergeable;
	bool base_present;
	mm_attr_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	child_table = arch_mm_table_from_pte(*entry, level);

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(ptable, base_addr, &(child_table->entries[0]),
			       level - 1, non_secure, ppool);

	base_present =
		arch_mm_pte_is_present(child_table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(child_table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry, meaning the table can be merged into
	 * a block entry. This assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (size_t i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(ptable, block_addr,
				       &(child_table->entries[i]), level - 1,
				       non_secure, ppool);

		present = arch_mm_pte_is_present(child_table->entries[i],
						 level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(child_table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(child_table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(ptable, base_addr, entry, (uintptr_t)new_entry,
				 level, non_secure, ppool);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *ptable, bool non_secure,
			     struct mpool *ppool)
{
	struct mm_page_table *root_tables = ptable->root_tables;
	mm_level_t root_level = mm_root_level(ptable);
	uint8_t root_table_count = mm_root_table_count(ptable);
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (size_t i = 0; i < root_table_count; ++i) {
		for (size_t j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(
				ptable, block_addr, &root_tables[i].entries[j],
				root_level - 1, non_secure, ppool);
			block_addr = mm_start_of_next_block(block_addr,
							    root_level - 1);
		}
	}

	arch_mm_sync_table_writes();
}
| 840 | |
| 841 | /** |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 842 |  * Gets the attributes applied to the given range of addresses at the given
| 843 |  * level of the page table.
| 844 |  *
| 845 |  * The `got_attrs` argument is passed as false until `attrs` has been filled
| 846 |  * with the attributes of part of the range, at which point it is passed as true.
| 847 | * |
| 848 | * The value returned in `attrs` is only valid if the function returns true. |
| 849 | * |
| 850 | * Returns true if the whole range has the same attributes and false otherwise. |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 851 | */ |
Daniel Boulby | 8adf748 | 2021-09-22 15:12:44 +0100 | [diff] [blame] | 852 | // NOLINTNEXTLINE(misc-no-recursion) |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 853 | static bool mm_ptable_get_attrs_level(const struct mm_page_table *table, |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 854 | ptable_addr_t begin, ptable_addr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 855 | mm_level_t level, bool got_attrs, |
| 856 | mm_attr_t *attrs) |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 857 | { |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 858 | const pte_t *pte = &table->entries[mm_index(begin, level)]; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 859 | ptable_addr_t level_end = mm_level_end(begin, level); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 860 | |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 861 | /* Cap end so that we don't go over the current level max. */ |
| 862 | if (end > level_end) { |
| 863 | end = level_end; |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 864 | } |
| 865 | |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 866 | 	/* Check that each entry in the range has the same attributes. */
| 867 | while (begin < end) { |
Karl Meakin | d969647 | 2025-02-18 14:07:25 +0000 | [diff] [blame] | 868 | switch (arch_mm_pte_type(*pte, level)) { |
| 869 | case PTE_TYPE_TABLE: { |
| 870 | const struct mm_page_table *child_table = |
| 871 | arch_mm_table_from_pte(*pte, level); |
| 872 | bool child_ret = mm_ptable_get_attrs_level( |
| 873 | child_table, begin, end, level - 1, got_attrs, |
| 874 | attrs); |
| 875 | |
| 876 | if (!child_ret) { |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 877 | return false; |
| 878 | } |
| 879 | got_attrs = true; |
Karl Meakin | d969647 | 2025-02-18 14:07:25 +0000 | [diff] [blame] | 880 | break; |
| 881 | } |
| 882 | |
| 883 | case PTE_TYPE_ABSENT: |
| 884 | case PTE_TYPE_INVALID_BLOCK: |
| 885 | case PTE_TYPE_VALID_BLOCK: { |
| 886 | mm_attr_t block_attrs = arch_mm_pte_attrs(*pte, level); |
| 887 | |
| 888 | if (got_attrs && block_attrs != *attrs) { |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 889 | return false; |
| 890 | } |
Karl Meakin | d969647 | 2025-02-18 14:07:25 +0000 | [diff] [blame] | 891 | got_attrs = true; |
| 892 | *attrs = block_attrs; |
| 893 | break; |
| 894 | } |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 895 | } |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 896 | |
Karl Meakin | 25954e3 | 2025-02-07 16:12:51 +0000 | [diff] [blame] | 897 | begin = mm_start_of_next_block(begin, level); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 898 | pte++; |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 899 | } |
| 900 | |
Andrew Scull | c66a04d | 2018-12-07 13:41:56 +0000 | [diff] [blame] | 901 | 	/* The range was consistent; report whether any attributes were gathered. */
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 902 | return got_attrs; |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 903 | } |
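
/*
 * Illustrative sketch (hypothetical helper, not part of the build): how a
 * caller seeds the `got_attrs` protocol for a single root table. The first
 * call passes got_attrs == false so that `attrs` is filled in; mm_get_attrs()
 * below is the real caller and additionally handles ranges that span several
 * root tables.
 */
static bool mm_example_attrs_of_range(const struct mm_page_table *root_table,
				      ptable_addr_t begin, ptable_addr_t end,
				      mm_level_t root_level, mm_attr_t *attrs)
{
	return mm_ptable_get_attrs_level(root_table, begin, end,
					 root_level - 1, false, attrs);
}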
| 904 | |
| 905 | /** |
Raghu Krishnamurthy | 2323d72 | 2021-02-12 22:55:38 -0800 | [diff] [blame] | 906 | * Gets the attributes applied to the given range of addresses in the page |
| 907 | * tables. |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 908 | * |
| 909 | * The value returned in `attrs` is only valid if the function returns true. |
| 910 | * |
| 911 | * Returns true if the whole range has the same attributes and false otherwise. |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 912 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 913 | static bool mm_get_attrs(const struct mm_ptable *ptable, ptable_addr_t begin, |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 914 | ptable_addr_t end, mm_attr_t *attrs) |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 915 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 916 | mm_level_t root_level = mm_root_level(ptable); |
| 917 | ptable_addr_t ptable_end = mm_ptable_addr_space_end(ptable); |
Karl Meakin | e1aeb1d | 2025-02-08 00:35:14 +0000 | [diff] [blame] | 918 | struct mm_page_table *root_table; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 919 | bool got_attrs = false; |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 920 | |
Karl Meakin | 3050695 | 2025-02-18 18:13:06 +0000 | [diff] [blame^] | 921 | if (begin >= end) { |
| 922 | dlog_verbose( |
| 923 | "mm_get: input range is backwards (%#016lx >= " |
| 924 | "%#016lx)\n", |
| 925 | begin, end); |
| 926 | } |
| 927 | |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 928 | begin = mm_round_down_to_page(begin); |
| 929 | end = mm_round_up_to_page(end); |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 930 | |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 931 | /* Fail if the addresses are out of range. */ |
| 932 | if (end > ptable_end) { |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 933 | return false; |
| 934 | } |
| 935 | |
Karl Meakin | e1aeb1d | 2025-02-08 00:35:14 +0000 | [diff] [blame] | 936 | root_table = &ptable->root_tables[mm_index(begin, root_level)]; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 937 | while (begin < end) { |
Karl Meakin | e1aeb1d | 2025-02-08 00:35:14 +0000 | [diff] [blame] | 938 | if (!mm_ptable_get_attrs_level(root_table, begin, end, |
Karl Meakin | a3a9f95 | 2025-02-08 00:11:16 +0000 | [diff] [blame] | 939 | root_level - 1, got_attrs, |
| 940 | attrs)) { |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 941 | return false; |
| 942 | } |
| 943 | |
| 944 | got_attrs = true; |
Karl Meakin | 25954e3 | 2025-02-07 16:12:51 +0000 | [diff] [blame] | 945 | begin = mm_start_of_next_block(begin, root_level); |
Karl Meakin | e1aeb1d | 2025-02-08 00:35:14 +0000 | [diff] [blame] | 946 | root_table++; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 947 | } |
| 948 | |
| 949 | return got_attrs; |
Wedson Almeida Filho | 2f94ec1 | 2018-07-26 16:00:48 +0100 | [diff] [blame] | 950 | } |
| 951 | |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 952 | bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool) |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 953 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 954 | return mm_ptable_init(ptable, id, false, ppool); |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 955 | } |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 956 | |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 957 | void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool) |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 958 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 959 | mm_ptable_fini(ptable, ppool); |
Andrew Scull | 1ba470e | 2018-10-31 15:14:31 +0000 | [diff] [blame] | 960 | } |
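
/*
 * Usage sketch (hypothetical caller, not part of the build): the lifecycle
 * of a VM's stage-2 table, returning its pages to the pool on teardown.
 */
static bool mm_example_vm_lifecycle(struct mm_ptable *ptable, mm_asid_t id,
				    struct mpool *ppool)
{
	if (!mm_vm_init(ptable, id, ppool)) {
		return false;
	}

	/* ... map ranges and run the VM ... */

	mm_vm_fini(ptable, ppool);
	return true;
}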
| 961 | |
| 962 | /** |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 963 | * Selects flags to pass to the page table manipulation operation based on the |
| 964 | * mapping mode. |
| 965 | */ |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 966 | static struct mm_flags mm_mode_to_flags(mm_mode_t mode) |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 967 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 968 | struct mm_flags flags = {0}; |
| 969 | |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 970 | if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 971 | flags.unmap = true; |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 972 | } |
| 973 | |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 974 | return flags; |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 975 | } |
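
/*
 * Illustrative sketch (hypothetical helper, not part of the build): only the
 * fully-unmapped mode selects an unmap operation; any other mode maps.
 */
static void mm_example_mode_to_flags(void)
{
	struct mm_flags map_flags = mm_mode_to_flags(MM_MODE_R | MM_MODE_W);
	struct mm_flags unmap_flags = mm_mode_to_flags(MM_MODE_UNMAPPED_MASK);

	assert(!map_flags.unmap);
	assert(unmap_flags.unmap);
}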
| 976 | |
| 977 | /** |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 978 | * See `mm_ptable_identity_prepare`. |
| 979 | * |
Raghu Krishnamurthy | 43fe93a | 2021-01-31 16:38:38 -0800 | [diff] [blame] | 980 | * This must be called before `mm_identity_commit` for the same mapping. |
| 981 | * |
| 982 | * Returns true on success, or false if the update would fail. |
| 983 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 984 | bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 985 | mm_mode_t mode, struct mpool *ppool) |
Raghu Krishnamurthy | 43fe93a | 2021-01-31 16:38:38 -0800 | [diff] [blame] | 986 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 987 | struct mm_flags flags = mm_mode_to_flags(mode); |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 988 | |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 989 | assert(ptable->stage1); |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 990 | return mm_ptable_identity_prepare(ptable, begin, end, |
Raghu Krishnamurthy | 43fe93a | 2021-01-31 16:38:38 -0800 | [diff] [blame] | 991 | arch_mm_mode_to_stage1_attrs(mode), |
| 992 | flags, ppool); |
| 993 | } |
| 994 | |
| 995 | /** |
| 996 | * See `mm_ptable_identity_commit`. |
| 997 | * |
| 998 | * `mm_identity_prepare` must be called before this for the same mapping. |
| 999 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1000 | void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1001 | mm_mode_t mode, struct mpool *ppool) |
Raghu Krishnamurthy | 43fe93a | 2021-01-31 16:38:38 -0800 | [diff] [blame] | 1002 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 1003 | struct mm_flags flags = mm_mode_to_flags(mode); |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1004 | |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1005 | assert(ptable->stage1); |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1006 | mm_ptable_identity_commit(ptable, begin, end, |
Raghu Krishnamurthy | 43fe93a | 2021-01-31 16:38:38 -0800 | [diff] [blame] | 1007 | arch_mm_mode_to_stage1_attrs(mode), flags, |
| 1008 | ppool); |
| 1009 | return ptr_from_va(va_from_pa(begin)); |
| 1010 | } |
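
/*
 * Usage sketch (hypothetical range and mode, not part of the build): the
 * two-phase stage-1 API. Only the prepare step can fail; once it succeeds,
 * the commit is guaranteed to complete, so it can be done past a point of
 * no return.
 */
static void *mm_example_stage1_map_two_phase(struct mm_ptable *ptable,
					     paddr_t begin, paddr_t end,
					     struct mpool *ppool)
{
	if (!mm_identity_prepare(ptable, begin, end, MM_MODE_R | MM_MODE_W,
				 ppool)) {
		return NULL;
	}

	return mm_identity_commit(ptable, begin, end, MM_MODE_R | MM_MODE_W,
				  ppool);
}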
| 1011 | |
| 1012 | /** |
| 1013 | * See `mm_ptable_identity_prepare`. |
| 1014 | * |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1015 | * This must be called before `mm_vm_identity_commit` for the same mapping. |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1016 | * |
| 1017 | * Returns true on success, or false if the update would fail. |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1018 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1019 | bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1020 | paddr_t end, mm_mode_t mode, struct mpool *ppool) |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1021 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 1022 | struct mm_flags flags = mm_mode_to_flags(mode); |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1023 | |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1024 | return mm_ptable_identity_prepare(ptable, begin, end, |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1025 | arch_mm_mode_to_stage2_attrs(mode), |
| 1026 | flags, ppool); |
| 1027 | } |
| 1028 | |
| 1029 | /** |
| 1030 | * See `mm_ptable_identity_commit`. |
| 1031 | * |
| 1032 | * `mm_vm_identity_prepare` must be called before this for the same mapping. |
| 1033 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1034 | void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1035 | mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa) |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1036 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 1037 | struct mm_flags flags = mm_mode_to_flags(mode); |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1038 | |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1039 | mm_ptable_identity_commit(ptable, begin, end, |
Andrew Scull | 4e83cef | 2019-11-19 14:17:54 +0000 | [diff] [blame] | 1040 | arch_mm_mode_to_stage2_attrs(mode), flags, |
| 1041 | ppool); |
| 1042 | |
| 1043 | if (ipa != NULL) { |
| 1044 | *ipa = ipa_from_pa(begin); |
| 1045 | } |
| 1046 | } |
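
/*
 * Usage sketch (hypothetical caller, not part of the build): the equivalent
 * two-phase pattern for a VM's stage-2 table.
 */
static bool mm_example_vm_map_two_phase(struct mm_ptable *ptable,
					paddr_t begin, paddr_t end,
					mm_mode_t mode, struct mpool *ppool,
					ipaddr_t *ipa)
{
	if (!mm_vm_identity_prepare(ptable, begin, end, mode, ppool)) {
		return false;
	}

	mm_vm_identity_commit(ptable, begin, end, mode, ppool, ipa);
	return true;
}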
| 1047 | |
| 1048 | /** |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1049 | * Updates a VM's page table such that the given physical address range is |
| 1050 | * mapped in the address space at the corresponding address range in the |
Andrew Scull | fe636b1 | 2018-07-30 14:15:54 +0100 | [diff] [blame] | 1051 | * architecture-agnostic mode provided. |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 1052 | * |
| 1053 | * mm_vm_defrag should always be called after a series of page table updates, |
| 1054 | * whether they succeed or fail. This is because on failure extra page table |
| 1055 | * entries may have been allocated and then not used, while on success it may be |
| 1056 | * possible to compact the page table by merging several entries into a block. |
| 1057 | * |
| 1058 | * Returns true on success, or false if the update failed and no changes were |
| 1059 | * made. |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1060 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1061 | bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1062 | mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1063 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 1064 | struct mm_flags flags = mm_mode_to_flags(mode); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1065 | bool success = mm_ptable_identity_update( |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1066 | ptable, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags, |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1067 | ppool); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1068 | |
| 1069 | if (success && ipa != NULL) { |
| 1070 | *ipa = ipa_from_pa(begin); |
| 1071 | } |
| 1072 | |
| 1073 | return success; |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1074 | } |
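
/*
 * Usage sketch (hypothetical addresses, not part of the build): map a range
 * into a VM and then defragment as recommended above, whether or not the
 * update succeeded. pa_init()/pa_add() are assumed to be the address
 * constructors from "hf/addr.h".
 */
static bool mm_example_vm_map_and_defrag(struct mm_ptable *ptable,
					 struct mpool *ppool)
{
	paddr_t begin = pa_init(0x80000000);
	paddr_t end = pa_add(begin, 16 * PAGE_SIZE);
	ipaddr_t ipa;
	bool success = mm_vm_identity_map(ptable, begin, end,
					  MM_MODE_R | MM_MODE_W, ppool, &ipa);

	mm_vm_defrag(ptable, ppool, false);
	return success;
}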
| 1075 | |
| 1076 | /** |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1077 | * Updates the VM's table such that the given physical address range has no |
| 1078 | * connection to the VM. |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1079 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1080 | bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end, |
Wedson Almeida Filho | 22d5eaa | 2018-12-16 00:38:49 +0000 | [diff] [blame] | 1081 | struct mpool *ppool) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1082 | { |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1083 | mm_mode_t mode = MM_MODE_UNMAPPED_MASK; |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 1084 | |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1085 | return mm_vm_identity_map(ptable, begin, end, mode, ppool, NULL); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1086 | } |
| 1087 | |
| 1088 | /** |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1089 |  * Writes the given VM page table to the debug log.
| 1090 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1091 | void mm_vm_dump(const struct mm_ptable *ptable) |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1092 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1093 | mm_ptable_dump(ptable); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1094 | } |
| 1095 | |
| 1096 | /** |
Raghu Krishnamurthy | 7ad3d14 | 2021-03-28 00:47:35 -0700 | [diff] [blame] | 1097 |  * Defragments a stage-1 page table.
| 1098 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1099 | void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool) |
Raghu Krishnamurthy | 7ad3d14 | 2021-03-28 00:47:35 -0700 | [diff] [blame] | 1100 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1101 | assert(ptable->stage1); |
| 1102 | mm_ptable_defrag(ptable, false, ppool); |
Raghu Krishnamurthy | 7ad3d14 | 2021-03-28 00:47:35 -0700 | [diff] [blame] | 1103 | } |
| 1104 | |
| 1105 | /** |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1106 | * Defragments the VM page table. |
| 1107 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1108 | void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool, |
| 1109 | bool non_secure) |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1110 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1111 | mm_ptable_defrag(ptable, non_secure, ppool); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1112 | } |
| 1113 | |
| 1114 | /** |
Fuad Tabba | 9dc276f | 2020-07-16 09:29:32 +0100 | [diff] [blame] | 1115 | * Gets the mode of the given range of intermediate physical addresses if they |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1116 | * are mapped with the same mode. |
| 1117 | * |
| 1118 | * Returns true if the range is mapped with the same mode and false otherwise. |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1119 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1120 | bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1121 | ipaddr_t end, mm_mode_t *mode) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1122 | { |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1123 | mm_attr_t attrs; |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1124 | bool ret; |
| 1125 | |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1126 | ret = mm_get_attrs(ptable, ipa_addr(begin), ipa_addr(end), &attrs); |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 1127 | if (ret) { |
| 1128 | *mode = arch_mm_stage2_attrs_to_mode(attrs); |
| 1129 | } |
| 1130 | |
| 1131 | return ret; |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1132 | } |
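
/*
 * Usage sketch (hypothetical helper, not part of the build): check that a
 * whole IPA range is mapped read-write before acting on it.
 */
static bool mm_example_range_is_rw(const struct mm_ptable *ptable,
				   ipaddr_t begin, ipaddr_t end)
{
	mm_mode_t mode;

	return mm_vm_get_mode(ptable, begin, end, &mode) &&
	       (mode & (MM_MODE_R | MM_MODE_W)) == (MM_MODE_R | MM_MODE_W);
}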
| 1133 | |
Raghu Krishnamurthy | 2323d72 | 2021-02-12 22:55:38 -0800 | [diff] [blame] | 1134 | /** |
| 1135 | * Gets the mode of the given range of virtual addresses if they |
| 1136 | * are mapped with the same mode. |
| 1137 | * |
| 1138 | * Returns true if the range is mapped with the same mode and false otherwise. |
| 1139 | */ |
Karl Meakin | d64aaf8 | 2025-02-08 01:12:55 +0000 | [diff] [blame] | 1140 | bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1141 | mm_mode_t *mode) |
Raghu Krishnamurthy | 2323d72 | 2021-02-12 22:55:38 -0800 | [diff] [blame] | 1142 | { |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1143 | mm_attr_t attrs; |
Raghu Krishnamurthy | 2323d72 | 2021-02-12 22:55:38 -0800 | [diff] [blame] | 1144 | bool ret; |
| 1145 | |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1146 | assert(ptable->stage1); |
| 1147 | ret = mm_get_attrs(ptable, va_addr(begin), va_addr(end), &attrs); |
Raghu Krishnamurthy | 2323d72 | 2021-02-12 22:55:38 -0800 | [diff] [blame] | 1148 | if (ret) { |
| 1149 | *mode = arch_mm_stage1_attrs_to_mode(attrs); |
| 1150 | } |
| 1151 | |
| 1152 | return ret; |
| 1153 | } |
| 1154 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1155 | static struct mm_stage1_locked mm_stage1_lock_unsafe(void) |
| 1156 | { |
| 1157 | return (struct mm_stage1_locked){.ptable = &ptable}; |
| 1158 | } |
| 1159 | |
Raghu Krishnamurthy | d3ab8c3 | 2021-02-10 19:11:30 -0800 | [diff] [blame] | 1160 | struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable) |
| 1161 | { |
| 1162 | return (struct mm_stage1_locked){.ptable = ptable}; |
| 1163 | } |
| 1164 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1165 | struct mm_stage1_locked mm_lock_stage1(void) |
| 1166 | { |
| 1167 | sl_lock(&ptable_lock); |
| 1168 | return mm_stage1_lock_unsafe(); |
| 1169 | } |
| 1170 | |
| 1171 | void mm_unlock_stage1(struct mm_stage1_locked *lock) |
| 1172 | { |
Andrew Scull | 877ae4b | 2019-07-02 12:52:33 +0100 | [diff] [blame] | 1173 | CHECK(lock->ptable == &ptable); |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1174 | sl_unlock(&ptable_lock); |
| 1175 | lock->ptable = NULL; |
| 1176 | } |
| 1177 | |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1178 | /** |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1179 | * Updates the hypervisor page table such that the given physical address range |
| 1180 | * is mapped into the address space at the corresponding address range in the |
| 1181 | * architecture-agnostic mode provided. |
| 1182 | */ |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1183 | void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin, |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1184 | paddr_t end, mm_mode_t mode, struct mpool *ppool) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1185 | { |
Karl Meakin | 1fd4b82 | 2025-02-01 17:13:47 +0000 | [diff] [blame] | 1186 | struct mm_flags flags = mm_mode_to_flags(mode); |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1187 | |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1188 | assert(stage1_locked.ptable->stage1); |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1189 | if (mm_ptable_identity_update(stage1_locked.ptable, begin, end, |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 1190 | arch_mm_mode_to_stage1_attrs(mode), flags, |
| 1191 | ppool)) { |
Andrew Scull | 4e5f814 | 2018-10-12 14:37:19 +0100 | [diff] [blame] | 1192 | return ptr_from_va(va_from_pa(begin)); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 1193 | } |
| 1194 | |
| 1195 | return NULL; |
| 1196 | } |
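
/*
 * Usage sketch (hypothetical device range, not part of the build): take the
 * stage-1 lock, map a range into the hypervisor's own address space and
 * release the lock. MM_MODE_D is assumed to be the device-memory mode bit
 * declared alongside MM_MODE_R/W/X.
 */
static void *mm_example_map_device(paddr_t begin, paddr_t end,
				   struct mpool *ppool)
{
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);

	mm_unlock_stage1(&stage1_locked);
	return ptr;
}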
| 1197 | |
| 1198 | /** |
| 1199 | * Updates the hypervisor table such that the given physical address range is |
| 1200 | * not mapped in the address space. |
| 1201 | */ |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1202 | bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end, |
| 1203 | struct mpool *ppool) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1204 | { |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame] | 1205 | mm_mode_t mode = MM_MODE_UNMAPPED_MASK; |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 1206 | |
| 1207 | return mm_identity_map(stage1_locked, begin, end, mode, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1208 | } |
| 1209 | |
| 1210 | /** |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1211 | * Defragments the hypervisor page table. |
| 1212 | */ |
| 1213 | void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool) |
| 1214 | { |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1215 | assert(stage1_locked.ptable->stage1); |
| 1216 | mm_ptable_defrag(stage1_locked.ptable, false, ppool); |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1217 | } |
| 1218 | |
| 1219 | /** |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1220 | * Initialises memory management for the hypervisor itself. |
| 1221 | */ |
Wedson Almeida Filho | 22d5eaa | 2018-12-16 00:38:49 +0000 | [diff] [blame] | 1222 | bool mm_init(struct mpool *ppool) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1223 | { |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1224 | 	/* Locking is not enabled yet, so fake it. */
| 1225 | struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe(); |
| 1226 | |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1227 | dlog_info("text: %#lx - %#lx\n", pa_addr(layout_text_begin()), |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1228 | pa_addr(layout_text_end())); |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1229 | dlog_info("rodata: %#lx - %#lx\n", pa_addr(layout_rodata_begin()), |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1230 | pa_addr(layout_rodata_end())); |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1231 | dlog_info("data: %#lx - %#lx\n", pa_addr(layout_data_begin()), |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1232 | pa_addr(layout_data_end())); |
Karl Meakin | e8937d9 | 2024-03-19 16:04:25 +0000 | [diff] [blame] | 1233 | dlog_info("stacks: %#lx - %#lx\n", pa_addr(layout_stacks_begin()), |
Maksims Svecovs | 134b8f9 | 2022-03-04 15:14:09 +0000 | [diff] [blame] | 1234 | pa_addr(layout_stacks_end())); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1235 | |
Raghu Krishnamurthy | 0132b51 | 2021-02-03 14:13:26 -0800 | [diff] [blame] | 1236 | /* ASID 0 is reserved for use by the hypervisor. */ |
Karl Meakin | 0f506a1 | 2025-02-08 23:28:45 +0000 | [diff] [blame] | 1237 | if (!mm_ptable_init(&ptable, 0, true, ppool)) { |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1238 | dlog_error("Unable to allocate memory for page table.\n"); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1239 | return false; |
| 1240 | } |
| 1241 | |
Arunachalam Ganapathy | 0f0f706 | 2022-01-26 17:09:53 +0000 | [diff] [blame] | 1242 | 	/* Initialise arch_mm before calling the mapping routines below. */
Karl Meakin | e1aeb1d | 2025-02-08 00:35:14 +0000 | [diff] [blame] | 1243 | if (!arch_mm_init(&ptable)) { |
Arunachalam Ganapathy | 0f0f706 | 2022-01-26 17:09:53 +0000 | [diff] [blame] | 1244 | return false; |
| 1245 | } |
| 1246 | |
Andrew Walbran | 4869936 | 2019-05-20 14:38:00 +0100 | [diff] [blame] | 1247 | 	/* Let the console driver map pages for itself. */
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1248 | plat_console_mm_init(stage1_locked, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1249 | |
| 1250 | /* Map each section. */ |
Raghu Krishnamurthy | 472a882 | 2022-10-04 21:28:59 -0700 | [diff] [blame] | 1251 | CHECK(mm_identity_map(stage1_locked, layout_text_begin(), |
| 1252 | layout_text_end(), MM_MODE_X, ppool) != NULL); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1253 | |
Raghu Krishnamurthy | 472a882 | 2022-10-04 21:28:59 -0700 | [diff] [blame] | 1254 | CHECK(mm_identity_map(stage1_locked, layout_rodata_begin(), |
| 1255 | layout_rodata_end(), MM_MODE_R, ppool) != NULL); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1256 | |
Raghu Krishnamurthy | 472a882 | 2022-10-04 21:28:59 -0700 | [diff] [blame] | 1257 | CHECK(mm_identity_map(stage1_locked, layout_data_begin(), |
| 1258 | layout_data_end(), MM_MODE_R | MM_MODE_W, |
| 1259 | ppool) != NULL); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1260 | |
Maksims Svecovs | 134b8f9 | 2022-03-04 15:14:09 +0000 | [diff] [blame] | 1261 | /* Arch-specific stack mapping. */ |
Raghu Krishnamurthy | 472a882 | 2022-10-04 21:28:59 -0700 | [diff] [blame] | 1262 | CHECK(arch_stack_mm_init(stage1_locked, ppool)); |
Maksims Svecovs | 134b8f9 | 2022-03-04 15:14:09 +0000 | [diff] [blame] | 1263 | |
Arunachalam Ganapathy | 0f0f706 | 2022-01-26 17:09:53 +0000 | [diff] [blame] | 1264 | return true; |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1265 | } |