/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

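/*
 * Worked example of the rounding helpers, assuming PAGE_SIZE == 0x1000
 * (4 KiB); the actual value comes from the build configuration:
 *
 *   mm_round_down_to_page(0x12345) == 0x12000
 *   mm_round_up_to_page(0x12345)   == 0x13000
 *   mm_round_up_to_page(0x12000)   == 0x12000 (already aligned)
 */
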
/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

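/*
 * Illustrative values, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9
 * (a typical 4 KiB-granule configuration on AArch64):
 *
 *   mm_entry_size(0) == 1 << 12 == 4 KiB  (page)
 *   mm_entry_size(1) == 1 << 21 == 2 MiB  (level 1 block)
 *   mm_entry_size(2) == 1 << 30 == 1 GiB  (level 2 block)
 */
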
/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

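/*
 * For example, with 2 MiB blocks (block_size == 0x200000):
 *
 *   mm_start_of_next_block(0x205000, 0x200000) == 0x400000
 *   mm_start_of_next_block(0x400000, 0x200000) == 0x600000
 *
 * Note that an already-aligned address advances to the following block.
 */
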
/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

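/*
 * Sketch of the arithmetic, assuming PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9: each level 0 table covers 512 pages, i.e. 2 MiB,
 * so mm_level_end(0x205000, 0) == 0x400000, the exclusive end of the
 * 2 MiB region covered by the level 0 table containing 0x205000.
 */
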
/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

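/*
 * Under the same assumed configuration (PAGE_BITS == 12,
 * PAGE_LEVEL_BITS == 9), each table holds 512 entries and:
 *
 *   mm_index(0x205000, 0) == (0x205000 >> 12) & 511 == 5
 *   mm_index(0x205000, 1) == (0x205000 >> 21) & 511 == 1
 */
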
/**
 * Allocates a new page table.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}

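/*
 * For instance, if the stage-2 configuration used 4 concatenated root
 * tables with mm_max_level() == 2 (both values are architecture-dependent),
 * then, with the assumed 4 KiB granule:
 *
 *   mm_ptable_addr_space_end(0) == 4 * mm_entry_size(3)
 *                               == 4 * (1 << 39) == 1 << 41
 *
 * i.e. a 41-bit, 2 TiB intermediate physical address space.
 */
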
/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This is to prevent cases where CPUs have different 'valid' values in their
 * TLBs, which may result in issues, for example with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present and the TLB is being invalidated.
	 */
	if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
	    arch_mm_pte_is_valid(v, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog_error("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool);

	return ntable;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool)) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

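/*
 * A sketch of how the three branches above interact, using the assumed
 * 4 KiB granule: mapping [0x200000, 0x600000) at level 1 (2 MiB entries)
 * covers entries 1 and 2 exactly, so both become block PTEs. Mapping
 * [0x201000, 0x600000) instead leaves entry 1 only partially covered, so
 * it is expanded into a level 0 subtable via mm_populate_table_pte() and
 * filled page by page, while entry 2 is still mapped as a single block.
 */
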
/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided. Only commits the change if MM_FLAG_COMMIT is set.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, uint64_t attrs, int flags,
				   struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate to the static analyzer the API
	 * constraint of mm_max_level() that isn't encoded in the types.
	 */
	CHECK(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool)) {
		return false;
	}

	/*
	 * All TLB invalidations must be complete already if any entries were
	 * replaced by mm_replace_entry. Sync all page table writes so that
	 * code following this can use them.
	 */
	arch_mm_sync_table_writes();

	return true;
}

/*
 * Prepares the given page table for the given address mapping such that it
 * will be able to commit the change without failure. It does so by ensuring
 * the smallest granularity needed is available. This remains valid provided
 * subsequent operations do not decrease the granularity.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 */
static bool mm_ptable_identity_prepare(struct mm_ptable *t, paddr_t pa_begin,
				       paddr_t pa_end, uint64_t attrs,
				       int flags, struct mpool *ppool)
{
	flags &= ~MM_FLAG_COMMIT;
	return mm_ptable_identity_map(t, pa_begin, pa_end, attrs, flags, ppool);
}

/**
 * Commits the given address mapping to the page table assuming the operation
 * cannot fail. `mm_ptable_identity_prepare` must be used correctly before this
 * to ensure this condition.
 *
 * Without the table being properly prepared, the commit may only partially
 * complete if it runs out of memory resulting in an inconsistent state that
 * isn't handled.
 *
 * Since the non-failure assumption is used in the reasoning about the
 * atomicity of higher level memory operations, any detected violations result
 * in a panic.
 *
 * TODO: remove ppool argument to be sure no changes are made.
 */
static void mm_ptable_identity_commit(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	CHECK(mm_ptable_identity_map(t, pa_begin, pa_end, attrs,
				     flags | MM_FLAG_COMMIT, ppool));
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 *
 * The page table is updated using the separate prepare and commit stages so
 * that, on failure, a partial update of the address space cannot happen. The
 * table may be left with extra internal tables but the address space is
 * unchanged.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	if (!mm_ptable_identity_prepare(t, pa_begin, pa_end, attrs, flags,
					ppool)) {
		return false;
	}

	mm_ptable_identity_commit(t, pa_begin, pa_end, attrs, flags, ppool);

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given that the entries in the table pointed to by the PTE all have identical
 * attributes, returns the single entry with which the table can be replaced.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	return arch_mm_block_pte(level, block_address, combined_attrs);
}

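/*
 * For example, a level 1 table PTE whose 512 level 0 entries are 4 KiB
 * blocks mapping physically contiguous memory with identical attributes
 * (assuming the 4 KiB granule configuration) merges into a single 2 MiB
 * block PTE, and a table whose entries are all absent merges into an
 * absent PTE.
 */
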
/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
static void mm_ptable_defrag_entry(ptable_addr_t base_addr, pte_t *entry,
				   uint8_t level, int flags,
				   struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;
	bool mergeable;
	bool base_present;
	uint64_t base_attrs;
	pte_t new_entry;

	if (!arch_mm_pte_is_table(*entry, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(*entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");

	mm_ptable_defrag_entry(base_addr, &(table->entries[0]), level - 1,
			       flags, ppool);

	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry, meaning the table can be merged into
	 * a block entry. This assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;
		ptable_addr_t block_addr =
			base_addr + (i * mm_entry_size(level - 1));

		mm_ptable_defrag_entry(block_addr, &(table->entries[i]),
				       level - 1, flags, ppool);

		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (!mergeable) {
		return;
	}

	new_entry = mm_merge_table_pte(*entry, level);
	if (*entry != new_entry) {
		mm_replace_entry(base_addr, entry, new_entry, level, flags,
				 ppool);
	}
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
			     struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;
	ptable_addr_t block_addr = 0;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_ptable_defrag_entry(block_addr,
					       &(tables[i].entries[j]), level,
					       flags, ppool);
			block_addr = mm_start_of_next_block(
				block_addr, mm_entry_size(level));
		}
	}

	arch_mm_sync_table_writes();
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The whole range has the same attributes. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the stage-2
 * table.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
			    ptable_addr_t end, uint64_t *attrs)
{
	int flags = 0;
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
{
	return mm_ptable_init(t, 0, ppool);
}

void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_fini(t, 0, ppool);
}

/**
 * Selects flags to pass to the page table manipulation operation based on the
 * mapping mode.
 */
static int mm_mode_to_flags(uint32_t mode)
{
	if ((mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK) {
		return MM_FLAG_UNMAP;
	}

	return 0;
}

/**
 * See `mm_ptable_identity_prepare`.
 *
 * This must be called before `mm_vm_identity_commit` for the same mapping.
 *
 * Returns true on success, or false if the update would fail.
 */
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool)
{
	int flags = mm_mode_to_flags(mode);

	return mm_ptable_identity_prepare(t, begin, end,
					  arch_mm_mode_to_stage2_attrs(mode),
					  flags, ppool);
}

/**
 * See `mm_ptable_identity_commit`.
 *
 * `mm_vm_identity_prepare` must be called before this for the same mapping.
 */
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	int flags = mm_mode_to_flags(mode);

	mm_ptable_identity_commit(t, begin, end,
				  arch_mm_mode_to_stage2_attrs(mode), flags,
				  ppool);

	if (ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}
}

| 886 | /** |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 887 | * Updates a VM's page table such that the given physical address range is |
| 888 | * mapped in the address space at the corresponding address range in the |
Andrew Scull | fe636b1 | 2018-07-30 14:15:54 +0100 | [diff] [blame] | 889 | * architecture-agnostic mode provided. |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 890 | * |
| 891 | * mm_vm_defrag should always be called after a series of page table updates, |
| 892 | * whether they succeed or fail. This is because on failure extra page table |
| 893 | * entries may have been allocated and then not used, while on success it may be |
| 894 | * possible to compact the page table by merging several entries into a block. |
| 895 | * |
| 896 | * Returns true on success, or false if the update failed and no changes were |
| 897 | * made. |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 898 | */ |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 899 | bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end, |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 900 | uint32_t mode, struct mpool *ppool, ipaddr_t *ipa) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 901 | { |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 902 | int flags = mm_mode_to_flags(mode); |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 903 | bool success = mm_ptable_identity_update( |
| 904 | t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags, |
| 905 | ppool); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 906 | |
| 907 | if (success && ipa != NULL) { |
| 908 | *ipa = ipa_from_pa(begin); |
| 909 | } |
| 910 | |
| 911 | return success; |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 912 | } |
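
/*
 * Caller-side sketch (illustrative, not part of the original file),
 * following the advice above to defragment after a run of updates whether
 * or not they succeed:
 *
 *	ipaddr_t ipa;
 *	bool ok = mm_vm_identity_map(t, begin, end, MM_MODE_R | MM_MODE_W,
 *				     ppool, &ipa);
 *
 *	mm_vm_defrag(t, ppool);  // Merge blocks / reclaim unused tables.
 *	if (ok) {
 *		// The VM can now access the range at `ipa`.
 *	}
 */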
| 913 | |
| 914 | /** |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 915 |  * Updates the VM's page table such that the given physical address range is
| 916 |  * no longer mapped into the VM's address space.
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 917 | */ |
Andrew Scull | da24197 | 2019-01-05 18:17:48 +0000 | [diff] [blame] | 918 | bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, |
Wedson Almeida Filho | 22d5eaa | 2018-12-16 00:38:49 +0000 | [diff] [blame] | 919 | struct mpool *ppool) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 920 | { |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 921 | uint32_t mode = MM_MODE_UNMAPPED_MASK; |
| 922 | |
Andrew Walbran | 8ec2b9f | 2019-11-25 15:05:40 +0000 | [diff] [blame] | 923 | return mm_vm_identity_map(t, begin, end, mode, ppool, NULL); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 924 | } |
| 925 | |
| 926 | /** |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 927 |  * Writes the given page table of a VM to the debug log.
| 928 | */ |
| 929 | void mm_vm_dump(struct mm_ptable *t) |
| 930 | { |
| 931 | mm_ptable_dump(t, 0); |
| 932 | } |
| 933 | |
| 934 | /** |
| 935 | * Defragments the VM page table. |
| 936 | */ |
| 937 | void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool) |
| 938 | { |
| 939 | mm_ptable_defrag(t, 0, ppool); |
| 940 | } |
| 941 | |
| 942 | /** |
Fuad Tabba | 9dc276f | 2020-07-16 09:29:32 +0100 | [diff] [blame] | 943 | * Gets the mode of the given range of intermediate physical addresses if they |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 944 | * are mapped with the same mode. |
| 945 | * |
| 946 | * Returns true if the range is mapped with the same mode and false otherwise. |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 947 | */ |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 948 | bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end, |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 949 | uint32_t *mode) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 950 | { |
Andrew Scull | 81e8509 | 2018-12-12 12:56:20 +0000 | [diff] [blame] | 951 | uint64_t attrs; |
| 952 | bool ret; |
| 953 | |
| 954 | ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs); |
| 955 | if (ret) { |
| 956 | *mode = arch_mm_stage2_attrs_to_mode(attrs); |
| 957 | } |
| 958 | |
| 959 | return ret; |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 960 | } |
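
/*
 * Example (illustrative, not part of the original file): validating that a
 * guest-supplied buffer is writable before using it. A false return means
 * the range is not mapped uniformly, so no single mode describes it:
 *
 *	uint32_t mode;
 *
 *	if (!mm_vm_get_mode(t, begin, end, &mode)) {
 *		return false;  // Mixed or missing mappings; reject.
 *	}
 *	return (mode & MM_MODE_W) != 0;
 */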
| 961 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 962 | static struct mm_stage1_locked mm_stage1_lock_unsafe(void) |
| 963 | { |
| 964 | return (struct mm_stage1_locked){.ptable = &ptable}; |
| 965 | } |
| 966 | |
| 967 | struct mm_stage1_locked mm_lock_stage1(void) |
| 968 | { |
| 969 | sl_lock(&ptable_lock); |
| 970 | return mm_stage1_lock_unsafe(); |
| 971 | } |
| 972 | |
| 973 | void mm_unlock_stage1(struct mm_stage1_locked *lock) |
| 974 | { |
Andrew Scull | 877ae4b | 2019-07-02 12:52:33 +0100 | [diff] [blame] | 975 | CHECK(lock->ptable == &ptable); |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 976 | sl_unlock(&ptable_lock); |
| 977 | lock->ptable = NULL; |
| 978 | } |
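
/*
 * Typical stage-1 locking pattern (illustrative, not part of the original
 * file): the lock handle is passed by value to the map/unmap helpers and
 * invalidated on unlock, and the CHECK above catches a handle that did not
 * come from mm_lock_stage1:
 *
 *	struct mm_stage1_locked locked = mm_lock_stage1();
 *
 *	mm_identity_map(locked, begin, end, MM_MODE_R, ppool);
 *	mm_unlock_stage1(&locked);  // locked.ptable is now NULL.
 */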
| 979 | |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 980 | /** |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 981 | * Updates the hypervisor page table such that the given physical address range |
| 982 | * is mapped into the address space at the corresponding address range in the |
| 983 | * architecture-agnostic mode provided. |
| 984 | */ |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 985 | void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin, |
Andrew Walbran | 1281ed4 | 2019-10-22 17:23:40 +0100 | [diff] [blame] | 986 | paddr_t end, uint32_t mode, struct mpool *ppool) |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 987 | { |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 988 | int flags = MM_FLAG_STAGE1 | mm_mode_to_flags(mode); |
| 989 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 990 | if (mm_ptable_identity_update(stage1_locked.ptable, begin, end, |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 991 | arch_mm_mode_to_stage1_attrs(mode), flags, |
| 992 | ppool)) { |
Andrew Scull | 4e5f814 | 2018-10-12 14:37:19 +0100 | [diff] [blame] | 993 | return ptr_from_va(va_from_pa(begin)); |
Andrew Scull | 8087132 | 2018-08-06 12:04:09 +0100 | [diff] [blame] | 994 | } |
| 995 | |
| 996 | return NULL; |
| 997 | } |
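
/*
 * Sketch (illustrative, not part of the original file): because the mapping
 * is an identity mapping, the returned hypervisor virtual address
 * corresponds directly to `begin`, and NULL signals that nothing was mapped:
 *
 *	void *base = mm_identity_map(stage1_locked, begin, end,
 *				     MM_MODE_R | MM_MODE_W, ppool);
 *
 *	if (base == NULL) {
 *		return false;  // Out of page-table memory.
 *	}
 *	// `base` corresponds to pa_addr(begin).
 */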
| 998 | |
| 999 | /** |
| 1000 |  * Updates the hypervisor page table such that the given physical address range
| 1001 |  * is not mapped into the address space.
| 1002 | */ |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1003 | bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end, |
| 1004 | struct mpool *ppool) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1005 | { |
Andrew Scull | 73b8954 | 2019-11-20 17:31:26 +0000 | [diff] [blame] | 1006 | uint32_t mode = MM_MODE_UNMAPPED_MASK; |
| 1007 | |
| 1008 | return mm_identity_map(stage1_locked, begin, end, mode, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1009 | } |
| 1010 | |
| 1011 | /** |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1012 | * Defragments the hypervisor page table. |
| 1013 | */ |
| 1014 | void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool) |
| 1015 | { |
| 1016 | mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool); |
| 1017 | } |
| 1018 | |
| 1019 | /** |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1020 | * Initialises memory management for the hypervisor itself. |
| 1021 | */ |
Wedson Almeida Filho | 22d5eaa | 2018-12-16 00:38:49 +0000 | [diff] [blame] | 1022 | bool mm_init(struct mpool *ppool) |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1023 | { |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1024 | 	/* Locking is not enabled yet, so fake it. */
| 1025 | struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe(); |
| 1026 | |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1027 | dlog_info("text: %#x - %#x\n", pa_addr(layout_text_begin()), |
| 1028 | pa_addr(layout_text_end())); |
| 1029 | dlog_info("rodata: %#x - %#x\n", pa_addr(layout_rodata_begin()), |
| 1030 | pa_addr(layout_rodata_end())); |
| 1031 | dlog_info("data: %#x - %#x\n", pa_addr(layout_data_begin()), |
| 1032 | pa_addr(layout_data_end())); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1033 | |
Andrew Scull | da3df7f | 2019-01-05 17:49:27 +0000 | [diff] [blame] | 1034 | if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) { |
Andrew Walbran | 17eebf9 | 2020-02-05 16:35:49 +0000 | [diff] [blame] | 1035 | dlog_error("Unable to allocate memory for page table.\n"); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1036 | return false; |
| 1037 | } |
| 1038 | |
Andrew Walbran | 4869936 | 2019-05-20 14:38:00 +0100 | [diff] [blame] | 1039 | 	/* Let the console driver map pages for itself. */
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1040 | plat_console_mm_init(stage1_locked, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1041 | |
| 1042 | /* Map each section. */ |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1043 | mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(), |
| 1044 | MM_MODE_X, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1045 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1046 | mm_identity_map(stage1_locked, layout_rodata_begin(), |
| 1047 | layout_rodata_end(), MM_MODE_R, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1048 | |
Andrew Scull | 3c0a90a | 2019-07-01 11:55:53 +0100 | [diff] [blame] | 1049 | mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(), |
Wedson Almeida Filho | 22d5eaa | 2018-12-16 00:38:49 +0000 | [diff] [blame] | 1050 | MM_MODE_R | MM_MODE_W, ppool); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1051 | |
Andrew Scull | b291056 | 2019-09-17 14:08:27 +0100 | [diff] [blame] | 1052 | return arch_mm_init(ptable.root); |
Wedson Almeida Filho | fdf4afc | 2018-07-19 15:45:21 +0100 | [diff] [blame] | 1053 | } |