/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to
 * access memory, and a stage 2 mapping per VM. The design assumes that all
 * page tables contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be "
	"holding so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04
#define MAP_FLAG_NOBBM  0x08
#define MAP_FLAG_STAGE1 0x10

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
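	/* PAGE_SIZE is a power of two, so masking clears the page offset. */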
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
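	/*
	 * For example, with 4 KiB pages (PAGE_BITS == 12) and 9 bits of index
	 * per level (PAGE_LEVEL_BITS == 9), a level 0 entry covers 4 KiB, a
	 * level 1 entry 2 MiB and a level 2 entry 1 GiB.
	 */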
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given address, calculates the exclusive upper bound of the address
 * range covered by the table, at the given level, that contains the address.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
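	/*
	 * Round up to the next boundary covered by a whole table at this
	 * level, e.g. the next 2 MiB boundary for a level 0 table with
	 * 4 KiB pages and 9 bits of index per level.
	 */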
	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
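	/*
	 * Shift out the page offset and the index bits of the lower levels,
	 * then mask to the PAGE_LEVEL_BITS-wide index for this level.
	 */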
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Allocates `count` page tables as one contiguous, naturally-aligned block.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count, bool nosync)
{
	size_t size_and_align = count * sizeof(struct mm_page_table);

	if (nosync) {
		return halloc_aligned_nosync(size_and_align, size_and_align);
	}

	return halloc_aligned(size_and_align, size_and_align);
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;

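	/* Absent and block entries reference no allocated memory. */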
	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Replaces a page table entry with the given value. If both old and new
 * values are present, it performs a break-before-make sequence where it first
 * writes an absent value to the PTE, flushes the TLB, then writes the actual
 * new value. This is to prevent cases where CPUs have different 'present'
 * values in their TLBs, which may result in issues, for example with cache
 * coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present, and if it hasn't been inhibited by the NOBBM flag.
	 */
	if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_present(v, level) &&
	    arch_mm_pte_is_present(new_pte, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level),
				  flags & MAP_FLAG_STAGE1);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level);
}

/**
 * Populates the provided page table entry with a reference to another table
 * if needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, flags & MAP_FLAG_NOSYNC);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
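		/*
		 * The block is split into MM_PTE_PER_PAGE smaller blocks one
		 * level below, so each successive entry advances by the size
		 * of an entry at that level.
		 */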
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags);

	return ntable;
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}

/**
 * Updates the page table at the given level to map the given address range
 * to a physical range using the provided (architecture-specific) attributes,
 * or, if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(begin, pte, level, flags);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do
			 * break-before-makes here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level);
			}
		}

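		/*
		 * Advance begin and pa to the start of the next entry:
		 * entry_size is a power of two, so the mask clears any offset
		 * within the entry.
		 */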
		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

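	/*
	 * Walk each root table that the range spans; the physical address
	 * passed down equals the input address, giving an identity mapping.
	 */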
	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags)) {
			return false;
		}
		begin = (begin + root_table_size) & ~(root_table_size - 1);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is
 * mapped into or unmapped from the address space, with the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode, bool unmap)
{
	uint64_t attrs = unmap ? 0 : arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
		    (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
		    (unmap ? MAP_FLAG_UNMAP : 0);
	uint8_t root_level = arch_mm_max_level(mode) + 1;
	ptable_addr_t ptable_end =
		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * TODO: replace with assertions that the max level will be greater
	 * than 0 and less than 255 so wrapping will not be a problem and will
	 * not lead to subsequent overflows.
	 */
	if (root_level == 0 || root_level == 1) {
		return false;
	}

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway
	 * updated state. In such a two-step implementation, the table may be
	 * left with extra internal tables, but no different mapping on
	 * failure.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MAP_FLAG_COMMIT)) {
		return false;
	}

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is
 * mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, false);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, true);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i])),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	int max_level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no
 * longer be valid after calling this function as the subtable will have been
 * freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);

	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
	block_attrs = arch_mm_pte_attrs(table->entries[0]);
	table_attrs = arch_mm_pte_attrs(entry);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0]);

	/* Free the subtable. */
	hfree(table);

	/*
	 * We can assume that the block is aligned properly because all
	 * virtual addresses are aligned by definition, and we have a 1-1
	 * mapping from virtual to physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0]);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same
		 * as what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
			identical_blocks_so_far = false;
		}
	}

	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}

	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another
	 * table, check if that table can be replaced by a block or an absent
	 * entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level);
		}
	}
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	size_t index;

	addr = mm_round_down_to_page(addr);
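	/* The index one level above the max level selects the root table. */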
	index = mm_index(addr, level + 1);

	if (index >= arch_mm_root_table_count(mode)) {
		return false;
	}

	return mm_is_mapped_recursive(&tables[index], addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = arch_mm_root_table_count(mode);

	tables = mm_alloc_page_tables(root_table_count, mode & MM_MODE_NOSYNC);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(arch_mm_max_level(mode));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm is
	 * not enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
void mm_ptable_fini(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level);
		}
	}

	hfree(tables);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
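 *
 * A minimal usage sketch; the table, addresses and modes here are
 * illustrative assumptions rather than values taken from this file:
 *
 *   ipaddr_t ipa;
 *   if (!mm_vm_identity_map(&vm_ptable, pa_init(0x40000000),
 *                           pa_init(0x40200000), MM_MODE_R | MM_MODE_W,
 *                           &ipa)) {
 *           dlog("Unable to map the range for the VM.\n");
 *   }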
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			   mode) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address.
 * Addresses are currently identity mapped so this is a simple type
 * conversion. Returns true if the address was mapped in the table and the
 * address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address
 * range is mapped into the address space at the corresponding address range
 * in the architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog_nosync("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
		    pa_addr(layout_text_end()));
	dlog_nosync("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
		    pa_addr(layout_rodata_end()));
	dlog_nosync("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
		    pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog_nosync("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.root, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}