/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the stage 1 and stage 2 page tables
 * used by Hafnium. There is a stage 1 mapping used by Hafnium itself to access
 * memory, and then a stage 2 mapping per VM. The design assumes that all page
 * tables contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. If this assertion fails, that assumption no "
	"longer holds and the code must be revisited.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04

/* clang-format on */

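/* The hypervisor's own stage 1 page table. */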
static struct mm_ptable ptable;

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(int level)
{
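	/*
	 * For example, assuming 4KiB pages (PAGE_BITS == 12) and 9 bits of
	 * index per level (PAGE_LEVEL_BITS == 9), a level 0 entry covers
	 * 4KiB, a level 1 entry 2MiB and a level 2 entry 1GiB.
	 */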
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
{
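	/*
	 * For example, assuming 4KiB pages and 9-bit levels, at level 0 this
	 * rounds the address up to the next 2MiB boundary, i.e. the end of
	 * the level 0 table that contains it.
	 */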
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, int level)
{
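	/*
	 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, the
	 * level 0 index is bits 12..20 of the address and the level 1 index
	 * is bits 21..29.
	 */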
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Allocates a new page table, using the synchronised allocator unless
 * `nosync` is set.
 */
static struct mm_page_table *mm_alloc_page_table(bool nosync)
{
	if (nosync) {
		return halloc_aligned_nosync(sizeof(struct mm_page_table),
					     alignof(struct mm_page_table));
	}

	return halloc_aligned(sizeof(struct mm_page_table),
			      alignof(struct mm_page_table));
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(pte_t *pte, int level,
						   bool sync_alloc)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	int level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_table(!sync_alloc);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
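	/*
	 * When splitting a block, the new table is filled with blocks one
	 * level down that together cover the same range with the same
	 * attributes. Adding the smaller entry size to the pte is relied
	 * upon to advance the output address from one entry to the next.
	 */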
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte,
	 * then update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, int level)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
	/* Recursively free any subtables. */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_ptable_is_empty(struct mm_page_table *table, int level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}
	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 int level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t v = *pte;
				*pte = unmap ? arch_mm_absent_pte(level)
					     : arch_mm_block_pte(level, pa,
								 attrs);
				/*
				 * TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI?
				 */
				mm_free_page_pte(v, level);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(pte, level, sync);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level.
			 */
			if (commit && unmap &&
			    mm_ptable_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				/*
				 * TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI?
				 */
				mm_free_page_pte(v, level);
			}
		}

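		/*
		 * Advance begin and pa to the start of the next entry at this
		 * level.
		 */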
		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is
 * mapped into the address space at the corresponding address range, in the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

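	/*
	 * The commit pass is not expected to fail: any tables it needs were
	 * already allocated by the first pass, which is why its return value
	 * can be ignored.
	 */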
	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	int flags =
		((mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC) | MAP_FLAG_UNMAP;
	int level = arch_mm_max_level(mode);
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, pa_begin, 0, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, int level,
				    int max_level)
{
	uint64_t i;
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i])),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	int max_level = arch_mm_max_level(mode);

	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, int level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);
	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, int level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
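	/*
	 * The table entry itself may carry attributes that apply to
	 * everything below it, so they are combined with the block's own
	 * attributes rather than discarded.
	 */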
	block_attrs = arch_mm_pte_attrs(table->entries[0]);
	table_attrs = arch_mm_pte_attrs(entry);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0]);
	/* Free the subtable. */
	hfree(table);
	/*
	 * We can assume that the block is aligned properly because all
	 * virtual addresses are aligned by definition, and we have a 1-1
	 * mapping from virtual to physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, int level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0]);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same
		 * as what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
			identical_blocks_so_far = false;
		}
	}
	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}
	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	int level = arch_mm_max_level(mode);
	uint64_t i;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, layout_text_begin(), layout_text_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, int level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	int level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	struct mm_page_table *table;

	table = mm_alloc_page_table(mode & MM_MODE_NOSYNC);
	if (table == NULL) {
		return false;
	}

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		table->entries[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for UART. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}