/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_NOSYNC 0x01
#define MAP_FLAG_COMMIT 0x02
#define MAP_FLAG_UNMAP  0x04

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
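
/*
 * Worked example (assuming a 4 KiB granule, i.e. PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9; illustrative values, not guaranteed by this file):
 *
 *   mm_entry_size(0) == 1 << 12 == 4 KiB (a page)
 *   mm_entry_size(1) == 1 << 21 == 2 MiB (a level 1 block)
 *   mm_entry_size(2) == 1 << 30 == 1 GiB (a level 2 block)
 */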

/**
 * For a given address, calculates the end (exclusive) of the range of
 * addresses covered by the table at the given level that contains it, i.e.
 * the maximum address representable by that table, plus one.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}
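
/*
 * Example, under the same illustrative granule assumption: at level 0 the
 * shift is 21, so mm_level_end(0x1234567, 0) == 0x1400000, the end of the
 * 2 MiB region covered by the level 0 table holding the address.
 */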

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
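
/*
 * Example, same assumption: mm_index(0x1234567, 0) selects bits [20:12] of
 * the address, so (0x1234567 >> 12) & 0x1ff == 0x34.
 */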

/**
 * Allocates a new page table.
 */
static struct mm_page_table *mm_alloc_page_table(bool nosync)
{
	if (nosync) {
		return halloc_aligned_nosync(sizeof(struct mm_page_table),
					     alignof(struct mm_page_table));
	}

	return halloc_aligned(sizeof(struct mm_page_table),
			      alignof(struct mm_page_table));
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(pte_t *pte, uint8_t level,
						   bool nosync)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_table(nosync);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte,
	 * then update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}
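
/*
 * For instance (illustrative, assuming the 4 KiB granule above): populating
 * over a 2 MiB block pte at level 1 produces a table of MM_PTE_PER_PAGE (512)
 * level 0 entries, each a 4 KiB block with the original attributes covering
 * successive PAGE_SIZE slices of the block, so the effective mapping is
 * unchanged until the caller edits the new table.
 */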

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte));
	/* Recursively free any subtables. */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_ptable_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}
	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool nosync = flags & MAP_FLAG_NOSYNC;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t v = *pte;
				*pte = unmap ? arch_mm_absent_pte(level)
					     : arch_mm_block_pte(level, pa,
								 attrs);
				/*
				 * TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI?
				 */
				mm_free_page_pte(v, level);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt =
				mm_populate_table_pte(pte, level, nosync);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level.
			 */
			if (commit && unmap &&
			    mm_ptable_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				/*
				 * TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI?
				 */
				mm_free_page_pte(v, level);
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}
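
/*
 * For example (illustrative granule as above): committing a map of
 * [0x0, 0x201000) at level 1 writes one 2 MiB block pte for [0x0, 0x200000)
 * and then recurses into a level 0 table to map the final 4 KiB page.
 */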

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode, bool unmap)
{
	uint64_t attrs = unmap ? 0 : arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC ? MAP_FLAG_NOSYNC : 0) |
		    (unmap ? MAP_FLAG_UNMAP : 0);
	uint8_t level = arch_mm_max_level(mode);
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. On failure, the table may be left with extra internal tables
	 * allocated, but the mappings themselves are unchanged.
	 */
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, false);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, true);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i])),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Given that `entry` is a subtable but its entries are all absent, return the
 * absent entry with which it can be replaced. Note that `entry` will no longer
 * be valid after calling this function as the subtable will have been freed.
 */
static pte_t mm_table_pte_to_absent(pte_t entry, uint8_t level)
{
	struct mm_page_table *table =
		mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Free the subtable. This is safe to do directly (rather than
	 * using mm_free_page_pte) because we know by this point that it
	 * doesn't have any subtables of its own.
	 */
	hfree(table);
	/* Replace subtable with a single absent entry. */
	return arch_mm_absent_pte(level);
}

/**
 * Given that `entry` is a subtable and its entries are all identical, return
 * the single block entry with which it can be replaced if possible. Note that
 * `entry` will no longer be valid after calling this function as the subtable
 * may have been freed.
 */
static pte_t mm_table_pte_to_block(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	if (!arch_mm_is_block_allowed(level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));
	/*
	 * Replace subtable with a single block, with equivalent
	 * attributes.
	 */
	block_attrs = arch_mm_pte_attrs(table->entries[0]);
	table_attrs = arch_mm_pte_attrs(entry);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0]);
	/* Free the subtable. */
	hfree(table);
	/*
	 * We can assume that the block is aligned properly because all virtual
	 * addresses are aligned by definition, and we have a 1-1 mapping from
	 * virtual to physical addresses.
	 */
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given ptable entry by recursively replacing any tables with
 * block or absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;
	bool identical_blocks_so_far = true;
	bool all_absent_so_far = true;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0]);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/*
		 * First try to defrag the entry, in case it is a subtable.
		 */
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level - 1);

		if (arch_mm_pte_is_present(table->entries[i], level - 1)) {
			all_absent_so_far = false;
		}

		/*
		 * If the entry is a block, check that the flags are the same
		 * as what we have so far.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i]) != attrs) {
			identical_blocks_so_far = false;
		}
	}
	if (identical_blocks_so_far) {
		return mm_table_pte_to_block(entry, level);
	}
	if (all_absent_so_far) {
		return mm_table_pte_to_absent(entry, level);
	}
	return entry;
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	uint8_t level = arch_mm_max_level(mode);
	uint64_t i;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		table->entries[i] =
			mm_ptable_defrag_entry(table->entries[i], level);
	}
}
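
/*
 * E.g. (illustrative granule as above): if unmapping has left a level 1 entry
 * pointing to a level 0 table whose 512 entries are all absent, defragmenting
 * replaces that entry with an absent pte and frees the table; if the 512
 * entries are identical 4 KiB blocks, it substitutes the equivalent 2 MiB
 * block.
 */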

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, layout_text_begin(), layout_text_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(arch_mm_table_from_pte(pte)),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *table = mm_page_table_from_pa(t->table);
	uint8_t level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	struct mm_page_table *table;

	table = mm_alloc_page_table(mode & MM_MODE_NOSYNC);
	if (table == NULL) {
		return false;
	}

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		table->entries[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}
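
/*
 * Hypothetical usage sketch; `vm` and its `ptable` field are invented here
 * for illustration, only mm_vm_translate and the pa/va helpers are real:
 *
 *	paddr_t pa;
 *	if (mm_vm_translate(&vm->ptable, ipa, &pa)) {
 *		void *ptr = ptr_from_va(va_from_pa(pa));
 *		// The identity-mapped memory can now be accessed via ptr.
 *	}
 */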

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(&ptable, pa_init(PL011_BASE),
			       pa_add(pa_init(PL011_BASE), PAGE_SIZE),
			       MM_MODE_R | MM_MODE_W | MM_MODE_D |
				       MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}