#include "mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "alloc.h"
#include "dlog.h"

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

extern uint8_t text_begin[];
extern uint8_t text_end[];
extern uint8_t rodata_begin[];
extern uint8_t rodata_end[];
extern uint8_t data_begin[];
extern uint8_t data_end[];

static struct mm_ptable ptable;

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return 1ull << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given virtual address, calculates the maximum (plus one) address that
 * can be represented by the same table at the given level.
 */
static inline vaddr_t mm_level_end(vaddr_t va, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return va_init(((va_addr(va) >> offset) + 1) << offset);
}

/**
 * For a given virtual address, calculates the index at which its entry is
 * stored in a table at the given level.
 */
static inline size_t mm_index(vaddr_t va, int level)
{
	uintvaddr_t v = va_addr(va) >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
}
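
/*
 * Worked example (a sketch, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9,
 * i.e. 4KiB pages with 512-entry tables): for va_addr(va) == 0x40201000,
 *
 *   mm_index(va, 2) == (0x40201000 >> 30) & 0x1ff == 1
 *   mm_index(va, 1) == (0x40201000 >> 21) & 0x1ff == 1
 *   mm_index(va, 0) == (0x40201000 >> 12) & 0x1ff == 1
 *
 * and mm_entry_size(0) == 4KiB, mm_entry_size(1) == 2MiB and
 * mm_entry_size(2) == 1GiB, so
 *
 *   va_addr(mm_level_end(va, 0)) == 0x40400000  (end of the 2MiB level-0 table)
 *   va_addr(mm_level_end(va, 1)) == 0x80000000  (end of the 1GiB level-1 table)
 */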

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v)) {
		return arch_mm_pte_to_table(v);
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (!arch_mm_pte_is_block(v)) {
		inc = 0;
		new_pte = arch_mm_absent_pte();
	} else {
		inc = mm_entry_size(level - 1);
		if (level == 1) {
			new_pte = arch_mm_block_to_page_pte(v);
		} else {
			new_pte = v;
		}
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte,
	 * then update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_pa_to_table_pte(pa_init((uintpaddr_t)ntable));

	return ntable;
}
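
/*
 * Note: when mm_populate_table_pte replaces a block entry (for example a 2MiB
 * block at level 1, assuming 4KiB pages and 8-byte entries), the new table is
 * seeded with 512 smaller entries that together cover the same range with the
 * same attributes, so the effective mapping is unchanged until a caller
 * rewrites individual entries.
 */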

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	(void)pte;
	(void)level;
	(void)sync;
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given virtual address
 * range to a physical range using the provided (architecture-specific)
 * attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(vaddr_t va_begin, vaddr_t va_end, paddr_t pa,
			 uint64_t attrs, pte_t *table, int level, int flags)
{
	pte_t *pte = table + mm_index(va_begin, level);
	uintvaddr_t level_end = va_addr(mm_level_end(va_begin, level));
	uintvaddr_t begin = va_addr(va_begin);
	uintvaddr_t end = va_addr(va_end);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (level == 0) {
			if (commit) {
				*pte = arch_mm_pa_to_page_pte(pa, attrs);
			}
		} else if ((end - begin) >= entry_size &&
			   arch_mm_is_block_allowed(level) &&
			   (begin & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_pa_to_block_pte(pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level, sync);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			/* Recurse from the current (possibly advanced)
			 * address so that later iterations index the new
			 * table correctly. */
			if (!mm_map_level(va_init(begin), va_end, pa, attrs,
					  nt, level - 1, flags)) {
				return false;
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}
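
/*
 * Callers drive mm_map_level twice over the same range: first without
 * MAP_FLAG_COMMIT, which only allocates any intermediate tables that will be
 * needed (and can fail), and then again with MAP_FLAG_COMMIT, which performs
 * the actual pte writes; the callers below do not check the second call's
 * return value because all required tables were allocated by the first pass.
 */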

/**
 * Invalidates the TLB for the given virtual address range.
 */
static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(begin, end);
	} else {
		arch_mm_invalidate_stage2_range(begin, end);
	}
}

/**
 * Updates the given table such that the given virtual address range is mapped
 * to the corresponding physical address range in the architecture-agnostic
 * mode provided.
 */
bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
			    int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	paddr_t paddr = arch_mm_clear_pa(mm_pa_from_va(begin));

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_level(begin, end, paddr, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, paddr, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}
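
/*
 * Usage sketch (hypothetical addresses; the mode flags are the ones used
 * elsewhere in this file): identity-map a two-page device region into the
 * hypervisor's stage-1 table.
 *
 *   if (!mm_ptable_identity_map(&ptable, va_init(0x09000000),
 *                               va_init(0x09002000),
 *                               MM_MODE_R | MM_MODE_W | MM_MODE_D |
 *                                       MM_MODE_STAGE1)) {
 *           dlog("Unable to map device region\n");
 *   }
 */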

/**
 * Updates the given table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
			  flags)) {
		return false;
	}

	mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single virtual address page is mapped
 * to the corresponding physical address page in the provided
 * architecture-agnostic mode.
 */
bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	bool sync = !(mode & MM_MODE_NOSYNC);
	paddr_t pa = arch_mm_clear_pa(mm_pa_from_va(va));

	va = arch_mm_clear_va(va);

	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(va, 0);
	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i])) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
		if (!level) {
			continue;
		}

		if (arch_mm_pte_is_table(table[i])) {
			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
						level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, va_init((uintvaddr_t)text_begin),
			       va_init((uintvaddr_t)text_end), mode) &&
	       mm_ptable_unmap(t, va_init((uintvaddr_t)rodata_begin),
			       va_init((uintvaddr_t)rodata_end), mode) &&
	       mm_ptable_unmap(t, va_init((uintvaddr_t)data_begin),
			       va_init((uintvaddr_t)data_end), mode);
}

/**
 * Determines if the given virtual address is mapped in the given page table
 * by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
{
	pte_t pte;
	uintvaddr_t va_level_end = va_addr(mm_level_end(addr, level));

	/* It isn't mapped if it doesn't fit in the table. */
	if (va_addr(addr) >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (level == 0) {
		return arch_mm_pte_is_present(pte);
	}

	if (arch_mm_is_block_allowed(level) && arch_mm_pte_is_block(pte)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte)) {
		return mm_is_mapped_recursive(arch_mm_pte_to_table(pte), addr,
					      level - 1);
	}

	return false;
}

/**
 * Determines if the given virtual address is mapped in the given page table.
 */
bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
{
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	int level = arch_mm_max_level(mode);

	addr = arch_mm_clear_va(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte();
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);
	t->id = id;

	return true;
}

/**
 * Updates the hypervisor page table such that the given virtual address range
 * is mapped to the corresponding physical address range in the
 * architecture-agnostic mode provided.
 */
bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_identity_map(&ptable, begin, end,
				      mode | MM_MODE_STAGE1);
}

/**
 * Updates the hypervisor table such that the given virtual address range is
 * not mapped to any physical address.
 */
bool mm_unmap(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
	dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
	dlog("data: 0x%x - 0x%x\n", data_begin, data_end);

	if (!mm_ptable_init(&ptable, 0, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, va_init(PL011_BASE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(va_init((uintvaddr_t)text_begin),
			va_init((uintvaddr_t)text_end),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(va_init((uintvaddr_t)rodata_begin),
			va_init((uintvaddr_t)rodata_end),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(va_init((uintvaddr_t)data_begin),
			va_init((uintvaddr_t)data_end),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}