#include "mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "alloc.h"
#include "dlog.h"

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */
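
/*
 * MAP_FLAG_SYNC selects the synchronised allocator when new tables have to be
 * created, and MAP_FLAG_COMMIT marks the second, committing pass of the
 * two-pass updates performed by mm_ptable_identity_map and mm_ptable_unmap.
 */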

extern uint8_t text_begin[];
extern uint8_t text_end[];
extern uint8_t rodata_begin[];
extern uint8_t rodata_end[];
extern uint8_t data_begin[];
extern uint8_t data_end[];

static struct mm_ptable ptable;

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return 1ull << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given virtual address, calculates the exclusive end of the range of
 * addresses that can be represented by the same table at the given level.
 */
static inline vaddr_t mm_level_end(vaddr_t va, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return va_init(((va_addr(va) >> offset) + 1) << offset);
}

/**
 * For a given virtual address, calculates the index at which its entry is
 * stored in a table at the given level.
 */
static inline size_t mm_index(vaddr_t va, int level)
{
	uintvaddr_t v = va_addr(va) >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v)) {
		return arch_mm_pte_to_table(v);
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (!arch_mm_pte_is_block(v)) {
		inc = 0;
		new_pte = arch_mm_absent_pte();
	} else {
		inc = mm_entry_size(level - 1);
		if (level == 1) {
			new_pte = arch_mm_block_to_page_pte(v);
		} else {
			new_pte = v;
		}
	}

	/* Initialise entries in the new table. */
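	/*
	 * new_pte is the template for the first entry and inc the stride
	 * between consecutive entries: either an absent entry repeated
	 * throughout, or the original block broken into next-level blocks (or
	 * pages at level 1) so the existing mapping is preserved.
	 */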
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte,
	 * then update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_pa_to_table_pte(pa_init((uintpaddr_t)ntable));

	return ntable;
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	(void)pte;
	(void)level;
	(void)sync;
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given virtual address
 * range to a physical range using the provided (architecture-specific)
 * attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(vaddr_t va_begin, vaddr_t va_end, paddr_t pa,
			 uint64_t attrs, pte_t *table, int level, int flags)
{
	pte_t *pte = table + mm_index(va_begin, level);
	uintvaddr_t level_end = va_addr(mm_level_end(va_begin, level));
	uintvaddr_t begin = va_addr(va_begin);
	uintvaddr_t end = va_addr(va_end);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
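	/*
	 * At level 0 each entry maps a single page. At higher levels an entry
	 * is written as a block when the remaining range covers the whole
	 * entry, the address is suitably aligned and the architecture allows
	 * blocks at this level; otherwise the entry is expanded into a
	 * subtable and filled in recursively.
	 */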
	while (begin < end) {
		if (level == 0) {
			if (commit) {
				*pte = arch_mm_pa_to_page_pte(pa, attrs);
			}
		} else if ((end - begin) >= entry_size &&
			   arch_mm_is_block_allowed(level) &&
			   (begin & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_pa_to_block_pte(pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level, sync);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(va_init(begin), va_end, pa, attrs,
					  nt, level - 1, flags)) {
				return false;
			}
		}

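		/* Advance to the next entry at this level. */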
		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given virtual address range.
 */
static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(begin, end);
	} else {
		arch_mm_invalidate_stage2_range(begin, end);
	}
}

/**
 * Updates the given table such that the given virtual address range is mapped
 * to the corresponding physical address range in the architecture-agnostic
 * mode provided.
 */
bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
			    int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	paddr_t paddr = arch_mm_clear_pa(mm_pa_from_va(begin));

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));

	/*
	 * Do it in two steps so the table is never left in a half-updated
	 * state: the first pass only allocates any missing tables, the second
	 * commits the changes. On failure, the table may be left with extra
	 * internal tables but the mappings themselves are unchanged.
	 */
	if (!mm_map_level(begin, end, paddr, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, paddr, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(va_add(end, PAGE_SIZE - 1));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
			  flags)) {
		return false;
	}

	mm_map_level(begin, end, mm_pa_from_va(begin), 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the TLB. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single virtual address page is mapped
 * to the corresponding physical address page in the provided
 * architecture-agnostic mode.
 */
bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	bool sync = !(mode & MM_MODE_NOSYNC);
	paddr_t pa = arch_mm_clear_pa(mm_pa_from_va(va));

	va = arch_mm_clear_va(va);

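	/*
	 * Walk down from the top level, creating any missing intermediate
	 * tables, until reaching the level 0 table that holds the entry for
	 * this page.
	 */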
	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(va, 0);
	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
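
	/* Log each present entry, indented four spaces per level of depth. */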
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i])) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
		if (!level) {
			continue;
		}

		if (arch_mm_pte_is_table(table[i])) {
			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
						level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	int max_level = arch_mm_max_level(mode);

	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, va_init((uintvaddr_t)text_begin),
			       va_init((uintvaddr_t)text_end), mode) &&
	       mm_ptable_unmap(t, va_init((uintvaddr_t)rodata_begin),
			       va_init((uintvaddr_t)rodata_end), mode) &&
	       mm_ptable_unmap(t, va_init((uintvaddr_t)data_begin),
			       va_init((uintvaddr_t)data_end), mode);
}

/**
 * Determines if the given virtual address is mapped in the given page table
 * by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
{
	pte_t pte;
	uintvaddr_t va_level_end = va_addr(mm_level_end(addr, level));

	/* It isn't mapped if it doesn't fit in the table. */
	if (va_addr(addr) >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (level == 0) {
		return arch_mm_pte_is_present(pte);
	}

	if (arch_mm_is_block_allowed(level) && arch_mm_pte_is_block(pte)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte)) {
		return mm_is_mapped_recursive(arch_mm_pte_to_table(pte), addr,
					      level - 1);
	}

	return false;
}

/**
 * Determines if the given virtual address is mapped in the given page table.
 */
bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
{
	pte_t *table = mm_ptr_from_va(mm_va_from_pa(t->table));
	int level = arch_mm_max_level(mode);

	addr = arch_mm_clear_va(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte();
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);
	t->id = id;

	return true;
}

/**
 * Updates the hypervisor page table such that the given virtual address range
 * is mapped to the corresponding physical address range in the
 * architecture-agnostic mode provided.
 */
bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_identity_map(&ptable, begin, end,
				      mode | MM_MODE_STAGE1);
}

/**
 * Updates the hypervisor table such that the given virtual address range is
 * not mapped to any physical address.
 */
bool mm_unmap(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
	dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
	dlog("data: 0x%x - 0x%x\n", data_begin, data_end);

	if (!mm_ptable_init(&ptable, 0, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for the UART. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, va_init(PL011_BASE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(va_init((uintvaddr_t)text_begin),
			va_init((uintvaddr_t)text_end),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(va_init((uintvaddr_t)rodata_begin),
			va_init((uintvaddr_t)rodata_end),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(va_init((uintvaddr_t)data_begin),
			va_init((uintvaddr_t)data_end),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}