#include "mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "alloc.h"
#include "dlog.h"

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

extern char text_begin[];
extern char text_end[];
extern char rodata_begin[];
extern char rodata_end[];
extern char data_begin[];
extern char data_end[];
static struct mm_ptable ptable;

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
static inline size_t mm_entry_size(int level)
{
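	/*
	 * Each level adds PAGE_LEVEL_BITS to the shift. For example, with a
	 * 4KiB granule (PAGE_BITS == 12) and PAGE_LEVEL_BITS == 9, entries
	 * cover 4KiB at level 0, 2MiB at level 1 and 1GiB at level 2.
	 */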
	return 1ull << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given virtual address, calculates the first address beyond the range
 * that can be represented by the same table at the given level.
 */
static inline vaddr_t mm_level_end(vaddr_t va, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
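	/*
	 * Round up to the next multiple of the range covered by a whole table
	 * at this level, i.e. the first address outside the current table.
	 */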
	return ((va >> offset) + 1) << offset;
}

/**
 * For a given virtual address, calculates the index at which its entry is
 * stored in a table at the given level.
 */
static inline size_t mm_index(vaddr_t va, int level)
{
	vaddr_t v = va >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
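	/* Keep only the PAGE_LEVEL_BITS index bits for this level. */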
	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v)) {
		return arch_mm_pte_to_table(v);
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
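	/*
	 * An absent entry yields a table of absent entries. A block entry is
	 * split into entries one level down that together reproduce the same
	 * mapping, so each entry's address advances by the sub-entry size.
	 */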
	if (!arch_mm_pte_is_block(v)) {
		inc = 0;
		new_pte = arch_mm_absent_pte();
	} else {
		inc = mm_entry_size(level - 1);
		if (level == 1) {
			new_pte = arch_mm_block_to_page_pte(v);
		} else {
			new_pte = v;
		}
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_pa_to_table_pte((paddr_t)ntable);

	return ntable;
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	(void)pte;
	(void)level;
	(void)sync;
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given virtual address
 * range to a physical range using the provided (architecture-specific)
 * attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(vaddr_t va, vaddr_t va_end, paddr_t pa, uint64_t attrs,
			 pte_t *table, int level, int flags)
{
	pte_t *pte = table + mm_index(va, level);
	vaddr_t va_level_end = mm_level_end(va, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap va_end so that we don't go over the current level max. */
	if (va_end > va_level_end) {
		va_end = va_level_end;
	}

	/* Fill each entry in the table. */
	while (va < va_end) {
		if (level == 0) {
			if (commit) {
				*pte = arch_mm_pa_to_page_pte(pa, attrs);
			}
		} else if ((va_end - va) >= entry_size &&
			   arch_mm_is_block_allowed(level) &&
			   (va & (entry_size - 1)) == 0) {
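			/*
			 * The whole entry can be a block: the remaining range
			 * covers it, blocks are allowed at this level and va
			 * is aligned to the entry size.
			 */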
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_pa_to_block_pte(pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level, sync);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

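		/* Advance va and pa to the start of the next entry. */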
		va = (va + entry_size) & ~(entry_size - 1);
		pa = (pa + entry_size) & ~(entry_size - 1);
		pte++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given virtual address range.
 */
static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(begin, end);
	} else {
		arch_mm_invalidate_stage2_range(begin, end);
	}
}

/**
 * Updates the given table such that the given virtual address range is mapped
 * to the corresponding physical address range in the architecture-agnostic mode
 * provided.
 */
bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
			    int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	paddr_t paddr = arch_mm_clear_pa(begin);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
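	/* First pass: create any missing intermediate tables. */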
	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags)) {
		return false;
	}

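	/* Second pass: commit the mapping now that the tables exist. */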
	mm_map_level(begin, end, paddr, attrs, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, begin, 0, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single virtual address page is mapped to
 * the corresponding physical address page in the provided architecture-agnostic
 * mode.
 */
bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = t->table;
	bool sync = !(mode & MM_MODE_NOSYNC);
	paddr_t pa = arch_mm_clear_pa(va);

	va = arch_mm_clear_va(va);

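	/*
	 * Walk down from the top level, creating any missing intermediate
	 * tables, then write the level-0 page entry.
	 */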
	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(va, 0);
	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i])) {
			continue;
		}

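		/* Indent by four spaces per level below the root. */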
		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
		if (!level) {
			continue;
		}

		if (arch_mm_pte_is_table(table[i])) {
			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
						level - 1, max_level);
		}
	}
}

/**
 * Write the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(t->table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, (vaddr_t)text_begin, (vaddr_t)text_end,
			       mode) &&
	       mm_ptable_unmap(t, (vaddr_t)rodata_begin, (vaddr_t)rodata_end,
			       mode) &&
	       mm_ptable_unmap(t, (vaddr_t)data_begin, (vaddr_t)data_end, mode);
}

/**
 * Determines if the given virtual address is mapped in the given page table
 * by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(const pte_t *table, vaddr_t addr, int level)
{
	pte_t pte;
	vaddr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (level == 0) {
		return arch_mm_pte_is_present(pte);
	}

	if (arch_mm_is_block_allowed(level) && arch_mm_pte_is_block(pte)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte)) {
		return mm_is_mapped_recursive(arch_mm_pte_to_table(pte), addr,
					      level - 1);
	}

	return false;
}

/**
 * Determines if the given virtual address is mapped in the given page table.
 */
bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode)
{
	int level = arch_mm_max_level(mode);

	addr = arch_mm_clear_va(addr);

	return mm_is_mapped_recursive(t->table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode)
{
	size_t i;
	pte_t *table;

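	/*
	 * Allocate the root table page; MM_MODE_NOSYNC selects the
	 * unsynchronised allocator, as used during early boot in mm_init.
	 */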
	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte();
	}

	t->table = table;
	t->id = id;

	return true;
}

/**
 * Updates the hypervisor page table such that the given virtual address range
 * is mapped to the corresponding physical address range in the
 * architecture-agnostic mode provided.
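 *
 * For example, the hypervisor's own sections are mapped this way from
 * mm_init:
 *
 *   mm_identity_map((vaddr_t)text_begin, (vaddr_t)text_end,
 *                   MM_MODE_X | MM_MODE_NOSYNC);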
 */
bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_identity_map(&ptable, begin, end,
				      mode | MM_MODE_STAGE1);
}

/**
 * Updates the hypervisor table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_unmap(vaddr_t begin, vaddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
	dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
	dlog("data: 0x%x - 0x%x\n", data_begin, data_end);

	if (!mm_ptable_init(&ptable, 0, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, PL011_BASE,
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map((vaddr_t)text_begin, (vaddr_t)text_end,
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map((vaddr_t)rodata_begin, (vaddr_t)rodata_end,
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map((vaddr_t)data_begin, (vaddr_t)data_end,
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init((paddr_t)ptable.table, true);
}

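/**
 * Initialises memory management for the current CPU, reusing the page table
 * set up by mm_init.
 */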
bool mm_cpu_init(void)
{
	return arch_mm_init((paddr_t)ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}