#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/* For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so: */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

extern uint8_t text_begin[];
extern uint8_t text_end[];
extern uint8_t rodata_begin[];
extern uint8_t rodata_end[];
extern uint8_t data_begin[];
extern uint8_t data_end[];

static struct mm_ptable ptable;

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
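
/*
 * Worked example of the rounding helpers (a sketch assuming a 4 KiB page,
 * i.e. PAGE_SIZE == 0x1000): 0x12345 rounds down to 0x12000 and up to
 * 0x13000, while a page-aligned address such as 0x12000 is returned
 * unchanged by both helpers.
 */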

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
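
/*
 * Illustration of the entry-size arithmetic, assuming the common 4 KiB
 * granule values (PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9): a level 0
 * entry covers 1 << 12 = 4 KiB, a level 1 entry covers 1 << 21 = 2 MiB and
 * a level 2 entry covers 1 << 30 = 1 GiB.
 */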

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static inline ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}
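
/*
 * Continuing the example with the same assumed constants: a level 0 table
 * holds 512 entries of 4 KiB each, i.e. it spans 2 MiB, so
 * mm_level_end(0x201234, 0) == 0x400000, the end of the 2 MiB span that
 * contains the address.
 */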

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static inline size_t mm_index(ptable_addr_t addr, int level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
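
/*
 * And for the index calculation, with the same assumptions:
 * mm_index(0x201234, 0) == 1 (the second 4 KiB entry of its level 0 table)
 * and mm_index(0x201234, 1) == 1 (the second 2 MiB entry of its level 1
 * table).
 */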

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	int level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return ptr_from_va(va_from_pa(arch_mm_table_from_pte(v)));
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(paddr_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}
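
/*
 * Note on the splitting behaviour above: if the entry being replaced was a
 * block, the new table is seeded with smaller blocks that together cover the
 * same range with the same attributes (512 of them with the assumed 4 KiB
 * granule), so breaking a block into a table does not change the effective
 * mapping.
 */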

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, int level)
{
	pte_t *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	table = ptr_from_va(va_from_pa(arch_mm_table_from_pte(pte)));
	/* Recursively free any subtables. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); ++i) {
		mm_free_page_pte(table[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, pte_t *table, int level, int flags)
{
	pte_t *pte = &table[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if ((end - begin) >= entry_size &&
		    arch_mm_is_block_allowed(level) &&
		    (begin & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_block_pte(level, pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}
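
/*
 * To illustrate mm_map_level with the assumed constants: identity-mapping
 * [0x200000, 0x600000) at level 1 writes two 2 MiB block entries directly,
 * whereas mapping [0x201000, 0x400000) starts unaligned, so the first entry
 * is expanded into a level 0 table and filled with 4 KiB blocks up to the
 * next 2 MiB boundary.
 */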

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the corresponding address range in the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}
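
/*
 * A note on the two-pass scheme above: the first, uncommitted pass only
 * allocates whatever intermediate tables the mapping will need and may fail
 * cleanly; the second pass with MAP_FLAG_COMMIT then writes the final
 * entries and is expected not to fail because all allocations have already
 * been done, so callers never observe a partially applied mapping (only,
 * possibly, some extra intermediate tables).
 */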

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, pa_begin, 0, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single physical address page is mapped
 * into the address space with the corresponding address page in the provided
 * architecture-agnostic mode.
 */
static bool mm_ptable_identity_map_page(struct mm_ptable *t, paddr_t pa,
					int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	bool sync = !(mode & MM_MODE_NOSYNC);
	ptable_addr_t addr;

	pa = arch_mm_clear_pa(pa);
	addr = pa_addr(pa);

	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(&table[mm_index(addr, i)], i,
					      sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(addr, 0);
	table[i] = arch_mm_block_pte(0, pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);

		if (arch_mm_pte_is_table(table[i], level)) {
			mm_dump_table_recursive(
				ptr_from_va(va_from_pa(
					arch_mm_table_from_pte(table[i]))),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, pa_init((uintpaddr_t)text_begin),
			       pa_init((uintpaddr_t)text_end), mode) &&
	       mm_ptable_unmap(t, pa_init((uintpaddr_t)rodata_begin),
			       pa_init((uintpaddr_t)rodata_end), mode) &&
	       mm_ptable_unmap(t, pa_init((uintpaddr_t)data_begin),
			       pa_init((uintpaddr_t)data_end), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(const pte_t *table, ptable_addr_t addr,
				   int level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			ptr_from_va(va_from_pa(arch_mm_table_from_pte(pte))),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}
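
/*
 * Usage sketch (hypothetical caller; a stage 2 table for a VM would pass a
 * mode without MM_MODE_STAGE1):
 *
 *	struct mm_ptable vm_ptable;
 *
 *	if (!mm_ptable_init(&vm_ptable, 0)) {
 *		dlog("Unable to allocate VM page table.\n");
 *	}
 */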

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates a VM's page table such that the given physical address page is
 * mapped in the address space at the corresponding address page in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
			     ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map_page(t, begin, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}
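
/*
 * Example of the identity-map assumption (hypothetical values): if
 * [0x8000000, 0x8001000) was previously mapped with mm_vm_identity_map,
 * then mm_vm_translate on ipa_init(0x8000000) succeeds and yields
 * pa_init(0x8000000).
 */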

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
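
/*
 * Usage sketch (DEVICE_BASE is a hypothetical device address, not defined
 * in this file):
 *
 *	void *regs = mm_identity_map(pa_init(DEVICE_BASE),
 *				     pa_init(DEVICE_BASE + PAGE_SIZE),
 *				     MM_MODE_R | MM_MODE_W | MM_MODE_D);
 *	if (regs == NULL) {
 *		dlog("Failed to map device registers\n");
 *	}
 */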

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
	dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
	dlog("data: 0x%x - 0x%x\n", data_begin, data_end);

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, pa_init(PL011_BASE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(pa_init((uintpaddr_t)text_begin),
			pa_init((uintpaddr_t)text_end),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(pa_init((uintpaddr_t)rodata_begin),
			pa_init((uintpaddr_t)rodata_end),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(pa_init((uintpaddr_t)data_begin),
			pa_init((uintpaddr_t)data_end),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}