#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/* For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so: */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

extern uint8_t text_begin[];
extern uint8_t text_end[];
extern uint8_t rodata_begin[];
extern uint8_t rodata_end[];
extern uint8_t data_begin[];
extern uint8_t data_end[];

static struct mm_ptable ptable;

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
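
/*
 * Worked example (a sketch, assuming PAGE_SIZE == 4096, i.e. PAGE_BITS == 12;
 * the real value comes from the arch headers, not this file): the rounding
 * helpers mask off or carry the low page-offset bits, e.g.
 *
 *	mm_round_down_to_page(0x1234) == 0x1000
 *	mm_round_up_to_page(0x1234)   == 0x2000
 *	mm_round_up_to_page(0x1000)   == 0x1000  (already aligned)
 */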

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
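
/*
 * Illustration (assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9, typical of
 * a 4 KiB translation granule; the actual values live in the arch headers):
 * each level covers 9 more address bits, so
 *
 *	mm_entry_size(0) == 4 KiB
 *	mm_entry_size(1) == 2 MiB
 *	mm_entry_size(2) == 1 GiB
 */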

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static inline ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}
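
/*
 * For example (under the same assumed PAGE_BITS/PAGE_LEVEL_BITS as above), a
 * level-1 table spans 512 entries of 2 MiB, i.e. 1 GiB, so
 * mm_level_end(0x40200000, 1) == 0x80000000: the next table boundary after
 * 0x40200000 is the 2 GiB mark.
 */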

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static inline size_t mm_index(ptable_addr_t addr, int level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
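
/*
 * For example (same assumptions), the address 0x40201000 == 1 GiB + 2 MiB +
 * 4 KiB decomposes into index 1 at level 2 (1 GiB slots), index 1 at level 1
 * (2 MiB slots) and index 1 at level 0 (4 KiB slots).
 */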

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return ptr_from_va(va_from_pa(arch_mm_table_from_pte(v)));
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		int level_below = level - 1;
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(paddr_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}
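
/*
 * Sketch of the splitting case above (with the assumed 4 KiB granule): if the
 * entry was a 2 MiB block at level 1, the new level-0 table is filled with 512
 * page entries whose outputs step by mm_entry_size(0) == 4 KiB, so the same
 * range stays mapped, just expressed one level lower.
 */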

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	(void)pte;
	(void)level;
	(void)sync;
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte, level) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, pte_t *table, int level, int flags)
{
	pte_t *pte = &table[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if ((end - begin) >= entry_size &&
		    arch_mm_is_block_allowed(level) &&
		    (begin & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_block_pte(level, pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level, sync);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}
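
/*
 * Worked example of the traversal above (a sketch, again assuming a 4 KiB
 * granule with block mappings allowed at level 1): identity mapping
 * [0x40200000, 0x40800000) starting at level 2 finds the 6 MiB range too
 * small for a 1 GiB block, so it populates a level-1 table and recurses; at
 * level 1 the range is 2 MiB aligned, so three block entries are written at
 * indices 1, 2 and 3 during the MAP_FLAG_COMMIT pass.
 */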

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the corresponding address range in the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, pa_begin, 0, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single physical address page is mapped
 * into the address space with the corresponding address page in the provided
 * architecture-agnostic mode.
 */
static bool mm_ptable_identity_map_page(struct mm_ptable *t, paddr_t pa,
					int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	bool sync = !(mode & MM_MODE_NOSYNC);
	ptable_addr_t addr;

	pa = arch_mm_clear_pa(pa);
	addr = pa_addr(pa);

	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(&table[mm_index(addr, i)], i,
					      sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(addr, 0);
	table[i] = arch_mm_block_pte(0, pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);

		if (arch_mm_pte_is_table(table[i], level)) {
			mm_dump_table_recursive(
				ptr_from_va(va_from_pa(
					arch_mm_table_from_pte(table[i]))),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, pa_init((uintpaddr_t)text_begin),
			       pa_init((uintpaddr_t)text_end), mode) &&
	       mm_ptable_unmap(t, pa_init((uintpaddr_t)rodata_begin),
			       pa_init((uintpaddr_t)rodata_end), mode) &&
	       mm_ptable_unmap(t, pa_init((uintpaddr_t)data_begin),
			       pa_init((uintpaddr_t)data_end), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(const pte_t *table, ptable_addr_t addr,
				   int level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			ptr_from_va(va_from_pa(arch_mm_table_from_pte(pte))),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates a VM's page table such that the given physical address page is
 * mapped in the address space at the corresponding address page in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
			     ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map_page(t, begin, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped, so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}
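
/*
 * Illustrative usage (hypothetical device address, not taken from this file):
 * identity map an MMIO page for the hypervisor and access it through the
 * returned pointer.
 *
 *	volatile uint32_t *regs = mm_identity_map(
 *		pa_init(0x9000000), pa_init(0x9001000),
 *		MM_MODE_R | MM_MODE_W | MM_MODE_D);
 *	if (regs != NULL) {
 *		// poke device registers
 *	}
 */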

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", text_begin, text_end);
	dlog("rodata: 0x%x - 0x%x\n", rodata_begin, rodata_end);
	dlog("data: 0x%x - 0x%x\n", data_begin, data_end);

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, pa_init(PL011_BASE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(pa_init((uintpaddr_t)text_begin),
			pa_init((uintpaddr_t)text_end),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(pa_init((uintpaddr_t)rodata_begin),
			pa_init((uintpaddr_t)rodata_end),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(pa_init((uintpaddr_t)data_begin),
			pa_init((uintpaddr_t)data_end),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}