#include "mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "alloc.h"
#include "dlog.h"

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */
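
/*
 * How these flags are used by mm_map_level() below: MAP_FLAG_SYNC selects the
 * synchronised allocator (halloc_aligned) rather than halloc_aligned_nosync
 * when new tables are needed, and MAP_FLAG_COMMIT makes the walk actually
 * write the leaf entries; without it the walk only pre-allocates the
 * intermediate tables that a later committing pass will need.
 */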

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return 1ull << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given virtual address, calculates the maximum (plus one) address that
 * can be represented by the same table at the given level.
 */
static inline vaddr_t mm_level_end(vaddr_t va, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((va >> offset) + 1) << offset;
}

/**
 * For a given virtual address, calculates the index at which its entry is
 * stored in a table at the given level.
 */
static inline size_t mm_index(vaddr_t va, int level)
{
	vaddr_t v = va >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
}
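
/*
 * Worked example, assuming 4KiB pages (PAGE_BITS == 12) and 9 index bits per
 * level (PAGE_LEVEL_BITS == 9); these constants are defined elsewhere:
 *   mm_entry_size(1)            == 1 << 21, i.e. each level-1 entry maps 2MiB;
 *   mm_index(0x40200000, 1)     == (0x40200000 >> 21) & 0x1ff == 1;
 *   mm_level_end(0x40200000, 1) == 0x80000000, the end of the 1GiB region
 *                                  covered by the level-1 table holding that
 *                                  entry.
 */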

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v)) {
		return arch_mm_pte_to_table(v);
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (!arch_mm_pte_is_block(v)) {
		inc = 0;
		new_pte = arch_mm_absent_pte();
	} else {
		inc = mm_entry_size(level - 1);
		if (level == 1) {
			new_pte = arch_mm_block_to_page_pte(v);
		} else {
			new_pte = v;
		}
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte,
	 * then update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_pa_to_table_pte((paddr_t)ntable);

	return ntable;
}
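
/*
 * Worked example, assuming 4KiB pages and 64-bit entries: splitting a level-1
 * block entry (covering 2MiB) with attributes A fills the new table with 512
 * page entries that cover the same 2MiB with attributes A, each entry's
 * output address advanced by mm_entry_size(0). The mapping itself is
 * therefore unchanged until the caller rewrites individual entries.
 */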

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given virtual address
 * range to a physical range using the provided (architecture-specific)
 * attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(vaddr_t va, vaddr_t va_end, paddr_t pa, uint64_t attrs,
			 pte_t *table, int level, int flags)
{
	size_t i = mm_index(va, level);
	vaddr_t va_level_end = mm_level_end(va, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap va_end so that we don't go over the current level max. */
	if (va_end > va_level_end) {
		va_end = va_level_end;
	}

	/* Fill each entry in the table. */
	while (va < va_end) {
		if (level == 0) {
			if (commit) {
				table[i] = arch_mm_pa_to_page_pte(pa, attrs);
			}
		} else if ((va_end - va) >= entry_size &&
			   arch_mm_is_block_allowed(level) &&
			   (va & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t pte = table[i];
				table[i] = arch_mm_pa_to_block_pte(pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(pte, level, sync);
			}
		} else {
			pte_t *nt =
				mm_populate_table_pte(table + i, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

		va = (va + entry_size) & ~(entry_size - 1);
		pa = (pa + entry_size) & ~(entry_size - 1);
		i++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given virtual address range.
 */
static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(begin, end);
	} else {
		arch_mm_invalidate_stage2_range(begin, end);
	}
}

/**
 * Updates the given table such that the given virtual address range is mapped
 * to the given physical address range in the architecture-agnostic mode
 * provided.
 */
bool mm_ptable_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
		   paddr_t paddr, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(&t->arch);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
	paddr = arch_mm_clear_pa(paddr);

	/*
	 * Do it in two steps to prevent leaving the table in a halfway
	 * updated state. If the operation fails part-way through, the table
	 * may be left with extra internal tables, but the existing mappings
	 * are not changed.
	 */
	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, paddr, attrs, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);

	return true;
}
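
/*
 * Usage sketch (illustrative; begin, end and mode stand for caller-supplied
 * values and error handling is elided): after
 *
 *	struct mm_ptable ptable;
 *	mm_ptable_init(&ptable, mode);
 *	mm_ptable_map(&ptable, begin, end, begin, mode);
 *
 * the range [begin, end) is identity-mapped with the attributes derived from
 * mode, and the corresponding TLB entries have been invalidated.
 */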

/**
 * Updates the given table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(&t->arch);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);

	/* Also do updates in two steps, similarly to mm_ptable_map. */
	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, begin, 0, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);

	return true;
}

/**
 * Updates the given table such that a single virtual address page is mapped
 * to a single physical address page in the provided architecture-agnostic
 * mode.
 */
bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = t->table;
	bool sync = !(mode & MM_MODE_NOSYNC);

	va = arch_mm_clear_va(va);
	pa = arch_mm_clear_pa(pa);

	for (i = arch_mm_max_level(&t->arch); i > 0; i--) {
		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(va, 0);
	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
	return true;
}
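
/*
 * Illustrative walk-through of mm_ptable_map_page (the level count is an
 * assumption): with arch_mm_max_level() == 2, the loop above populates or
 * reuses the level-2 and level-1 tables on the path to va, and the
 * single-page mapping is then written as a level-0 page entry.
 */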

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i])) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
		if (!level) {
			continue;
		}

		if (arch_mm_pte_is_table(table[i])) {
			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
						level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t)
{
	int max_level = arch_mm_max_level(&t->arch);
	mm_dump_table_recursive(t->table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t)
{
	/* TODO: Implement. */
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte();
	}

	t->table = table;
	arch_mm_ptable_init(&t->arch);

	return true;
}