#include "mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "alloc.h"
#include "dlog.h"

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static inline size_t mm_entry_size(int level)
{
	return 1ull << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given virtual address, calculates the end (exclusive) of the address
 * range that can be represented by the same table at the given level.
 */
static inline vaddr_t mm_level_end(vaddr_t va, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((va >> offset) + 1) << offset;
}

/**
 * For a given virtual address, calculates the index at which its entry is
 * stored in a table at the given level.
 */
static inline size_t mm_index(vaddr_t va, int level)
{
	vaddr_t v = va >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((1ull << PAGE_LEVEL_BITS) - 1);
}

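/*
 * Worked example for the helpers above (illustrative only; it assumes the
 * common 4 KiB granule configuration where PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9):
 *
 *	mm_entry_size(0) == 1 << 12            (4 KiB, one page)
 *	mm_entry_size(1) == 1 << 21            (2 MiB)
 *	mm_entry_size(2) == 1 << 30            (1 GiB)
 *	mm_index(va, 1)  == (va >> 21) & 0x1ff (bits [29:21] of va)
 *	mm_level_end(va, 1) rounds va up to the next 1 GiB boundary, i.e. the
 *	end of the range covered by the level-1 table containing va's entry.
 */
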
/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v)) {
		return arch_mm_pte_to_table(v);
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (!arch_mm_pte_is_block(v)) {
		inc = 0;
		new_pte = arch_mm_absent_pte();
	} else {
		inc = mm_entry_size(level - 1);
		if (level == 1) {
			new_pte = arch_mm_block_to_page_pte(v);
		} else {
			new_pte = v;
		}
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(paddr_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_pa_to_table_pte((paddr_t)ntable);

	return ntable;
}

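/*
 * Note (descriptive, not normative): when the entry being expanded is absent,
 * every entry of the new table starts out absent (inc == 0). When it is a
 * block, the new table is filled with smaller entries that together cover the
 * same range, e.g. splitting a level-1 block yields page entries whose output
 * addresses step by mm_entry_size(0), so the existing mapping is preserved
 * while allowing finer-grained updates afterwards.
 */
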
/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level.
 */
static void mm_free_page_pte(pte_t pte, int level, bool sync)
{
	(void)pte;
	(void)level;
	(void)sync;
	/* TODO: Implement.
	if (!arch_mm_pte_is_present(pte) || level < 1)
		return;
	*/
}

/**
 * Updates the page table at the given level to map the given virtual address
 * range to a physical range using the provided (architecture-specific)
 * attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(vaddr_t va, vaddr_t va_end, paddr_t pa, uint64_t attrs,
			 pte_t *table, int level, int flags)
{
	size_t i = mm_index(va, level);
	vaddr_t va_level_end = mm_level_end(va, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap va_end so that we don't go over the current level max. */
	if (va_end > va_level_end) {
		va_end = va_level_end;
	}

	/* Fill each entry in the table. */
	while (va < va_end) {
		if (level == 0) {
			if (commit) {
				table[i] = arch_mm_pa_to_page_pte(pa, attrs);
			}
		} else if ((va_end - va) >= entry_size &&
			   arch_mm_is_block_allowed(level) &&
			   (va & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t pte = table[i];
				table[i] = arch_mm_pa_to_block_pte(pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(pte, level, sync);
			}
		} else {
			pte_t *nt =
				mm_populate_table_pte(table + i, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

		va = (va + entry_size) & ~(entry_size - 1);
		pa = (pa + entry_size) & ~(entry_size - 1);
		i++;
	}

	return true;
}

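/*
 * Note (descriptive): without MAP_FLAG_COMMIT, the walk above only allocates
 * any missing intermediate tables, which is the step that can fail; with
 * MAP_FLAG_COMMIT, the leaf (page or block) entries are actually written.
 * mm_ptable_map and mm_ptable_unmap rely on this so that their committing
 * pass should not require further allocations.
 */
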
/**
 * Invalidates the TLB for the given virtual address range.
 */
static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(begin, end);
	} else {
		arch_mm_invalidate_stage2_range(begin, end);
	}
}

/**
 * Updates the given table such that the given virtual address range is mapped
 * to the given physical address range in the architecture-agnostic mode
 * provided.
 */
bool mm_ptable_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
		   paddr_t paddr, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(&t->arch);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
	paddr = arch_mm_clear_pa(paddr);

	/*
	 * Do it in two steps to prevent leaving the table in a halfway-updated
	 * state. With this two-step approach, a failure may leave behind extra
	 * internal tables, but the mappings themselves are left unchanged.
	 */
	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, paddr, attrs, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);

	return true;
}

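/*
 * Example usage (a sketch only; MM_MODE_R and MM_MODE_W are assumed names for
 * read/write mode bits defined outside this file):
 *
 *	struct mm_ptable ptable;
 *
 *	if (mm_ptable_init(&ptable, 0) &&
 *	    mm_ptable_map(&ptable, 0x40000000, 0x40200000, 0x40000000,
 *			  MM_MODE_R | MM_MODE_W)) {
 *		// The 2 MiB range is now identity-mapped read/write.
 *	}
 */
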
/**
 * Updates the given table such that the given virtual address range is not
 * mapped to any physical address.
 */
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(&t->arch);

	begin = arch_mm_clear_va(begin);
	end = arch_mm_clear_va(end + PAGE_SIZE - 1);

	/* Also do updates in two steps, similarly to mm_ptable_map. */
	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, begin, 0, t->table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);

	return true;
}

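/*
 * Note (an observation, not a specification): unmapping is expressed as a map
 * of the same range with attrs == 0 (pa == begin is only a placeholder). This
 * relies on the architecture-specific PTE constructors treating zero
 * attributes as a non-present entry, an assumption about arch_mm_* that is
 * not visible in this file.
 */
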
/**
 * Updates the given table such that a single virtual address page is mapped
 * to a single physical address page in the provided architecture-agnostic mode.
 */
bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = t->table;
	bool sync = !(mode & MM_MODE_NOSYNC);

	va = arch_mm_clear_va(va);
	pa = arch_mm_clear_pa(pa);

	for (i = arch_mm_max_level(&t->arch); i > 0; i--) {
		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(va, 0);
	table[i] = arch_mm_pa_to_page_pte(pa, attrs);
	return true;
}

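/*
 * Note (descriptive): unlike mm_ptable_map, this single-page variant writes
 * the leaf entry in a single pass and performs no TLB invalidation and no
 * mm_free_page_pte() on any entry it overwrites; any such maintenance is
 * presumably left to the caller.
 */
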
/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i])) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
		if (!level) {
			continue;
		}

		if (arch_mm_pte_is_table(table[i])) {
			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
						level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t)
{
	int max_level = arch_mm_max_level(&t->arch);
	mm_dump_table_recursive(t->table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t)
{
	/* TODO: Implement. */
	(void)t;
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte();
	}

	t->table = table;
	arch_mm_ptable_init(&t->arch);

	return true;
}