/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/alloc.h"
#include "hf/dlog.h"
#include "hf/layout.h"

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/* For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so: */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_SYNC   0x01
#define MAP_FLAG_COMMIT 0x02

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
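/*
 * For example, assuming PAGE_SIZE == 4096, 0x1001 rounds up to 0x2000, while
 * mm_round_down_to_page above would round 0x1fff down to 0x1000.
 */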
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry at
 * the given level.
 */
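/*
 * For example, assuming PAGE_BITS == 12 and PAGE_LEVEL_BITS == 9 (a 4 KiB
 * granule with 512-entry tables), a level 0 entry covers 4 KiB, a level 1
 * entry covers 2 MiB and a level 2 entry covers 1 GiB.
 */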
static inline size_t mm_entry_size(int level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
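/*
 * For example, with the same assumed 4 KiB granule, a level 0 table spans
 * 2 MiB, so mm_level_end(0x300000, 0) == 0x400000.
 */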
static inline ptable_addr_t mm_level_end(ptable_addr_t addr, int level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;
	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
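/*
 * For example, with a 4 KiB granule, mm_index(0x300000, 0) == 0x100 and
 * mm_index(0x300000, 1) == 1.
 */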
static inline size_t mm_index(ptable_addr_t addr, int level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);
	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
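/*
 * If the existing entry is a block (e.g. a 2 MiB block at level 1 with a 4 KiB
 * granule), the new table is filled with smaller entries that together map the
 * same range; if it is absent, the new table is filled with absent entries.
 */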
static pte_t *mm_populate_table_pte(pte_t *pte, int level, bool sync_alloc)
{
	pte_t *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	int level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return ptr_from_va(va_from_pa(arch_mm_table_from_pte(v)));
	}

	/* Allocate a new table. */
	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
		PAGE_SIZE, PAGE_SIZE);
	if (!ntable) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v),
					    arch_mm_pte_attrs(v));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		ntable[i] = new_pte;
		new_pte += inc;
	}

	/*
	 * Ensure initialisation is visible before updating the actual pte, then
	 * update it.
	 */
	atomic_thread_fence(memory_order_release);
	*pte = arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable));

	return ntable;
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, int level)
{
	pte_t *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	table = ptr_from_va(va_from_pa(arch_mm_table_from_pte(pte)));
	/* Recursively free any subtables. */
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); ++i) {
		mm_free_page_pte(table[i], level - 1);
	}

	/* Free the table itself. */
	hfree(table);
}

/**
 * Updates the page table at the given level to map the given address range to a
 * physical range using the provided (architecture-specific) attributes.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
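/*
 * For example, with a 4 KiB granule, mapping [0x200000, 0x600000) at level 1
 * uses two 2 MiB block entries (assuming blocks are allowed at that level); a
 * range that is not block-aligned instead recurses into level 0 tables and
 * uses page entries.
 */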
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, pte_t *table, int level, int flags)
{
	pte_t *pte = &table[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool sync = flags & MAP_FLAG_SYNC;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if ((end - begin) >= entry_size &&
		    arch_mm_is_block_allowed(level) &&
		    (begin & (entry_size - 1)) == 0) {
			if (commit) {
				pte_t v = *pte;
				*pte = arch_mm_block_pte(level, pa, attrs);
				/* TODO: Add barrier. How do we ensure this
				 * isn't in use by another CPU? Send IPI? */
				mm_free_page_pte(v, level);
			}
		} else {
			pte_t *nt = mm_populate_table_pte(pte, level, sync);
			if (!nt) {
				return false;
			}

			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags)) {
				return false;
			}
		}

		begin = (begin + entry_size) & ~(entry_size - 1);
		pa = pa_init((pa_addr(pa) + entry_size) & ~(entry_size - 1));
		pte++;
	}

	return true;
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Updates the given table such that the given physical address range is mapped
 * into the address space with the corresponding address range in the
 * architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_level(begin, end, pa_begin, attrs, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, attrs, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode)
{
	int flags = (mode & MM_MODE_NOSYNC) ? 0 : MAP_FLAG_SYNC;
	int level = arch_mm_max_level(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	ptable_addr_t begin;
	ptable_addr_t end;

	pa_begin = arch_mm_clear_pa(pa_begin);
	begin = pa_addr(pa_begin);
	end = mm_round_up_to_page(pa_addr(pa_end));

	/* Also do updates in two steps, similarly to mm_ptable_identity_map. */
	if (!mm_map_level(begin, end, pa_begin, 0, table, level, flags)) {
		return false;
	}

	mm_map_level(begin, end, pa_begin, 0, table, level,
		     flags | MAP_FLAG_COMMIT);

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that a single physical address page is mapped
 * into the address space with the corresponding address page in the provided
 * architecture-agnostic mode.
 */
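/*
 * This walks from the highest level down to level 0, allocating any missing
 * intermediate tables on the way, and then installs a single level 0 block
 * (i.e. page) entry for the address.
 */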
static bool mm_ptable_identity_map_page(struct mm_ptable *t, paddr_t pa,
					int mode)
{
	size_t i;
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	bool sync = !(mode & MM_MODE_NOSYNC);
	ptable_addr_t addr;

	pa = arch_mm_clear_pa(pa);
	addr = pa_addr(pa);

	for (i = arch_mm_max_level(mode); i > 0; i--) {
		table = mm_populate_table_pte(&table[mm_index(addr, i)], i,
					      sync);
		if (!table) {
			return false;
		}
	}

	i = mm_index(addr, 0);
	table[i] = arch_mm_block_pte(0, pa, attrs);
	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(pte_t *table, int level, int max_level)
{
	uint64_t i;
	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		if (!arch_mm_pte_is_present(table[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);

		if (arch_mm_pte_is_table(table[i], level)) {
			mm_dump_table_recursive(
				ptr_from_va(va_from_pa(
					arch_mm_table_from_pte(table[i]))),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int max_level = arch_mm_max_level(mode);
	mm_dump_table_recursive(table, max_level, max_level);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode)
{
	/* TODO: Implement. */
	(void)t;
	(void)mode;
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_ptable_unmap(t, layout_text_begin(), layout_text_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			       mode) &&
	       mm_ptable_unmap(t, layout_data_begin(), layout_data_end(), mode);
}

/**
 * Determines if the given address is mapped in the given page table by
 * recursively traversing all levels of the page table.
 */
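/*
 * The address counts as mapped as soon as the walk reaches a block entry at
 * any level; an absent entry, or an address beyond the range of the table,
 * means it is not mapped.
 */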
static bool mm_is_mapped_recursive(const pte_t *table, ptable_addr_t addr,
				   int level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table[mm_index(addr, level)];

	if (arch_mm_pte_is_block(pte, level)) {
		return true;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			ptr_from_va(va_from_pa(arch_mm_table_from_pte(pte))),
			addr, level - 1);
	}

	/* The entry is not present. */
	return false;
}

/**
 * Determines if the given address is mapped in the given page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	pte_t *table = ptr_from_va(va_from_pa(t->table));
	int level = arch_mm_max_level(mode);

	addr = mm_round_down_to_page(addr);

	return mm_is_mapped_recursive(table, addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode)
{
	size_t i;
	pte_t *table;

	if (mode & MM_MODE_NOSYNC) {
		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
	} else {
		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
	}

	if (!table) {
		return false;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
		table[i] = arch_mm_absent_pte(arch_mm_max_level(mode));
	}

	/* TODO: halloc could return a virtual or physical address if mm not
	 * enabled? */
	t->table = pa_init((uintpaddr_t)table);

	return true;
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map(t, begin, end, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates a VM's page table such that the given physical address page is
 * mapped in the address space at the corresponding address page in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map_page(struct mm_ptable *t, paddr_t begin, int mode,
			     ipaddr_t *ipa)
{
	bool success =
		mm_ptable_identity_map_page(t, begin, mode & ~MM_MODE_STAGE1);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Translates an intermediate physical address to a physical address. Addresses
 * are currently identity mapped so this is a simple type conversion. Returns
 * true if the address was mapped in the table and the address was converted.
 */
bool mm_vm_translate(struct mm_ptable *t, ipaddr_t ipa, paddr_t *pa)
{
	bool mapped = mm_vm_is_mapped(t, ipa, 0);

	if (mapped) {
		*pa = pa_init(ipa_addr(ipa));
	}

	return mapped;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
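/*
 * For example, mm_init below maps the hypervisor's own sections with calls
 * such as:
 *
 *   mm_identity_map(layout_text_begin(), layout_text_end(),
 *                   MM_MODE_X | MM_MODE_NOSYNC);
 */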
void *mm_identity_map(paddr_t begin, paddr_t end, int mode)
{
	if (mm_ptable_identity_map(&ptable, begin, end,
				   mode | MM_MODE_STAGE1)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(void)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_NOSYNC | MM_MODE_STAGE1)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map_page(&ptable, pa_init(PL011_BASE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D |
					    MM_MODE_NOSYNC | MM_MODE_STAGE1);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(),
			MM_MODE_X | MM_MODE_NOSYNC);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(),
			MM_MODE_R | MM_MODE_NOSYNC);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W | MM_MODE_NOSYNC);

	return arch_mm_init(ptable.table, true);
}

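/**
 * Initialises memory management for the current CPU, reusing the hypervisor
 * page table set up by mm_init.
 */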
bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.table, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(void)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1);
}