/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#include "hf/dlog.h"
#include "hf/layout.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/* The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address, so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

/* Keep macro alignment */
/* clang-format off */

#define MAP_FLAG_COMMIT 0x01
#define MAP_FLAG_UNMAP  0x02
#define MAP_FLAG_NOBBM  0x04
#define MAP_FLAG_STAGE1 0x08

/* clang-format on */

static struct mm_ptable ptable;

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
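	/*
	 * For example, assuming a 4 KiB granule (PAGE_BITS == 12) and 9 bits
	 * of index per level (PAGE_LEVEL_BITS == 9), a level 0 entry covers
	 * 4 KiB and a level 1 entry covers 2 MiB.
	 */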
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
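	/*
	 * block_size is a power of two, so adding it and then clearing the
	 * low bits rounds up to the next block boundary; e.g. a hypothetical
	 * addr of 0x1234 with block_size 0x1000 gives 0x2000.
	 */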
	return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
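	/*
	 * A table at this level spans mm_entry_size(level + 1) bytes, i.e.
	 * one entry of the level above; assuming a 4 KiB granule, a level 1
	 * table spans 1 GiB so the end is the next 1 GiB boundary.
	 */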
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
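	/*
	 * Shift out the offsets handled by lower levels, then mask off the
	 * PAGE_LEVEL_BITS bits that index the table at this level.
	 */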
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}

/**
 * Allocates the given number of page tables. If more than one table is
 * requested, they are allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end,
			      bool stage1)
{
	if (stage1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Replaces a page table entry with the given value. If both old and new values
 * are valid, it performs a break-before-make sequence where it first writes an
 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
 * This prevents cases where CPUs have different 'valid' values in their TLBs,
 * which may result in issues, for example, with cache coherency.
 */
static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
			     uint8_t level, int flags, struct mpool *ppool)
{
	pte_t v = *pte;

	/*
	 * We need to do the break-before-make sequence if both values are
	 * present, and if it hasn't been inhibited by the NOBBM flag.
	 */
	if (!(flags & MAP_FLAG_NOBBM) && arch_mm_pte_is_valid(v, level) &&
	    arch_mm_pte_is_valid(new_pte, level)) {
		*pte = arch_mm_absent_pte(level);
		mm_invalidate_tlb(begin, begin + mm_entry_size(level),
				  flags & MAP_FLAG_STAGE1);
	}

	/* Assign the new pte. */
	*pte = new_pte;

	/* Free pages that aren't in use anymore. */
	mm_free_page_pte(v, level, ppool);
}

/**
 * Populates the provided page table entry with a reference to another table if
 * needed, that is, if it does not yet point to another table.
 *
 * Returns a pointer to the table the entry now points to.
 */
static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
						   pte_t *pte, uint8_t level,
						   int flags,
						   struct mpool *ppool)
{
	struct mm_page_table *ntable;
	pte_t v = *pte;
	pte_t new_pte;
	size_t i;
	size_t inc;
	uint8_t level_below = level - 1;

	/* Just return pointer to table if it's already populated. */
	if (arch_mm_pte_is_table(v, level)) {
		return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
	}

	/* Allocate a new table. */
	ntable = mm_alloc_page_tables(1, ppool);
	if (ntable == NULL) {
		dlog("Failed to allocate memory for page table\n");
		return NULL;
	}

	/* Determine template for new pte and its increment. */
	if (arch_mm_pte_is_block(v, level)) {
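		/*
		 * The entry is a block, so the new subtable must map the same
		 * range: start from an equivalent block one level below and
		 * step the address by that level's entry size.
		 */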
		inc = mm_entry_size(level_below);
		new_pte = arch_mm_block_pte(level_below,
					    arch_mm_block_from_pte(v, level),
					    arch_mm_pte_attrs(v, level));
	} else {
		inc = 0;
		new_pte = arch_mm_absent_pte(level_below);
	}

	/* Initialise entries in the new table. */
	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		ntable->entries[i] = new_pte;
		new_pte += inc;
	}

	/* Ensure initialisation is visible before updating the pte. */
	atomic_thread_fence(memory_order_release);

	/* Replace the pte entry, doing a break-before-make if needed. */
	mm_replace_entry(begin, pte,
			 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
			 level, flags, ppool);

	return ntable;
}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bounded by the maximum number of levels in a
 * page table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MAP_FLAG_COMMIT;
	bool unmap = flags & MAP_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do
			 * break-before-makes here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level, ppool);
			}
		}

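		/* Advance begin, pa and the pte to the next entry. */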
		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or,
 * if MAP_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

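	/*
	 * The root may consist of several concatenated tables, so keep
	 * walking sibling root tables until the whole range is handled.
	 */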
	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is
 * mapped or not mapped into the address space with the architecture-agnostic
 * mode provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, int mode,
				      struct mpool *ppool)
{
	uint64_t attrs = arch_mm_mode_to_attrs(mode);
	int flags = (mode & MM_MODE_NOINVALIDATE ? MAP_FLAG_NOBBM : 0) |
		    (mode & MM_MODE_STAGE1 ? MAP_FLAG_STAGE1 : 0) |
		    (mode & MM_MODE_INVALID && mode & MM_MODE_UNOWNED
			     ? MAP_FLAG_UNMAP
			     : 0);
	uint8_t root_level = arch_mm_max_level(mode) + 1;
	ptable_addr_t ptable_end =
		arch_mm_root_table_count(mode) * mm_entry_size(root_level);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * TODO: replace with assertions that the max level will be greater
	 * than 0 and less than 255 so wrapping will not be a problem and will
	 * not lead to subsequent overflows.
	 */
	if (root_level == 0 || root_level == 1) {
		return false;
	}

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MAP_FLAG_COMMIT, ppool)) {
		return false;
	}

	/* Invalidate the tlb. */
	if (!(mode & MM_MODE_NOINVALIDATE)) {
		mm_invalidate_tlb(begin, end, (mode & MM_MODE_STAGE1) != 0);
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is
 * mapped into the address space with the architecture-agnostic mode provided.
 */
static bool mm_ptable_identity_map(struct mm_ptable *t, paddr_t pa_begin,
				   paddr_t pa_end, int mode,
				   struct mpool *ppool)
{
	return mm_ptable_identity_update(t, pa_begin, pa_end, mode, ppool);
}

/**
 * Updates the given table such that the given physical address range is not
 * mapped into the address space.
 */
static bool mm_ptable_unmap(struct mm_ptable *t, paddr_t pa_begin,
			    paddr_t pa_end, int mode, struct mpool *ppool)
{
	return mm_ptable_identity_update(
		t, pa_begin, pa_end, mode | MM_MODE_UNOWNED | MM_MODE_INVALID,
		ppool);
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
void mm_ptable_dump(struct mm_ptable *t, int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	int max_level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given a table PTE pointing to a table whose entries all have identical
 * attributes, returns the single entry with which the table can be replaced.
 * Note that the table PTE will no longer be valid after calling this function
 * as the table may have been freed.
 *
 * If the table is freed, the memory is freed directly rather than by calling
 * `mm_free_page_pte()` as it is known to not have subtables.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level,
				struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		/* Free the table and return an absent entry. */
		mpool_free(ppool, table);
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	/* Free the table and return a block. */
	mpool_free(ppool, table);
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level,
				    struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;
	uint64_t attrs;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

	/*
	 * Check if all entries are blocks with the same flags or are all
	 * absent. It assumes addresses are contiguous due to identity mapping.
	 */
	attrs = arch_mm_pte_attrs(table->entries[0], level);
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		/* First try to defrag the entry, in case it is a subtable. */
		table->entries[i] = mm_ptable_defrag_entry(table->entries[i],
							   level - 1, ppool);

		/*
		 * If the entry isn't a block or has different attributes then
		 * it isn't possible to defragment it.
		 */
		if (!arch_mm_pte_is_block(table->entries[i], level - 1) ||
		    arch_mm_pte_attrs(table->entries[i], level) != attrs) {
			return entry;
		}
	}

	return mm_merge_table_pte(entry, level, ppool);
}

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
void mm_ptable_defrag(struct mm_ptable *t, int mode, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level, ppool);
		}
	}
}

/**
 * Determines if the given address is valid in the address space of the given
 * page table by recursively traversing all levels of the page table.
 */
static bool mm_is_mapped_recursive(struct mm_page_table *table,
				   ptable_addr_t addr, uint8_t level)
{
	pte_t pte;
	ptable_addr_t va_level_end = mm_level_end(addr, level);

	/* It isn't mapped if it doesn't fit in the table. */
	if (addr >= va_level_end) {
		return false;
	}

	pte = table->entries[mm_index(addr, level)];

	if (!arch_mm_pte_is_valid(pte, level)) {
		return false;
	}

	if (arch_mm_pte_is_table(pte, level)) {
		return mm_is_mapped_recursive(
			mm_page_table_from_pa(
				arch_mm_table_from_pte(pte, level)),
			addr, level - 1);
	}

	/* The entry is a valid block. */
	return true;
}

/**
 * Determines if the given address is valid in the address space of the given
 * page table.
 */
static bool mm_ptable_is_mapped(struct mm_ptable *t, ptable_addr_t addr,
				int mode)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	size_t index;

	addr = mm_round_down_to_page(addr);
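	/*
	 * Indexing one level above the maximum selects which of the
	 * concatenated root tables covers the address.
	 */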
	index = mm_index(addr, level + 1);

	if (index >= arch_mm_root_table_count(mode)) {
		return false;
	}

	return mm_is_mapped_recursive(&tables[index], addr, level);
}

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int mode, struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = arch_mm_root_table_count(mode);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(arch_mm_max_level(mode));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
void mm_ptable_fini(struct mm_ptable *t, int mode, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = arch_mm_max_level(mode);
	uint8_t root_table_count = arch_mm_root_table_count(mode);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa, struct mpool *ppool)
{
	bool success = mm_ptable_identity_map(t, begin, end,
					      mode & ~MM_MODE_STAGE1, ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}

/**
 * Updates the VM's table such that the given physical address range is not
 * mapped in the address space.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end, int mode,
		 struct mpool *ppool)
{
	return mm_ptable_unmap(t, begin, end, mode & ~MM_MODE_STAGE1, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, int mode, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), mode,
			   ppool) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(), mode,
			   ppool) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), mode,
			   ppool);
}

/**
 * Checks whether the given intermediate physical address is mapped in the
 * given page table of a VM.
 */
bool mm_vm_is_mapped(struct mm_ptable *t, ipaddr_t ipa, int mode)
{
	return mm_ptable_is_mapped(t, ipa_addr(ipa), mode & ~MM_MODE_STAGE1);
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
{
	if (mm_ptable_identity_map(&ptable, begin, end, mode | MM_MODE_STAGE1,
				   ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(paddr_t begin, paddr_t end, int mode, struct mpool *ppool)
{
	return mm_ptable_unmap(&ptable, begin, end, mode | MM_MODE_STAGE1,
			       ppool);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	dlog("text: 0x%x - 0x%x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: 0x%x - 0x%x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: 0x%x - 0x%x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_MODE_STAGE1, ppool)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Map page for uart. */
	/* TODO: We may not want to map this. */
	mm_ptable_identity_map(
		&ptable, pa_init(PL011_BASE),
		pa_add(pa_init(PL011_BASE), PAGE_SIZE),
		MM_MODE_R | MM_MODE_W | MM_MODE_D | MM_MODE_STAGE1, ppool);

	/* Map each section. */
	mm_identity_map(layout_text_begin(), layout_text_end(), MM_MODE_X,
			ppool);

	mm_identity_map(layout_rodata_begin(), layout_rodata_end(), MM_MODE_R,
			ppool);

	mm_identity_map(layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W, ppool);

	return arch_mm_init(ptable.root, true);
}

bool mm_cpu_init(void)
{
	return arch_mm_init(ptable.root, false);
}

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mpool *ppool)
{
	mm_ptable_defrag(&ptable, MM_MODE_STAGE1, ppool);
}