/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mm.h"

#include <stdatomic.h>
#include <stdint.h>

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/plat/console.h"
#include "hf/static_assert.h"

/**
 * This file has functions for managing the level 1 and 2 page tables used by
 * Hafnium. There is a level 1 mapping used by Hafnium itself to access memory,
 * and then a level 2 mapping per VM. The design assumes that all page tables
 * contain only 1-1 mappings, aligned on the block boundaries.
 */

/*
 * For stage 2, the input is an intermediate physical address rather than a
 * virtual address so:
 */
static_assert(
	sizeof(ptable_addr_t) == sizeof(uintpaddr_t),
	"Currently, the same code manages the stage 1 and stage 2 page tables "
	"which only works if the virtual and intermediate physical addresses "
	"are the same size. It looks like that assumption might not be holding "
	"so we need to check that everything is going to be ok.");

static struct mm_ptable ptable;
static struct spinlock ptable_lock;

static bool mm_stage2_invalidate = false;

/**
 * After calling this function, modifications to stage-2 page tables will use
 * break-before-make and invalidate the TLB for the affected range.
 */
void mm_vm_enable_invalidation(void)
{
	mm_stage2_invalidate = true;
}

/**
 * Gets the page table from the physical address.
 */
static struct mm_page_table *mm_page_table_from_pa(paddr_t pa)
{
	return ptr_from_va(va_from_pa(pa));
}

/**
 * Rounds an address down to a page boundary.
 */
static ptable_addr_t mm_round_down_to_page(ptable_addr_t addr)
{
	return addr & ~((ptable_addr_t)(PAGE_SIZE - 1));
}

/**
 * Rounds an address up to a page boundary.
 */
static ptable_addr_t mm_round_up_to_page(ptable_addr_t addr)
{
	return mm_round_down_to_page(addr + PAGE_SIZE - 1);
}
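
/*
 * Worked example (an illustration only, assuming PAGE_SIZE == 4096, i.e.
 * PAGE_BITS == 12; other granules scale accordingly):
 *
 *   mm_round_down_to_page(0x1234) == 0x1000
 *   mm_round_up_to_page(0x1234)   == 0x2000
 *   mm_round_up_to_page(0x1000)   == 0x1000  (already aligned)
 */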

/**
 * Calculates the size of the address space represented by a page table entry
 * at the given level.
 */
static size_t mm_entry_size(uint8_t level)
{
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
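
/*
 * Worked example (an illustration only, assuming PAGE_BITS == 12 and
 * PAGE_LEVEL_BITS == 9, i.e. a 4 KiB granule with 512-entry tables as on
 * AArch64):
 *
 *   mm_entry_size(0) == 1 << 12 == 4 KiB  (a page)
 *   mm_entry_size(1) == 1 << 21 == 2 MiB  (a level 1 block)
 *   mm_entry_size(2) == 1 << 30 == 1 GiB  (a level 2 block)
 */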

/**
 * Gets the address of the start of the next block of the given size. The size
 * must be a power of two.
 */
static ptable_addr_t mm_start_of_next_block(ptable_addr_t addr,
					    size_t block_size)
{
	return (addr + block_size) & ~(block_size - 1);
}

/**
 * Gets the physical address of the start of the next block of the given size.
 * The size must be a power of two.
 */
static paddr_t mm_pa_start_of_next_block(paddr_t pa, size_t block_size)
{
	return pa_init((pa_addr(pa) + block_size) & ~(block_size - 1));
}

/**
 * For a given address, calculates the maximum (plus one) address that can be
 * represented by the same table at the given level.
 */
static ptable_addr_t mm_level_end(ptable_addr_t addr, uint8_t level)
{
	size_t offset = PAGE_BITS + (level + 1) * PAGE_LEVEL_BITS;

	return ((addr >> offset) + 1) << offset;
}

/**
 * For a given address, calculates the index at which its entry is stored in a
 * table at the given level.
 */
static size_t mm_index(ptable_addr_t addr, uint8_t level)
{
	ptable_addr_t v = addr >> (PAGE_BITS + level * PAGE_LEVEL_BITS);

	return v & ((UINT64_C(1) << PAGE_LEVEL_BITS) - 1);
}
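
/*
 * Worked example (an illustration only, with the same 4 KiB granule
 * assumption as above) for the address 0x40321000:
 *
 *   mm_index(0x40321000, 0) == (0x40321000 >> 12) & 0x1ff == 0x121
 *   mm_index(0x40321000, 1) == (0x40321000 >> 21) & 0x1ff == 0x001
 *   mm_index(0x40321000, 2) == (0x40321000 >> 30) & 0x1ff == 0x001
 */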

/**
 * Allocates `count` page tables; if more than one is requested, they are
 * allocated contiguously.
 */
static struct mm_page_table *mm_alloc_page_tables(size_t count,
						  struct mpool *ppool)
{
	if (count == 1) {
		return mpool_alloc(ppool);
	}

	return mpool_alloc_contiguous(ppool, count, count);
}

/**
 * Returns the maximum level in the page table given the flags.
 */
static uint8_t mm_max_level(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_max_level()
					: arch_mm_stage2_max_level();
}

/**
 * Returns the number of root-level tables given the flags.
 */
static uint8_t mm_root_table_count(int flags)
{
	return (flags & MM_FLAG_STAGE1) ? arch_mm_stage1_root_table_count()
					: arch_mm_stage2_root_table_count();
}

/**
 * Invalidates the TLB for the given address range.
 */
static void mm_invalidate_tlb(ptable_addr_t begin, ptable_addr_t end, int flags)
{
	if (flags & MM_FLAG_STAGE1) {
		arch_mm_invalidate_stage1_range(va_init(begin), va_init(end));
	} else {
		arch_mm_invalidate_stage2_range(ipa_init(begin), ipa_init(end));
	}
}

/**
 * Frees all page-table-related memory associated with the given pte at the
 * given level, including any subtables recursively.
 */
static void mm_free_page_pte(pte_t pte, uint8_t level, struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;

	if (!arch_mm_pte_is_table(pte, level)) {
		return;
	}

	/* Recursively free any subtables. */
	table = mm_page_table_from_pa(arch_mm_table_from_pte(pte, level));
	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		mm_free_page_pte(table->entries[i], level - 1, ppool);
	}

	/* Free the table itself. */
	mpool_free(ppool, table);
}

/**
 * Returns the first address which cannot be encoded in page tables given by
 * `flags`. It is the exclusive end of the address space created by the tables.
 */
ptable_addr_t mm_ptable_addr_space_end(int flags)
{
	return mm_root_table_count(flags) *
	       mm_entry_size(mm_max_level(flags) + 1);
}
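
/*
 * Worked example (an illustration only, not a statement about any particular
 * platform): with a 4 KiB granule, if mm_max_level() returns 2 and
 * mm_root_table_count() returns 4 concatenated root tables, each root table
 * spans mm_entry_size(3) == 512 GiB and the address space ends at
 * 4 * 512 GiB == 2 TiB.
 */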

/**
 * Initialises the given page table.
 */
bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	uint8_t i;
	size_t j;
	struct mm_page_table *tables;
	uint8_t root_table_count = mm_root_table_count(flags);

	tables = mm_alloc_page_tables(root_table_count, ppool);
	if (tables == NULL) {
		return false;
	}

	for (i = 0; i < root_table_count; i++) {
		for (j = 0; j < MM_PTE_PER_PAGE; j++) {
			tables[i].entries[j] =
				arch_mm_absent_pte(mm_max_level(flags));
		}
	}

	/*
	 * TODO: halloc could return a virtual or physical address if mm not
	 * enabled?
	 */
	t->root = pa_init((uintpaddr_t)tables);

	return true;
}

/**
 * Frees all memory associated with the given page table.
 */
static void mm_ptable_fini(struct mm_ptable *t, int flags, struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			mm_free_page_pte(tables[i].entries[j], level, ppool);
		}
	}

	mpool_add_chunk(ppool, tables,
			sizeof(struct mm_page_table) * root_table_count);
}
261
262/**
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000263 * Replaces a page table entry with the given value. If both old and new values
Andrew Scullc66a04d2018-12-07 13:41:56 +0000264 * are valid, it performs a break-before-make sequence where it first writes an
265 * invalid value to the PTE, flushes the TLB, then writes the actual new value.
266 * This is to prevent cases where CPUs have different 'valid' values in their
267 * TLBs, which may result in issues for example in cache coherency.
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000268 */
269static void mm_replace_entry(ptable_addr_t begin, pte_t *pte, pte_t new_pte,
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000270 uint8_t level, int flags, struct mpool *ppool)
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000271{
272 pte_t v = *pte;
273
274 /*
275 * We need to do the break-before-make sequence if both values are
Andrew Scull3cd9e262019-01-08 17:59:22 +0000276 * present and the TLB is being invalidated.
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000277 */
Andrew Scullda241972019-01-05 18:17:48 +0000278 if (((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) &&
279 arch_mm_pte_is_valid(v, level) &&
Andrew Scullc66a04d2018-12-07 13:41:56 +0000280 arch_mm_pte_is_valid(new_pte, level)) {
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000281 *pte = arch_mm_absent_pte(level);
Andrew Scullda3df7f2019-01-05 17:49:27 +0000282 mm_invalidate_tlb(begin, begin + mm_entry_size(level), flags);
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000283 }
284
285 /* Assign the new pte. */
286 *pte = new_pte;
287
288 /* Free pages that aren't in use anymore. */
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000289 mm_free_page_pte(v, level, ppool);
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000290}
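
/*
 * Illustrative timeline of the break-before-make path above (a sketch of the
 * existing behaviour, not additional logic): when the old and new entries are
 * both valid and invalidation is enabled,
 *
 *   1. *pte = absent             "break": no translation is published
 *   2. TLB invalidate for range  no CPU still holds the old translation
 *   3. *pte = new_pte            "make": the new translation is published
 *
 * Skipping steps 1-2 could leave two CPUs holding conflicting TLB entries for
 * the same address, with the cache-coherency issues described above.
 */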
291
292/**
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100293 * Populates the provided page table entry with a reference to another table if
294 * needed, that is, if it does not yet point to another table.
295 *
296 * Returns a pointer to the table the entry now points to.
297 */
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000298static struct mm_page_table *mm_populate_table_pte(ptable_addr_t begin,
299 pte_t *pte, uint8_t level,
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000300 int flags,
301 struct mpool *ppool)
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100302{
Andrew Scull4e5f8142018-10-12 14:37:19 +0100303 struct mm_page_table *ntable;
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100304 pte_t v = *pte;
305 pte_t new_pte;
306 size_t i;
307 size_t inc;
Andrew Sculle9827712018-10-19 14:54:20 +0100308 uint8_t level_below = level - 1;
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100309
310 /* Just return pointer to table if it's already populated. */
Andrew Scull78d6fd92018-09-06 15:08:36 +0100311 if (arch_mm_pte_is_table(v, level)) {
Andrew Scull3681b8d2018-12-12 14:22:59 +0000312 return mm_page_table_from_pa(arch_mm_table_from_pte(v, level));
Andrew Scull7364a8e2018-07-19 15:39:29 +0100313 }
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100314
315 /* Allocate a new table. */
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000316 ntable = mm_alloc_page_tables(1, ppool);
Andrew Scull4e5f8142018-10-12 14:37:19 +0100317 if (ntable == NULL) {
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100318 dlog("Failed to allocate memory for page table\n");
319 return NULL;
320 }
321
322 /* Determine template for new pte and its increment. */
Andrew Scull78d6fd92018-09-06 15:08:36 +0100323 if (arch_mm_pte_is_block(v, level)) {
Andrew Scull78d6fd92018-09-06 15:08:36 +0100324 inc = mm_entry_size(level_below);
325 new_pte = arch_mm_block_pte(level_below,
Andrew Scull3681b8d2018-12-12 14:22:59 +0000326 arch_mm_block_from_pte(v, level),
327 arch_mm_pte_attrs(v, level));
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100328 } else {
Andrew Scull78d6fd92018-09-06 15:08:36 +0100329 inc = 0;
Andrew Walbran1b99f9d2018-10-03 17:54:40 +0100330 new_pte = arch_mm_absent_pte(level_below);
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100331 }
332
333 /* Initialise entries in the new table. */
Andrew Scull4e5f8142018-10-12 14:37:19 +0100334 for (i = 0; i < MM_PTE_PER_PAGE; i++) {
335 ntable->entries[i] = new_pte;
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100336 new_pte += inc;
337 }
338
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000339 /* Ensure initialisation is visible before updating the pte. */
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100340 atomic_thread_fence(memory_order_release);
Wedson Almeida Filho7c913232018-11-23 18:20:29 +0000341
342 /* Replace the pte entry, doing a break-before-make if needed. */
343 mm_replace_entry(begin, pte,
344 arch_mm_table_pte(level, pa_init((uintpaddr_t)ntable)),
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +0000345 level, flags, ppool);
Wedson Almeida Filhofed69022018-07-11 15:39:12 +0100346
347 return ntable;
348}

/**
 * Returns whether all entries in this table are absent.
 */
static bool mm_page_table_is_empty(struct mm_page_table *table, uint8_t level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; ++i) {
		if (arch_mm_pte_is_present(table->entries[i], level)) {
			return false;
		}
	}

	return true;
}

/**
 * Updates the page table at the given level to map the given address range to
 * a physical range using the provided (architecture-specific) attributes, or
 * if MM_FLAG_UNMAP is set, unmaps the given range instead.
 *
 * This function calls itself recursively if it needs to update additional
 * levels, but the recursion is bound by the maximum number of levels in a page
 * table.
 */
static bool mm_map_level(ptable_addr_t begin, ptable_addr_t end, paddr_t pa,
			 uint64_t attrs, struct mm_page_table *table,
			 uint8_t level, int flags, struct mpool *ppool)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);
	bool commit = flags & MM_FLAG_COMMIT;
	bool unmap = flags & MM_FLAG_UNMAP;

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Fill each entry in the table. */
	while (begin < end) {
		if (unmap ? !arch_mm_pte_is_present(*pte, level)
			  : arch_mm_pte_is_block(*pte, level) &&
				    arch_mm_pte_attrs(*pte, level) == attrs) {
			/*
			 * If the entry is already mapped with the right
			 * attributes, or already absent in the case of
			 * unmapping, no need to do anything; carry on to the
			 * next entry.
			 */
		} else if ((end - begin) >= entry_size &&
			   (unmap || arch_mm_is_block_allowed(level)) &&
			   (begin & (entry_size - 1)) == 0) {
			/*
			 * If the entire entry is within the region we want to
			 * map, map/unmap the whole entry.
			 */
			if (commit) {
				pte_t new_pte =
					unmap ? arch_mm_absent_pte(level)
					      : arch_mm_block_pte(level, pa,
								  attrs);
				mm_replace_entry(begin, pte, new_pte, level,
						 flags, ppool);
			}
		} else {
			/*
			 * If the entry is already a subtable get it; otherwise
			 * replace it with an equivalent subtable and get that.
			 */
			struct mm_page_table *nt = mm_populate_table_pte(
				begin, pte, level, flags, ppool);
			if (nt == NULL) {
				return false;
			}

			/*
			 * Recurse to map/unmap the appropriate entries within
			 * the subtable.
			 */
			if (!mm_map_level(begin, end, pa, attrs, nt, level - 1,
					  flags, ppool)) {
				return false;
			}

			/*
			 * If the subtable is now empty, replace it with an
			 * absent entry at this level. We never need to do
			 * break-before-makes here because we are assigning
			 * an absent value.
			 */
			if (commit && unmap &&
			    mm_page_table_is_empty(nt, level - 1)) {
				pte_t v = *pte;
				*pte = arch_mm_absent_pte(level);
				mm_free_page_pte(v, level, ppool);
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pa = mm_pa_start_of_next_block(pa, entry_size);
		pte++;
	}

	return true;
}
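
/*
 * Worked example (an illustration only, assuming a 4 KiB granule and that
 * level 1 allows blocks): identity-mapping [0x40000000, 0x40200000) with
 * MM_FLAG_COMMIT set visits level 1, finds the 2 MiB range aligned to the
 * entry size, and writes a single level 1 block PTE rather than populating a
 * level 0 table with 512 page entries.
 */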

/**
 * Updates the page table from the root to map the given address range to a
 * physical range using the provided (architecture-specific) attributes, or if
 * MM_FLAG_UNMAP is set, unmaps the given range instead.
 */
static bool mm_map_root(struct mm_ptable *t, ptable_addr_t begin,
			ptable_addr_t end, uint64_t attrs, uint8_t root_level,
			int flags, struct mpool *ppool)
{
	size_t root_table_size = mm_entry_size(root_level);
	struct mm_page_table *table =
		&mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];

	while (begin < end) {
		if (!mm_map_level(begin, end, pa_init(begin), attrs, table,
				  root_level - 1, flags, ppool)) {
			return false;
		}
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return true;
}

/**
 * Updates the given table such that the given physical address range is mapped
 * or not mapped into the address space with the architecture-agnostic mode
 * provided.
 */
static bool mm_ptable_identity_update(struct mm_ptable *t, paddr_t pa_begin,
				      paddr_t pa_end, uint64_t attrs, int flags,
				      struct mpool *ppool)
{
	uint8_t root_level = mm_max_level(flags) + 1;
	ptable_addr_t ptable_end = mm_ptable_addr_space_end(flags);
	ptable_addr_t end = mm_round_up_to_page(pa_addr(pa_end));
	ptable_addr_t begin = pa_addr(arch_mm_clear_pa(pa_begin));

	/*
	 * Assert condition to communicate the API constraint of
	 * mm_max_level(), that isn't encoded in the types, to the static
	 * analyzer.
	 */
	CHECK(root_level >= 2);

	/* Cap end to stay within the bounds of the page table. */
	if (end > ptable_end) {
		end = ptable_end;
	}

	/*
	 * Do it in two steps to prevent leaving the table in a halfway updated
	 * state. In such a two-step implementation, the table may be left with
	 * extra internal tables, but no different mapping on failure.
	 */
	if (!mm_map_root(t, begin, end, attrs, root_level, flags, ppool) ||
	    !mm_map_root(t, begin, end, attrs, root_level,
			 flags | MM_FLAG_COMMIT, ppool)) {
		return false;
	}

	/* Invalidate the tlb. */
	if ((flags & MM_FLAG_STAGE1) || mm_stage2_invalidate) {
		mm_invalidate_tlb(begin, end, flags);
	}

	return true;
}

/**
 * Writes the given table to the debug log, calling itself recursively to
 * write sub-tables.
 */
static void mm_dump_table_recursive(struct mm_page_table *table, uint8_t level,
				    int max_level)
{
	uint64_t i;

	for (i = 0; i < MM_PTE_PER_PAGE; i++) {
		if (!arch_mm_pte_is_present(table->entries[i], level)) {
			continue;
		}

		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i,
		     table->entries[i]);

		if (arch_mm_pte_is_table(table->entries[i], level)) {
			mm_dump_table_recursive(
				mm_page_table_from_pa(arch_mm_table_from_pte(
					table->entries[i], level)),
				level - 1, max_level);
		}
	}
}

/**
 * Writes the given table to the debug log.
 */
static void mm_ptable_dump(struct mm_ptable *t, int flags)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;

	for (i = 0; i < root_table_count; ++i) {
		mm_dump_table_recursive(&tables[i], max_level, max_level);
	}
}

/**
 * Given a table PTE whose entries all have identical attributes, returns the
 * single entry with which it can be replaced. Note that the table PTE will no
 * longer be valid after calling this function as the table may have been
 * freed.
 *
 * If the table is freed, the memory is freed directly rather than calling
 * `mm_free_page_pte()` as it is known to not have subtables.
 */
static pte_t mm_merge_table_pte(pte_t table_pte, uint8_t level,
				struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t block_attrs;
	uint64_t table_attrs;
	uint64_t combined_attrs;
	paddr_t block_address;

	table = mm_page_table_from_pa(arch_mm_table_from_pte(table_pte, level));

	if (!arch_mm_pte_is_present(table->entries[0], level - 1)) {
		/* Free the table and return an absent entry. */
		mpool_free(ppool, table);
		return arch_mm_absent_pte(level);
	}

	/* Might not be possible to merge the table into a single block. */
	if (!arch_mm_is_block_allowed(level)) {
		return table_pte;
	}

	/* Replace table with a single block, with equivalent attributes. */
	block_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);
	table_attrs = arch_mm_pte_attrs(table_pte, level);
	combined_attrs =
		arch_mm_combine_table_entry_attrs(table_attrs, block_attrs);
	block_address = arch_mm_block_from_pte(table->entries[0], level - 1);

	/* Free the table and return a block. */
	mpool_free(ppool, table);
	return arch_mm_block_pte(level, block_address, combined_attrs);
}

/**
 * Defragments the given PTE by recursively replacing any tables with blocks or
 * absent entries where possible.
 */
static pte_t mm_ptable_defrag_entry(pte_t entry, uint8_t level,
				    struct mpool *ppool)
{
	struct mm_page_table *table;
	uint64_t i;
	bool mergeable;
	bool base_present;
	uint64_t base_attrs;

	if (!arch_mm_pte_is_table(entry, level)) {
		return entry;
	}

	table = mm_page_table_from_pa(arch_mm_table_from_pte(entry, level));

	/* Defrag the first entry in the table and use it as the base entry. */
	static_assert(MM_PTE_PER_PAGE >= 1, "There must be at least one PTE.");
	table->entries[0] =
		mm_ptable_defrag_entry(table->entries[0], level - 1, ppool);
	base_present = arch_mm_pte_is_present(table->entries[0], level - 1);
	base_attrs = arch_mm_pte_attrs(table->entries[0], level - 1);

	/*
	 * Defrag the remaining entries in the table and check whether they are
	 * compatible with the base entry meaning the table can be merged into a
	 * block entry. It assumes addresses are contiguous due to identity
	 * mapping.
	 */
	mergeable = true;
	for (i = 1; i < MM_PTE_PER_PAGE; ++i) {
		bool present;

		table->entries[i] = mm_ptable_defrag_entry(table->entries[i],
							   level - 1, ppool);
		present = arch_mm_pte_is_present(table->entries[i], level - 1);

		if (present != base_present) {
			mergeable = false;
			continue;
		}

		if (!present) {
			continue;
		}

		if (!arch_mm_pte_is_block(table->entries[i], level - 1)) {
			mergeable = false;
			continue;
		}

		if (arch_mm_pte_attrs(table->entries[i], level - 1) !=
		    base_attrs) {
			mergeable = false;
			continue;
		}
	}

	if (mergeable) {
		return mm_merge_table_pte(entry, level, ppool);
	}

	return entry;
}
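
/*
 * Illustrative before/after (a sketch of the merge, assuming a 4 KiB
 * granule): a level 1 table PTE pointing at 512 level 0 page entries that are
 * all present, contiguous and identically-attributed defragments into one
 * 2 MiB level 1 block PTE, and the level 0 table page is returned to the
 * pool.
 */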

/**
 * Defragments the given page table by converting page table references to
 * blocks whenever possible.
 */
static void mm_ptable_defrag(struct mm_ptable *t, int flags,
			     struct mpool *ppool)
{
	struct mm_page_table *tables = mm_page_table_from_pa(t->root);
	uint8_t level = mm_max_level(flags);
	uint8_t root_table_count = mm_root_table_count(flags);
	uint8_t i;
	uint64_t j;

	/*
	 * Loop through each entry in the table. If it points to another table,
	 * check if that table can be replaced by a block or an absent entry.
	 */
	for (i = 0; i < root_table_count; ++i) {
		for (j = 0; j < MM_PTE_PER_PAGE; ++j) {
			tables[i].entries[j] = mm_ptable_defrag_entry(
				tables[i].entries[j], level, ppool);
		}
	}
}

/**
 * Gets the attributes applied to the given range of stage-2 addresses at the
 * given level.
 *
 * The `got_attrs` argument is initially passed as false until `attrs` contains
 * attributes of the memory region at which point it is passed as true.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_ptable_get_attrs_level(struct mm_page_table *table,
				      ptable_addr_t begin, ptable_addr_t end,
				      uint8_t level, bool got_attrs,
				      uint64_t *attrs)
{
	pte_t *pte = &table->entries[mm_index(begin, level)];
	ptable_addr_t level_end = mm_level_end(begin, level);
	size_t entry_size = mm_entry_size(level);

	/* Cap end so that we don't go over the current level max. */
	if (end > level_end) {
		end = level_end;
	}

	/* Check that each entry in the range has the same attributes. */
	while (begin < end) {
		if (arch_mm_pte_is_table(*pte, level)) {
			if (!mm_ptable_get_attrs_level(
				    mm_page_table_from_pa(
					    arch_mm_table_from_pte(*pte,
								   level)),
				    begin, end, level - 1, got_attrs, attrs)) {
				return false;
			}
			got_attrs = true;
		} else {
			if (!got_attrs) {
				*attrs = arch_mm_pte_attrs(*pte, level);
				got_attrs = true;
			} else if (arch_mm_pte_attrs(*pte, level) != *attrs) {
				return false;
			}
		}

		begin = mm_start_of_next_block(begin, entry_size);
		pte++;
	}

	/* The whole range was checked with consistent attributes. */
	return got_attrs;
}

/**
 * Gets the attributes applied to the given range of addresses in the stage-2
 * table.
 *
 * The value returned in `attrs` is only valid if the function returns true.
 *
 * Returns true if the whole range has the same attributes and false otherwise.
 */
static bool mm_vm_get_attrs(struct mm_ptable *t, ptable_addr_t begin,
			    ptable_addr_t end, uint64_t *attrs)
{
	int flags = 0;
	uint8_t max_level = mm_max_level(flags);
	uint8_t root_level = max_level + 1;
	size_t root_table_size = mm_entry_size(root_level);
	ptable_addr_t ptable_end =
		mm_root_table_count(flags) * mm_entry_size(root_level);
	struct mm_page_table *table;
	bool got_attrs = false;

	begin = mm_round_down_to_page(begin);
	end = mm_round_up_to_page(end);

	/* Fail if the addresses are out of range. */
	if (end > ptable_end) {
		return false;
	}

	table = &mm_page_table_from_pa(t->root)[mm_index(begin, root_level)];
	while (begin < end) {
		if (!mm_ptable_get_attrs_level(table, begin, end, max_level,
					       got_attrs, attrs)) {
			return false;
		}

		got_attrs = true;
		begin = mm_start_of_next_block(begin, root_table_size);
		table++;
	}

	return got_attrs;
}

bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool)
{
	return mm_ptable_init(t, 0, ppool);
}

void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_fini(t, 0, ppool);
}

/**
 * Updates a VM's page table such that the given physical address range is
 * mapped in the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, ipaddr_t *ipa, struct mpool *ppool)
{
	int flags = 0;
	bool success = mm_ptable_identity_update(
		t, begin, end, arch_mm_mode_to_stage2_attrs(mode), flags,
		ppool);

	if (success && ipa != NULL) {
		*ipa = ipa_from_pa(begin);
	}

	return success;
}
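
/*
 * Usage sketch (an illustration only; `vm_ptable`, `pool` and the address
 * range are hypothetical values that would come from the VM loader):
 *
 *   struct mm_ptable vm_ptable;
 *   ipaddr_t ipa;
 *   bool ok;
 *
 *   ok = mm_vm_init(&vm_ptable, &pool) &&
 *        mm_vm_identity_map(&vm_ptable, pa_init(0x40000000),
 *                           pa_init(0x48000000),
 *                           MM_MODE_R | MM_MODE_W | MM_MODE_X, &ipa, &pool);
 *
 * On success, the VM accesses [0x40000000, 0x48000000) at the identical
 * intermediate physical addresses, returned in `ipa`.
 */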

/**
 * Updates the VM's table such that the given physical address range has no
 * connection to the VM.
 */
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool)
{
	return mm_ptable_identity_update(
		t, begin, end,
		arch_mm_mode_to_stage2_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
					     MM_MODE_SHARED),
		MM_FLAG_UNMAP, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return mm_vm_unmap(t, layout_text_begin(), layout_text_end(), ppool) &&
	       mm_vm_unmap(t, layout_rodata_begin(), layout_rodata_end(),
			   ppool) &&
	       mm_vm_unmap(t, layout_data_begin(), layout_data_end(), ppool);
}

/**
 * Writes the given page table of a VM to the debug log.
 */
void mm_vm_dump(struct mm_ptable *t)
{
	mm_ptable_dump(t, 0);
}

/**
 * Defragments the VM page table.
 */
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool)
{
	mm_ptable_defrag(t, 0, ppool);
}

/**
 * Gets the mode of the given range of intermediate physical addresses if they
 * are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 */
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode)
{
	uint64_t attrs;
	bool ret;

	ret = mm_vm_get_attrs(t, ipa_addr(begin), ipa_addr(end), &attrs);
	if (ret) {
		*mode = arch_mm_stage2_attrs_to_mode(attrs);
	}

	return ret;
}
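
/*
 * Usage sketch (an illustration only; the VM table and range are
 * hypothetical):
 *
 *   uint32_t mode;
 *   bool uniform = mm_vm_get_mode(&vm_ptable, ipa_init(0x40000000),
 *                                 ipa_init(0x40010000), &mode);
 *
 * `mode` is only meaningful when `uniform` is true, i.e. when the whole
 * 64 KiB range is mapped with a single mode.
 */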

static struct mm_stage1_locked mm_stage1_lock_unsafe(void)
{
	return (struct mm_stage1_locked){.ptable = &ptable};
}

struct mm_stage1_locked mm_lock_stage1(void)
{
	sl_lock(&ptable_lock);
	return mm_stage1_lock_unsafe();
}

void mm_unlock_stage1(struct mm_stage1_locked *lock)
{
	CHECK(lock->ptable == &ptable);
	sl_unlock(&ptable_lock);
	lock->ptable = NULL;
}

/**
 * Updates the hypervisor page table such that the given physical address range
 * is mapped into the address space at the corresponding address range in the
 * architecture-agnostic mode provided.
 */
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool)
{
	if (mm_ptable_identity_update(stage1_locked.ptable, begin, end,
				      arch_mm_mode_to_stage1_attrs(mode),
				      MM_FLAG_STAGE1, ppool)) {
		return ptr_from_va(va_from_pa(begin));
	}

	return NULL;
}

/**
 * Updates the hypervisor table such that the given physical address range is
 * not mapped in the address space.
 */
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return mm_ptable_identity_update(
		stage1_locked.ptable, begin, end,
		arch_mm_mode_to_stage1_attrs(MM_MODE_UNOWNED | MM_MODE_INVALID |
					     MM_MODE_SHARED),
		MM_FLAG_STAGE1 | MM_FLAG_UNMAP, ppool);
}
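
/*
 * Usage sketch for the stage-1 API (an illustration only; the device range
 * and `pool` are hypothetical):
 *
 *   struct mm_stage1_locked locked = mm_lock_stage1();
 *   void *va = mm_identity_map(locked, pa_init(0x09000000),
 *                              pa_init(0x09001000), MM_MODE_R | MM_MODE_W,
 *                              &pool);
 *   mm_unlock_stage1(&locked);
 *
 * On success `va` is the virtual address of the mapped page; because the
 * mapping is an identity mapping, it equals the physical address 0x09000000.
 */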

/**
 * Defragments the hypervisor page table.
 */
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool)
{
	mm_ptable_defrag(stage1_locked.ptable, MM_FLAG_STAGE1, ppool);
}

/**
 * Initialises memory management for the hypervisor itself.
 */
bool mm_init(struct mpool *ppool)
{
	/* Locking is not enabled yet, so fake it. */
	struct mm_stage1_locked stage1_locked = mm_stage1_lock_unsafe();

	dlog("text: %#x - %#x\n", pa_addr(layout_text_begin()),
	     pa_addr(layout_text_end()));
	dlog("rodata: %#x - %#x\n", pa_addr(layout_rodata_begin()),
	     pa_addr(layout_rodata_end()));
	dlog("data: %#x - %#x\n", pa_addr(layout_data_begin()),
	     pa_addr(layout_data_end()));

	if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, ppool)) {
		dlog("Unable to allocate memory for page table.\n");
		return false;
	}

	/* Let console driver map pages for itself. */
	plat_console_mm_init(stage1_locked, ppool);

	/* Map each section. */
	mm_identity_map(stage1_locked, layout_text_begin(), layout_text_end(),
			MM_MODE_X, ppool);

	mm_identity_map(stage1_locked, layout_rodata_begin(),
			layout_rodata_end(), MM_MODE_R, ppool);

	mm_identity_map(stage1_locked, layout_data_begin(), layout_data_end(),
			MM_MODE_R | MM_MODE_W, ppool);

	return arch_mm_init(ptable.root);
}