/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 * SPDX-FileCopyrightText: Copyright Arm Limited and Contributors.
 */

/* This file is derived from the xlat_table_v2 library in the TF-A project. */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <utils_def.h>
#include <xlat_contexts.h>
#include "xlat_defs_private.h"
#include <xlat_tables.h>
#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(const struct xlat_ctx *ctx)
{
	(void)ctx;

	/* Empty */
}

void xlat_tables_print(struct xlat_ctx *ctx)
{
	(void)ctx;

	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_mmap_print(const struct xlat_ctx *ctx)
{
	VERBOSE("mmap:\n");

	for (unsigned int i = 0U; i < ctx->cfg->mmap_num; i++) {
		uintptr_t base_va;

		base_va = ((ctx->cfg->region == VA_LOW_REGION) ?
				ctx->cfg->mmap[i].base_va :
				(ctx->cfg->mmap[i].base_va +
				 ctx->cfg->base_va));
		if (MT_TYPE(ctx->cfg->mmap[i].attr) != MT_TRANSIENT) {
			VERBOSE(" VA:0x%lx PA:0x%lx size:0x%zx attr:0x%lx granularity:0x%zx\n",
				base_va, ctx->cfg->mmap[i].base_pa,
				ctx->cfg->mmap[i].size, ctx->cfg->mmap[i].attr,
				ctx->cfg->mmap[i].granularity);
		} else {
			VERBOSE(" VA:0x%lx PA: TRANSIENT size:0x%zx granularity:0x%zx\n",
				base_va, ctx->cfg->mmap[i].size,
				ctx->cfg->mmap[i].granularity);
		}
	}
	VERBOSE("\n");
}

/* Print the attributes of the specified block/page descriptor. */
static void xlat_desc_print(uint64_t desc)
{
	uint64_t mem_type_index = ATTR_INDEX_GET(desc);

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		VERBOSE("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		VERBOSE("NC");
	} else {
		if (mem_type_index != ATTR_DEVICE_INDEX) {
			/* Unsupported memory type */
			panic();
		}
		VERBOSE("DEV");
	}

	VERBOSE(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
	VERBOSE(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-PXN" : "-PEXEC");
	VERBOSE(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");

	if ((desc & LOWER_ATTRS(NS)) == 0ULL) {
		VERBOSE("-RL");
	} else {
		VERBOSE("-N");
	}

	/* Check the Guarded Page bit */
	if ((desc & GP) != 0ULL) {
		VERBOSE("-GP");
	}
}
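
/*
 * For reference, an illustrative example of the output: a descriptor for
 * inner/outer write-back cacheable memory that is writable and never
 * executable, with the NS bit clear, prints as "MEM-RW-PXN-XN-RL", while a
 * writable device mapping with the NS bit set prints as "DEV-RW-PXN-XN-N".
 */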

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
	"%s(%u invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(struct xlat_ctx *ctx,
				       uintptr_t table_base_va,
				       const uint64_t *table_base,
				       unsigned int table_entries,
				       unsigned int level)
{
	uint64_t *addr_inner;
	unsigned int invalid_row_count;
	unsigned int table_idx = 0U;
	size_t level_size;
	uintptr_t table_idx_va;

	if (level > XLAT_TABLE_LEVEL_MAX) {
		/* Level out of bounds */
		panic();
	}

	assert((ctx != NULL) &&
	       (ctx->cfg != NULL) &&
	       (ctx->tbls != NULL));

	level_size = XLAT_BLOCK_SIZE(level);
	table_idx_va = ((ctx->cfg->region == VA_LOW_REGION) ?
				table_base_va :
				(table_base_va + ctx->cfg->base_va));

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	invalid_row_count = 0U;

	while (table_idx < table_entries) {
		uint64_t desc;

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0U) {
				VERBOSE("%sVA:0x%lx size:0x%zx\n",
					level_spacers[level],
					table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1U) {
				VERBOSE(invalid_descriptors_omitted,
					level_spacers[level],
					invalid_row_count - 1U);
			}
			invalid_row_count = 0U;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but PAGE_DESC has
			 * the same value as TABLE_DESC, so we need to check
			 * the level as well.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
			    (level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				VERBOSE("%sVA:0x%lx size:0x%zx\n",
					level_spacers[level],
					table_idx_va, level_size);

				addr_inner = (uint64_t *)(void *)(desc & TABLE_ADDR_MASK);

				/* FIXME: Recursion. */
				xlat_tables_print_internal(ctx, table_idx_va,
					addr_inner, XLAT_TABLE_ENTRIES,
					level + 1U);
			} else {
				VERBOSE("%sVA:0x%lx PA:0x%lx size:0x%zx ",
					level_spacers[level], table_idx_va,
					(uint64_t)(desc & TABLE_ADDR_MASK),
					level_size);
				xlat_desc_print(desc);
				VERBOSE("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1U) {
		VERBOSE(invalid_descriptors_omitted,
			level_spacers[level], invalid_row_count - 1U);
	}
}

void xlat_tables_print(struct xlat_ctx *ctx)
{
	unsigned int used_page_tables;
	struct xlat_ctx_cfg *ctx_cfg = ctx->cfg;

	assert(ctx_cfg != NULL);

	uintptr_t max_mapped_va_offset = ((ctx_cfg->region == VA_LOW_REGION) ?
		ctx_cfg->max_mapped_va_offset :
		(ctx_cfg->max_mapped_va_offset + ctx_cfg->base_va));
	uintptr_t max_allowed_va = ((ctx_cfg->region == VA_LOW_REGION) ?
		ctx_cfg->max_va_size :
		(ctx_cfg->max_va_size + ctx_cfg->base_va));

	VERBOSE("Translation tables state:\n");
	VERBOSE(" Max allowed PA: 0x%lx\n", xlat_arch_get_max_supported_pa());
	VERBOSE(" Max allowed VA: 0x%lx\n", max_allowed_va);
	VERBOSE(" Max mapped PA: 0x%lx", ctx_cfg->max_mapped_pa);
	for (unsigned int i = 0U; i < ctx_cfg->mmap_num; i++) {
		if (ctx_cfg->mmap[i].attr == MT_TRANSIENT) {
			/*
			 * If there is a transient region on this context, we
			 * do not know what the highest PA will be, so print a
			 * note on the log.
			 */
			VERBOSE(" - Estimated (transient region)");
			break;
		}
	}
	VERBOSE("\n");
	VERBOSE(" Max mapped VA: 0x%lx\n", max_mapped_va_offset);

	VERBOSE(" Initial lookup level: %u\n", ctx_cfg->base_level);
	VERBOSE(" Entries @initial lookup level: %u\n",
		ctx->tbls->max_base_table_entries);

	used_page_tables = ctx->tbls->next_table;
	VERBOSE(" Used %u tables out of %u (spare: %u)\n",
		used_page_tables, ctx->tbls->tables_num,
		ctx->tbls->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0U, ctx->tbls->base_table,
				   ctx->tbls->max_base_table_entries,
				   ctx_cfg->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the last level table that maps
 * virtual_addr.
 *
 * On success, return the address of the last level table within the
 * translation table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 */
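/*
 * For orientation (assuming the 4KB translation granule): each level of the
 * walk below is indexed by a 9-bit slice of the VA, e.g. VA[47:39] at
 * level 0, VA[38:30] at level 1, VA[29:21] at level 2 and VA[20:12] at
 * level 3; XLAT_TABLE_IDX() extracts the slice for the requested level.
 */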
static uint64_t *find_xlat_last_table(uintptr_t virtual_addr,
				      const struct xlat_ctx * const ctx,
				      unsigned int * const out_level)
{
	unsigned int start_level;
	uint64_t *ret_table;
	unsigned int entries;
	struct xlat_ctx_tbls *ctx_tbls;
	struct xlat_ctx_cfg *ctx_cfg;

	assert(ctx != NULL);
	assert(ctx->cfg != NULL);
	assert(ctx->tbls != NULL);
	assert(out_level != NULL);

	ctx_tbls = ctx->tbls;
	ctx_cfg = ctx->cfg;
	start_level = ctx_cfg->base_level;
	ret_table = ctx_tbls->base_table;
	entries = ctx_tbls->max_base_table_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     level++) {
		unsigned int idx;
		uint64_t desc;
		uint64_t desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = ret_table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type != TABLE_DESC) {
			if ((desc_type == BLOCK_DESC) ||
			    (((desc_type == PAGE_DESC) ||
			      (desc_type == INVALID_DESC)) &&
			     (level == XLAT_TABLE_LEVEL_MAX))) {
				*out_level = level;
				return ret_table;
			}
			return NULL;
		}

		ret_table = (uint64_t *)(void *)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This point should not be reached: the translation table walk ends
	 * at level XLAT_TABLE_LEVEL_MAX at the latest and returns from inside
	 * the loop. This return is only needed to avoid MISRA problems.
	 */
	return NULL;
}

/*****************************************************************************
 * Public part of the utility library for translation tables.
 ****************************************************************************/

/*
 * Function to unmap a physical memory page from the descriptor entry and
 * VA given.
 * This function implements the "Break" part of the Break-Before-Make
 * semantics needed by the Armv8.x architecture in order to update the page
 * descriptors (a combined usage sketch follows
 * xlat_map_memory_page_with_attrs() below).
 *
 * This function returns 0 on success or an error code otherwise.
 *
 * For simplicity, this function will not take into consideration holes in
 * the table pointed to by 'table', as long as 'va' belongs to the VA space
 * owned by the context.
 */
int xlat_unmap_memory_page(struct xlat_table_entry * const table,
			   const uintptr_t va)
{
	uint64_t *entry;

	assert(table != NULL);

	entry = xlat_get_pte_from_table(table, va);

	if (entry == NULL) {
		return -EFAULT;
	}

	/*
	 * No need to perform any checks on this page descriptor as it is
	 * going to be made invalid anyway.
	 */
	xlat_write_descriptor(entry, INVALID_DESC);

	/* Invalidate any cached copy of this mapping in the TLBs. */
	xlat_arch_tlbi_va(va);

	/* Ensure completion of the invalidation. */
	xlat_arch_tlbi_va_sync();

	return 0;
}

/*
 * Function to map a physical memory page from the descriptor table entry
 * and VA given. This function implements the "Make" part of the
 * Break-Before-Make semantics needed by the Armv8.x architecture in order
 * to update the page descriptors.
 *
 * This function returns 0 on success or an error code otherwise.
 *
 * For simplicity, this function will not take into consideration holes in
 * the table pointed to by 'table', as long as 'va' belongs to the VA space
 * owned by the context.
 */
int xlat_map_memory_page_with_attrs(const struct xlat_table_entry * const table,
				    const uintptr_t va,
				    const uintptr_t pa,
				    const uint64_t attrs)
{
	uint64_t desc;
	uint64_t *desc_ptr;

	assert(table != NULL);

	desc_ptr = xlat_get_pte_from_table(table, va);

	if (desc_ptr == NULL) {
		return -EFAULT;
	}

	/* This function must only be called on invalid descriptors */
	if (xlat_read_descriptor(desc_ptr) != INVALID_DESC) {
		return -EFAULT;
	}

	/* Check that pa is within boundaries */
	if (pa > xlat_arch_get_max_supported_pa()) {
		return -EFAULT;
	}

	/* Generate the new descriptor */
	desc = xlat_desc(attrs, pa, table->level);

	xlat_write_descriptor(desc_ptr, desc);

	/* Ensure the translation table write has drained into memory */
	dsb(ishst);
	isb();

	return 0;
}
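
/*
 * Illustrative usage sketch (not part of the library): a minimal
 * Break-Before-Make sequence that remaps an already-mapped page at 'va' to
 * a new PA using the functions above. The helper name and its arguments are
 * hypothetical; 'attrs' is whatever attribute encoding xlat_desc() expects
 * for the context.
 *
 *	static int remap_page_example(struct xlat_ctx *ctx, uintptr_t va,
 *				      uintptr_t new_pa, uint64_t attrs)
 *	{
 *		struct xlat_table_entry entry;
 *		int ret = xlat_get_table_from_va(&entry, ctx, va);
 *
 *		if (ret != 0) {
 *			return ret;
 *		}
 *
 *		// "Break": invalidate the descriptor and its TLB entries.
 *		ret = xlat_unmap_memory_page(&entry, va);
 *		if (ret != 0) {
 *			return ret;
 *		}
 *
 *		// "Make": install the new descriptor.
 *		return xlat_map_memory_page_with_attrs(&entry, va,
 *						       new_pa, attrs);
 *	}
 */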

/*
 * Return a table entry structure given a context and a VA.
 * The result is populated in the structure pointed to by 'retval'.
 *
 * This function returns 0 on success or a Linux error code otherwise.
 */
int xlat_get_table_from_va(struct xlat_table_entry * const retval,
			   const struct xlat_ctx * const ctx,
			   const uintptr_t va)
{
	uintptr_t page_va;
	uint64_t *table;
	unsigned int level;
	struct xlat_ctx_cfg *ctx_cfg;

	assert((ctx != NULL) &&
	       (ctx->cfg != NULL) &&
	       (ctx->tbls != NULL) &&
	       (retval != NULL) &&
	       (ctx->tbls->initialized == true));

	ctx_cfg = ctx->cfg;

	/* Check if the VA is within the mapped range */
	if ((va > (ctx_cfg->max_mapped_va_offset + ctx_cfg->base_va)) ||
	    (va < ctx_cfg->base_va)) {
		return -EFAULT;
	}

	/*
	 * From the translation tables' point of view, the VA is actually an
	 * offset with regard to the base address of the VA space, so before
	 * using a VA, we need to subtract the base VA from it.
	 */
	page_va = va - ctx_cfg->base_va;
	page_va &= ~PAGE_SIZE_MASK; /* Page address of the VA passed. */

	table = find_xlat_last_table(page_va, ctx, &level);

	if (table == NULL) {
		WARN("Address 0x%lx is not mapped.\n", va);
		return -EFAULT;
	}

	/* Maximum number of entries used by this table. */
	if (level == ctx_cfg->base_level) {
		retval->entries = ctx->tbls->max_base_table_entries;
	} else {
		retval->entries = XLAT_TABLE_ENTRIES;
	}

	retval->table = table;
	retval->level = level;
	retval->base_va = ctx_cfg->base_va;

	return 0;
}

/*
 * This function finds the descriptor entry in a table given the
 * corresponding table entry structure and the VA for that descriptor.
 *
 * If 'va' is not mapped by the table pointed to by 'entry', it returns NULL.
 *
 * For simplicity, and as long as 'va' belongs to the VA space owned by the
 * translation context, this function will not take into consideration holes
 * in the table pointed to by 'entry', whether because the address was never
 * mapped by the caller or because it was left as INVALID_DESC for future
 * dynamic mapping.
 */
uint64_t *xlat_get_pte_from_table(const struct xlat_table_entry * const entry,
				  const uintptr_t va)
{
	unsigned int index;
	uint64_t *table;
	uintptr_t va_offset;

	assert(entry != NULL);

	if (va < entry->base_va) {
		return NULL;
	}

	/*
	 * From the translation tables' point of view, the VA is actually an
	 * offset with regard to the base address of the VA space, so before
	 * using a VA, we need to subtract the base VA from it.
	 */
	va_offset = va - entry->base_va;
	table = entry->table;
	index = XLAT_TABLE_IDX(va_offset, entry->level);

	if (index >= entry->entries) {
		return NULL;
	}

	return &table[index];
}
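
/*
 * Illustrative usage sketch (not part of the library): look up and read the
 * descriptor that maps 'va' in a given context, combining
 * xlat_get_table_from_va() and xlat_get_pte_from_table(). The helper name
 * and the use of INVALID_DESC as an error sentinel are hypothetical.
 *
 *	static uint64_t read_desc_example(const struct xlat_ctx *ctx,
 *					  uintptr_t va)
 *	{
 *		struct xlat_table_entry entry;
 *		uint64_t *pte;
 *
 *		if (xlat_get_table_from_va(&entry, ctx, va) != 0) {
 *			return INVALID_DESC;
 *		}
 *
 *		pte = xlat_get_pte_from_table(&entry, va);
 *		if (pte == NULL) {
 *			return INVALID_DESC;
 *		}
 *
 *		return xlat_read_descriptor(pte);
 *	}
 */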