/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */
5
6#include <CppUTest/CommandLineTestRunner.h>
7#include <CppUTest/TestHarness.h>
8
9extern "C" {
10#include <arch_helpers.h>
11#include <debug.h>
12#include <host_utils.h>
13#include <stdlib.h>
14#include <string.h>
15#include <test_helpers.h>
16#include <utils_def.h>
17#include <xlat_contexts.h> /* API to test */
18#include <xlat_defs.h>
19#include <xlat_tables.h> /* API to test */
20#include <xlat_test_defs.h>
21#include <xlat_test_helpers.h>
22}
23
/*
 * Second group of unit tests for the xlat library.
 * TEST_SETUP() re-initializes the host test harness and the emulated
 * architecture state before every test, so tests are independent.
 */
TEST_GROUP(xlat_tests_G2) {
	TEST_SETUP()
	{
		test_helpers_init();
		/*
		 * NOTE(review): "hepers" is a historical typo, but it
		 * presumably matches the declaration in
		 * xlat_test_helpers.h — any rename must be done there too.
		 */
		xlat_test_hepers_arch_init();
	}

	TEST_TEARDOWN()
	{}
};
34
35/*
36 * Generate VA space parameters given a walk start level and a region.
37 * The VA returned will fit in a single table of level `level`, so that
38 * there translation can start at that given level.
39 */
40static unsigned long long gen_va_space_params_by_lvl(unsigned int level,
41 xlat_addr_region_id_t region,
42 size_t *va_size)
43{
44 assert(level <= XLAT_TABLE_LEVEL_MAX);
45 assert(va_size != NULL);
46
47 *va_size = (1ULL << (XLAT_ADDR_SHIFT(level) +
48 XLAT_TABLE_ENTRIES_SHIFT));
49
50 return xlat_test_helpers_get_start_va(region, *va_size);
51}
52
53/*
54 * Generate a mmap array containing a set of mmap regions defined by
55 * 'start_va', 'last_lvl' and 'offset'. The mmap array will have
56 * three regions:
57 *
58 * - First region mapped at the beginning of a table whose final
59 * lookup level is 'last_lvl'. This region will be descendant of
60 * an entry at the beginning of a table at level 'first_lvl'.
61 * - Second region mapped at a random index of a table whose final
62 * lookup level is 'last_lvl'. This region will be descendant of
63 * an entry at a random index of a table at level 'first_lvl'.
64 * - Third region mapped at the end of a table whose final
65 * lookup level is 'last_lvl'. This region will be descendant of
66 * an entry at the final entry of a table at level 'first_lvl'.
67 *
68 * ┌──────────┐
69 * ┌───────────────┤ First │
70 * │ │ Region │
71 * │ ├──────────┤
72 * │ │ │
73 * │ │ │
74 * │ │ │
75 * │ │ │
76 * │ │ │
77 * │ │ │
78 * │ │ │
79 * ┌──────────────┐ │ │ │
80 * │ │ │ │ │
81 * │ First entry ├───────┘ │ │
82 * ├──────────────┤ │ │
83 * │ Second entry │ │ │
84 * │ (Reserved) │ └──────────┘
85 * │ │
86 * ├──────────────┤
87 * │ │ ┌──────────┐
88 * │ │ │ │
89 * │ │ │ │
90 * │ │ │ │
91 * ├──────────────┤ ├──────────┤
92 * │ Second │ │ Second │
93 * │ Region ├────────────────────────┤ Region │
94 * ├──────────────┤ ├──────────┤
95 * │ │ │ │
96 * │ │ │ │
97 * │ │ │ │
98 * │ │ │ │
99 * │ │ │ │
100 * │ │ │ │
101 * ├──────────────┤ └──────────┘
102 * │ │
103 * │ Third Region ├───────┐
104 * └──────────────┘ │ ┌─────────┐
105 * First Level │ │ │
106 * │ │ │
107 * │ │ │
108 * │ │ │
109 * │ │ │
110 * │ │ │
111 * │ │ │
112 * │ │ │
113 * │ │ │
114 * │ │ │
115 * │ │ │
116 * │ │ │
117 * │ │ │
118 * │ ├─────────┤
119 * └─────────────────┤ Third |
120 * | region │
121 * └─────────┘
122 * Last level
123 *
124 * For all the mmap regions, the granularity (returned in *granularity) is
125 * setup to the minimum granularity needed to map a block at level 'last_lvl'.
126 * The size of the mmap region is setup to the same as the granularity.
127 *
128 * This function also returns :
129 * - An array ('tbl_idxs') with the expected indexes mapping
130 * the regions at the last level table.
131 */
static int gen_mmap_array_by_level(xlat_mmap_region *mmap,
				   unsigned int *tbl_idxs,
				   unsigned int mmap_size,
				   unsigned int first_lvl,
				   unsigned int last_lvl,
				   size_t *granularity,
				   unsigned long long start_va,
				   bool allow_transient)
{
	uint64_t attrs;
	unsigned long long mmap_start_va = start_va;

	/* Exactly three regions are generated; the caller's array must fit them */
	assert(mmap_size >= 3U);
	assert(last_lvl > 0U);
	assert(last_lvl <= XLAT_TABLE_LEVEL_MAX);
	assert(first_lvl <= last_lvl);
	assert(mmap != NULL);
	assert(tbl_idxs != NULL);
	assert(granularity != NULL);

	/* Generate a mapping at the beginning of the table */
	tbl_idxs[0U] = 0U;

	/*
	 * Generate a mapping in a random position of the table.
	 * The entry after the first one will always be left intentionally
	 * unused.
	 */
	tbl_idxs[1U] = test_helpers_get_rand_in_range(2,
					(XLAT_TABLE_ENTRIES - 2));

	/* Generate a mapping at the end of the table */
	tbl_idxs[2U] = XLAT_TABLE_ENTRIES - 1U;

	/*
	 * Pick random attributes shared by all three regions, redrawing
	 * until MT_TRANSIENT is avoided when the caller disallows it.
	 */
	do {
		attrs = xlat_test_helpers_rand_mmap_attrs();
	} while ((attrs == MT_TRANSIENT) && (allow_transient == false));

	/* Minimum granularity needed to map a block at 'last_lvl' */
	*granularity = XLAT_BLOCK_SIZE(last_lvl);

	for (unsigned i = 0U; i < 3U; i++) {
		mmap[i].base_va = mmap_start_va;
		if (first_lvl < last_lvl)
		{
			/*
			 * Add an offset to the mmap region base VA so that
			 * this region will be mapped to a TTE in the
			 * `first_lvl` table at the same index as specified
			 * in tbl_idxs[].
			 */
			mmap[i].base_va += tbl_idxs[i] *
						XLAT_BLOCK_SIZE(first_lvl);
		}

		/* Place the region at index tbl_idxs[i] of the last-level table */
		mmap[i].base_va += (tbl_idxs[i] * (*granularity));

		/*
		 * PA can be any address (as long as there are not overlaps,
		 * for which there is a specific test). For simplicity,
		 * create an identity mapping using the base_va for the PA.
		 */
		mmap[i].base_pa = mmap[i].base_va & XLAT_TESTS_PA_MASK;
		mmap[i].size = *granularity;
		mmap[i].attr = attrs;
		mmap[i].granularity = *granularity;
	}

	return 0;
}
201
/*
 * Given a context and a set of expected indexes and levels for the last walk,
 * validate that the translation tables in the context are valid.
 *
 * For every mmap region in the context, a random VA inside the region is
 * translated via a software table walk, and the resulting TTE is checked
 * against the expected final level, table index, attributes, output PA and
 * descriptor type.
 *
 * Note that this function expects a valid and initialized context.
 */
static void validate_xlat_tables(xlat_ctx *ctx, unsigned int *expected_idxs,
				 unsigned int expected_level)
{
	uint64_t tte, attrs, upper_attrs, lower_attrs, type;
	uint64_t exp_upper_attrs, exp_lower_attrs;
	unsigned int level, index, granularity, addr_offset;
	unsigned long long test_va, pa, pa_mask;
	unsigned int retval;

	assert(ctx != NULL);
	assert(expected_idxs != NULL);

	for (unsigned int i = 0U; i < ctx->cfg->mmap_regions; i++) {
		/* Random offset within the region's mapping granularity */
		granularity = ctx->cfg->mmap[i].granularity;
		addr_offset = test_helpers_get_rand_in_range(0,
						granularity - 1U);
		test_va = ctx->cfg->base_va + ctx->cfg->mmap[i].base_va +
						addr_offset;
		/* Regions are identity-mapped, see gen_mmap_array_by_level() */
		pa = ctx->cfg->mmap[i].base_pa + addr_offset;

		/* Perform a table walk */
		retval = xlat_test_helpers_table_walk(ctx, test_va,
						      &tte, NULL, &level,
						      &index);

		/* Return value */
		CHECK_VERBOSE((retval == 0),
			      "Perform table walk for addr 0x%llx", test_va);

		/* Last table level */
		CHECK_EQUAL(expected_level, level);

		/* tte index on the page */
		CHECK_EQUAL(expected_idxs[i], index);

		/* Expected tte attributes */
		retval = xlat_test_helpers_get_attrs_for_va(ctx, test_va,
							    &attrs);

		/* Return value */
		CHECK_EQUAL(0, retval);

		upper_attrs = EXTRACT(UPPER_ATTRS, attrs);
		exp_upper_attrs = EXTRACT(UPPER_ATTRS, tte);
		lower_attrs = EXTRACT(LOWER_ATTRS, attrs);
		exp_lower_attrs = EXTRACT(LOWER_ATTRS, tte);

		/* Validate that the attributes are as expected */
		CHECK_VERBOSE((exp_upper_attrs == upper_attrs),
			"Validate Upper Attrs: Read 0x%lx - Expected 0x%lx",
			exp_upper_attrs, upper_attrs);

		CHECK_VERBOSE((exp_lower_attrs == lower_attrs),
			"Validate Lower Attrs: Read 0x%lx - Expected 0x%lx",
			exp_lower_attrs, lower_attrs);

		/* Validate the PA: output bits above the level's offset field */
		pa_mask = (1ULL << XLAT_ADDR_SHIFT(level)) - 1ULL;
		CHECK_EQUAL((tte & TABLE_ADDR_MASK), (pa & ~pa_mask));

		/* Validate the descriptor type: pages only exist at the max level */
		type = (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC :
							 BLOCK_DESC;
		CHECK_EQUAL(type, (tte & DESC_MASK));
	}
}
273
TEST(xlat_tests_G2, xlat_ctx_init_TC6)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	uint64_t start_va;
	size_t va_size, granularity;
	unsigned int mmap_count;
	xlat_addr_region_id_t va_region;
	int retval;
	struct xlat_mmap_region init_mmap[3U];
	unsigned int tbl_idx[3U];
	unsigned int base_lvl, end_lvl;

	/**********************************************************************
	 * TEST CASE 6:
	 *
	 * For each possible base level, create a set of mmap regions
	 * ranging from level 1 (lowest level at which a valid walk can
	 * finish) to XLAT_TABLE_LEVEL_MAX.
	 *
	 * For each possible (va_region, base_lvl, end_lvl) triplet for a
	 * base table there will be three mmap regions created:
	 *
	 *	- First region mapped at the beginning of a table whose final
	 *	  lookup level is 'last_lvl'. This region will be descendant of
	 *	  an entry at the beginning of a 'base_lvl' table.
	 *	- Second region mapped at a random index of a table whose final
	 *	  lookup level is 'last_lvl'. This region will be descendant of
	 *	  an entry at a random index of a 'base_lvl' table.
	 *	- Third region mapped at the end of a table whose final
	 *	  lookup level is 'last_lvl'. This region will be descendant of
	 *	  an entry at the end of a 'base_lvl' table.
	 *
	 * Then verify that the tables can be walked and that the levels,
	 * offsets and attributes on the ttes are as expected.
	 *
	 * This test validates that the xlat library is able to create
	 * tables starting on any valid initial lookup level and
	 * finishing on any valid level as well.
	 *********************************************************************/

	mmap_count = 3U;

	/* The first level that supports blocks is L1 */
	for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
		for (int i = 0U; i < VA_REGIONS; i++) {
			va_region = (xlat_addr_region_id_t)i;

			for (base_lvl = 0U;
			     base_lvl <= end_lvl;
			     base_lvl++) {

				/* VA space sized so the walk starts at base_lvl */
				start_va = gen_va_space_params_by_lvl(base_lvl,
								      va_region,
								      &va_size);

				retval = gen_mmap_array_by_level(&init_mmap[0U],
								 &tbl_idx[0U],
								 mmap_count,
								 base_lvl,
								 end_lvl,
								 &granularity,
								 start_va,
								 false);
				/*
				 * verify that the test setup is correct so far
				 */
				CHECK_TRUE(retval == 0);

				/* Clean the data structures */
				memset((void *)&ctx, 0,
					sizeof(struct xlat_ctx));
				memset((void *)&cfg, 0,
					sizeof(struct xlat_ctx_cfg));
				memset((void *)&tbls, 0,
					sizeof(struct xlat_ctx_tbls));

				/* Initialize the test structure */
				retval = xlat_ctx_cfg_init(&cfg, va_region,
							   &init_mmap[0U],
							   mmap_count, va_size);

				/*
				 * verify that the test setup is correct so far
				 */
				CHECK_TRUE(retval == 0);

				/* Test xlat_ctx_init() */
				retval = xlat_ctx_init(&ctx, &cfg, &tbls,
						       xlat_test_helpers_tbls(),
						       XLAT_TESTS_MAX_TABLES);

				/*
				 * verify that the test setup is correct so far
				 */
				CHECK_TRUE(retval == 0);

				/* Walk the tables; check levels, indexes, attrs */
				validate_xlat_tables(&ctx, &tbl_idx[0U],
						     end_lvl);
			}
		}
	}
}
378
TEST(xlat_tests_G2, xlat_get_llt_from_va_TC1)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	struct xlat_llt_info tbl_info, tbl_val;
	struct xlat_mmap_region init_mmap[3U];
	uint64_t start_va;
	size_t va_size, granularity;
	unsigned int mmap_count, index;
	xlat_addr_region_id_t va_region;
	int retval;
	unsigned int tbl_idx[3U];
	unsigned int base_lvl, end_lvl;
	unsigned int mmap_idx;
	uint64_t tte;
	unsigned long long test_va;

	/***************************************************************
	 * TEST CASE 1:
	 *
	 * For each possible base level, create a set of mmap regions
	 * ranging from level 1 (lowest level at which a valid walk can
	 * finish) to XLAT_TABLE_LEVEL_MAX.
	 *
	 * For each possible (va_region, base_lvl, end_lvl) triplet,
	 * create 3 mappings that will correspond to a tte in the Last
	 * level Table. Then verify that the call to
	 * xlat_get_llt_from_va() is able to return the right
	 * xlat_tbl_info structure with the expected values.
	 ***************************************************************/

	mmap_count = 3U;
	/* A single randomly chosen VA region is enough for this test */
	va_region = (xlat_addr_region_id_t)test_helpers_get_rand_in_range(
							0, VA_REGIONS - 1);

	for (end_lvl = 1U;
	     end_lvl <= XLAT_TABLE_LEVEL_MAX;
	     end_lvl++) {

		for (base_lvl = 0U;
		     base_lvl <= end_lvl;
		     base_lvl++) {

			/* Clean the data structures */
			memset((void *)&ctx, 0,
				sizeof(struct xlat_ctx));
			memset((void *)&cfg, 0,
				sizeof(struct xlat_ctx_cfg));
			memset((void *)&tbls, 0,
				sizeof(struct xlat_ctx_tbls));
			memset((void *)&tbl_info, 0,
				sizeof(struct xlat_llt_info));
			memset((void *)&tbl_val, 0,
				sizeof(struct xlat_llt_info));

			start_va = gen_va_space_params_by_lvl(base_lvl,
							      va_region,
							      &va_size);

			/*
			 * Use gen_mmap_array_by_level() to generate
			 * the mmap array.
			 */
			retval = gen_mmap_array_by_level(&init_mmap[0U],
							 &tbl_idx[0U],
							 mmap_count,
							 base_lvl,
							 end_lvl,
							 &granularity,
							 start_va,
							 true);

			/* Ensure that so far the test setup is OK */
			CHECK_TRUE(retval == 0);

			retval = xlat_ctx_cfg_init(&cfg, va_region,
						   &init_mmap[0U],
						   mmap_count, va_size);

			/* Ensure that so far the test setup is OK */
			CHECK_TRUE(retval == 0);

			retval = xlat_ctx_init(&ctx, &cfg, &tbls,
					       xlat_test_helpers_tbls(),
					       XLAT_TESTS_MAX_TABLES);

			/* Ensure that so far the test setup is OK */
			CHECK_TRUE(retval == 0);

			for (mmap_idx = 0U; mmap_idx < mmap_count; mmap_idx++) {
				/*
				 * For each mmap region, pick up a
				 * random address for the test.
				 */
				test_va = init_mmap[mmap_idx].base_va
						+ ctx.cfg->base_va;
				test_va +=
					test_helpers_get_rand_in_range(0,
						init_mmap[mmap_idx].size - 1);

				/*
				 * Perform a table walk to retrieve
				 * table info. Store the expected values
				 * inside the validation xlat_llt_info
				 * structure.
				 */
				retval = xlat_test_helpers_table_walk(&ctx,
							test_va,
							&tte,
							&(tbl_val.table),
							&(tbl_val.level),
							&index);

				/*
				 * Calculate the expected base VA for the llt.
				 * When base_lvl == end_lvl the llt is the
				 * base table itself, so no offset is added.
				 */
				tbl_val.llt_base_va = start_va;
				tbl_val.llt_base_va += (base_lvl < end_lvl) ?
					(XLAT_BLOCK_SIZE(base_lvl) *
						tbl_idx[mmap_idx]) : 0;


				/* Ensure that so far the test setup is OK */
				CHECK_TRUE(retval == 0);

				VERBOSE("\nTesting VA 0x%llx", test_va);

				/* Test xlat_get_llt_from_va */
				retval = xlat_get_llt_from_va(&tbl_info, &ctx,
							      test_va);

				/* Check the return value */
				CHECK_TRUE(retval == 0);

				/*
				 * Validate the structure returned by
				 * xlat_get_llt_from_va
				 */
				MEMCMP_EQUAL((void *)&tbl_val,
					     (void *)&tbl_info,
					     sizeof(struct xlat_llt_info));
				VERBOSE(" : PASS\n\n");
			}
		}
	}
}
526
TEST(xlat_tests_G2, xlat_get_llt_from_va_TC2)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	struct xlat_llt_info tbl_info;
	struct xlat_mmap_region init_mmap[3U];
	unsigned int tbl_idx[3U];
	size_t va_size, granularity;
	uint64_t start_va, test_va;
	xlat_addr_region_id_t va_region;
	unsigned int base_lvl, end_lvl;
	int retval;

	/***************************************************************
	 * TEST CASE 2:
	 *
	 * Test xlat_get_llt_from_va() with VAs outside
	 * of the context VA space. Expect -EFAULT in all cases.
	 ***************************************************************/

	/*
	 * Pick up base and end levels for the translation tables.
	 * The levels are arbitrary. Just to have a VA space large
	 * enough for the tests.
	 */
	base_lvl = 2U;
	end_lvl = 3U;

	for (int i = 0U; i < VA_REGIONS; i++) {
		va_region = (xlat_addr_region_id_t)i;

		/*
		 * For the low region, the test will be executed
		 * only once, for a VA above the VA space limits.
		 *
		 * For the high region, the test will be executed twice:
		 *	- Once for a VA below the VA space.
		 *	- Once for a VA above the VA space.
		 */
		for (unsigned int j = 0; j < (i + 1U); j++) {

			/* Clean the data structures */
			memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
			memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
			memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
			memset((void *)&tbl_info, 0,
					sizeof(struct xlat_llt_info));

			/* Get VA space limits for 'base_lvl' */
			start_va = gen_va_space_params_by_lvl(base_lvl, va_region,
							      &va_size);

			/*
			 * use gen_mmap_array_by_level() to generate
			 * the mmap for convenience.
			 */
			retval = gen_mmap_array_by_level(&init_mmap[0U],
							 &tbl_idx[0U],
							 3U, base_lvl, end_lvl,
							 &granularity,
							 start_va,
							 true);

			/* Ensure that so far the test setup is OK */
			CHECK_TRUE(retval == 0);

			retval = xlat_ctx_cfg_init(&cfg, va_region,
						   &init_mmap[0U], 3U,
						   MAX_VIRT_ADDR_SPACE_SIZE);
			CHECK_TRUE(retval == 0);

			retval = xlat_ctx_init(&ctx, &cfg, &tbls,
					       xlat_test_helpers_tbls(),
					       XLAT_TESTS_MAX_TABLES);
			CHECK_TRUE(retval == 0);

			VERBOSE("\n");

			if (j == 0U) {
				/*
				 * VA above the VA space.
				 * The upper range of the address is arbitrary.
				 */
				test_va = (ctx.cfg->max_va_size) +
					test_helpers_get_rand_in_range(0,
						XLAT_BLOCK_SIZE(base_lvl) - 1);
			} else {
				/*
				 * VA below the VA space.
				 * The upper range of the address is arbitrary.
				 */
				test_va = test_helpers_get_rand_in_range(0,
						XLAT_BLOCK_SIZE(base_lvl) - 1);
			}

			/* Test xlat_get_llt_from_va */
			retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);

			/* Check the return value */
			CHECK_VERBOSE((retval == -EFAULT),
				      "Testing VA 0x%lx", test_va);
			VERBOSE("\n");
		}
	}
}
633
TEST(xlat_tests_G2, xlat_get_llt_from_va_TC3)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	struct xlat_llt_info tbl_info;
	struct xlat_mmap_region init_mmap[3U];
	unsigned int tbl_idx[3U];
	size_t va_size, granularity;
	uint64_t start_va, test_va;
	xlat_addr_region_id_t va_region;
	unsigned int base_lvl, end_lvl;
	int retval;

	/***************************************************************
	 * TEST CASE 3:
	 *
	 * Test xlat_get_llt_from_va() with an unmapped VA belonging to
	 * the context VA space.
	 *
	 * NOTE(review): retval == 0 is expected below even though the
	 * VA is unmapped; presumably xlat_get_llt_from_va() succeeds
	 * whenever a last-level table covers the VA, regardless of
	 * whether the tte itself is valid — confirm against the
	 * xlat_get_llt_from_va() contract.
	 ***************************************************************/

	/*
	 * Pick up base and end levels for the translation tables.
	 * The levels are arbitrary. Just to have a VA space large
	 * enough for the tests.
	 */
	base_lvl = 0U;
	end_lvl = 3U;

	for (int i = 0U; i < VA_REGIONS; i++) {
		va_region = (xlat_addr_region_id_t)i;

		/* Clean the data structures */
		memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
		memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
		memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
		memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));

		/* VA space boundaries */
		start_va = gen_va_space_params_by_lvl(base_lvl, va_region,
						      &va_size);

		/*
		 * use gen_mmap_array_by_level() to generate
		 * the mmap for convenience, although we will
		 * only use one of the mmap regions (init_mmap[0]).
		 */
		retval = gen_mmap_array_by_level(&init_mmap[0U],
						 &tbl_idx[0U],
						 3U, base_lvl, end_lvl,
						 &granularity,
						 start_va,
						 true);

		/* Ensure that so far the test setup is OK */
		CHECK_TRUE(retval == 0);

		retval = xlat_ctx_cfg_init(&cfg, va_region,
					   &init_mmap[0U], 3U,
					   MAX_VIRT_ADDR_SPACE_SIZE);
		CHECK_TRUE(retval == 0);

		retval = xlat_ctx_init(&ctx, &cfg, &tbls,
				       xlat_test_helpers_tbls(),
				       XLAT_TESTS_MAX_TABLES);
		CHECK_TRUE(retval == 0);

		VERBOSE("\n");

		/* Pick an unmapped VA right after the end of init_mmap[0] */
		test_va = ctx.cfg->base_va;
		test_va += (init_mmap[0U].base_va + init_mmap[0U].size);
		test_va += test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);

		/* Test xlat_get_llt_from_va */
		retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);

		/* Check the return value */
		CHECK_VERBOSE((retval == 0),
			      "Testing VA 0x%lx", test_va);
		VERBOSE("\n");
	}
}
716
717void xlat_get_llt_from_va_prepare_assertion(struct xlat_ctx *ctx,
718 struct xlat_ctx_cfg *cfg,
719 struct xlat_ctx_tbls *tbls,
720 struct xlat_mmap_region *init_mmap)
721{
722 uint64_t start_va, end_va;
723 xlat_addr_region_id_t va_region;
724
725 assert(ctx != NULL);
726 assert(cfg != NULL);
727 assert(tbls != NULL);
728 assert(init_mmap != NULL);
729
730 va_region = (xlat_addr_region_id_t)test_helpers_get_rand_in_range(0,
731 VA_REGIONS - 1U);
732
733 /* Clean the data structures */
734 memset((void *)ctx, 0, sizeof(struct xlat_ctx));
735 memset((void *)cfg, 0, sizeof(struct xlat_ctx_cfg));
736 memset((void *)tbls, 0, sizeof(struct xlat_ctx_tbls));
737
738 /* VA space boundaries */
739 start_va = xlat_test_helpers_get_start_va(va_region,
740 MAX_VIRT_ADDR_SPACE_SIZE);
741 end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;
742
743 /* Generate a random mmap area */
744 xlat_test_helpers_rand_mmap_array(init_mmap, 1U, start_va, end_va);
745
746 (void)xlat_ctx_cfg_init(cfg, va_region, init_mmap, 1U,
747 MAX_VIRT_ADDR_SPACE_SIZE);
748
749 (void)xlat_ctx_init(ctx, cfg, tbls,
750 xlat_test_helpers_tbls(),
751 XLAT_TESTS_MAX_TABLES);
752}
753
754ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC4)
755{
756
757 struct xlat_ctx ctx;
758 struct xlat_ctx_cfg cfg;
759 struct xlat_ctx_tbls tbls;
760 struct xlat_mmap_region init_mmap;
761 uint64_t test_va;
762
763 /***************************************************************
764 * TEST CASE 4:
765 *
766 * Try calling xlat_get_llt_from_va() with a NULL
767 * xlat_llt_info structure
768 ***************************************************************/
769
770 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
771
772 test_va = ctx.cfg->base_va + init_mmap.base_va;
773
774 /* Test xlat_get_llt_from_va */
775 test_helpers_expect_assert_fail(true);
776 (void)xlat_get_llt_from_va(NULL, &ctx, test_va);
777 test_helpers_fail_if_no_assert_failed();
778}
779
780ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC5)
781{
782 struct xlat_llt_info tbl_info;
783
784 /***************************************************************
785 * TEST CASE 5:
786 *
787 * Try calling xlat_get_llt_from_va() with a NULL
788 * xlat_ctx structure.
789 ***************************************************************/
790
791 /* Clean the data structures */
792 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
793
794 /* Test xlat_get_llt_from_va: NULL xlat_ctx */
795 test_helpers_expect_assert_fail(true);
796 (void)xlat_get_llt_from_va(&tbl_info, NULL, 0ULL);
797 test_helpers_fail_if_no_assert_failed();
798}
799
800ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC6)
801{
802 struct xlat_ctx ctx;
803 struct xlat_ctx_cfg cfg;
804 struct xlat_ctx_tbls tbls;
805 struct xlat_llt_info tbl_info;
806 struct xlat_mmap_region init_mmap;
807 uint64_t test_va;
808
809 /***************************************************************
810 * TEST CASE 6:
811 *
812 * Try calling xlat_get_llt_from_va() with a NULL
813 * xlat_ctx_cfg structure.
814 ***************************************************************/
815
816 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
817 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
818
819 test_va = ctx.cfg->base_va + init_mmap.base_va;
820
821 /* Test xlat_get_llt_from_va: NULL xlat_ctx.cfg */
822 ctx.cfg = NULL;
823 test_helpers_expect_assert_fail(true);
824 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
825 test_helpers_fail_if_no_assert_failed();
826}
827
828ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC7)
829{
830 struct xlat_ctx ctx;
831 struct xlat_ctx_cfg cfg;
832 struct xlat_ctx_tbls tbls;
833 struct xlat_llt_info tbl_info;
834 struct xlat_mmap_region init_mmap;
835 uint64_t test_va;
836
837 /***************************************************************
838 * TEST CASE 7:
839 *
840 * Try calling xlat_get_llt_from_va() with a NULL
841 * xlat_ctx_tbls structure.
842 ***************************************************************/
843
844 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
845 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
846
847 test_va = ctx.cfg->base_va + init_mmap.base_va;
848
849 /* Test xlat_get_llt_from_va: NULL xlat_ctx.tbls */
850 ctx.tbls = NULL;
851 test_helpers_expect_assert_fail(true);
852 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
853 test_helpers_fail_if_no_assert_failed();
854}
855
856ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC8)
857{
858 struct xlat_ctx ctx;
859 struct xlat_ctx_cfg cfg;
860 struct xlat_ctx_tbls tbls;
861 struct xlat_llt_info tbl_info;
862 struct xlat_mmap_region init_mmap;
863 uint64_t test_va;
864
865 /***************************************************************
866 * TEST CASE 8:
867 *
868 * Try calling xlat_get_llt_from_va() with an uninitialized
869 * xlat_ctx_cfg structure.
870 * Perform a full initialization of the context and then force
871 * 'ctx.cfg->initialized' to 'false' so we can ensure that
872 * this is what it is actually tested.
873 ***************************************************************/
874
875 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
876 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
877
878 test_va = ctx.cfg->base_va + init_mmap.base_va;
879
880 /* Mark the cfg structure as not initialized */
881 cfg.initialized = false;
882
883 test_helpers_expect_assert_fail(true);
884 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
885 test_helpers_fail_if_no_assert_failed();
886}
887
888ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC9)
889{
890 struct xlat_ctx ctx;
891 struct xlat_ctx_cfg cfg;
892 struct xlat_ctx_tbls tbls;
893 struct xlat_llt_info tbl_info;
894 struct xlat_mmap_region init_mmap;
895 uint64_t test_va;
896
897 /***************************************************************
898 * TEST CASE 9:
899 *
900 * Try calling xlat_get_llt_from_va() with an uninitialized
901 * xlat_ctx_tbls structure.
902 * Perform a full initialization of the context and then force
903 * 'ctx.tbls->initialized' to 'false' so we can ensure that
904 * this is what it is actually tested.
905 ***************************************************************/
906
907 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
908 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
909
910 test_va = ctx.cfg->base_va + init_mmap.base_va;
911
912 /* Mark the tbls structure as not initialized */
913 tbls.initialized = false;
914
915 test_helpers_expect_assert_fail(true);
916 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
917 test_helpers_fail_if_no_assert_failed();
918}
919
920TEST(xlat_tests_G2, xlat_get_tte_ptr_TC1)
921{
922 struct xlat_ctx ctx;
923 struct xlat_ctx_cfg cfg;
924 struct xlat_ctx_tbls tbls;
925 struct xlat_llt_info tbl_info;
926 struct xlat_mmap_region init_mmap[3U];
927 unsigned int tbl_idx[3U];
928 uint64_t start_va, test_va;
929 xlat_addr_region_id_t va_region;
930 unsigned int level, index;
931 uint64_t *tte_ptr, *val_tte, *table;
932 uint64_t tte;
933 size_t granularity;
934 unsigned int base_lvl, end_lvl;
935 int retval;
936
937 /***************************************************************
938 * TEST CASE 1:
939 *
940 * Initialize a translation context with a given VA space and
941 * 3 mmap regions at level 3. Then get a tte using
942 * xlat_get_tte_ptr() and verify that it is the correct entry.
943 *
944 * This test tries three different mmap areas per VA region:
945 *
946 * - An address corresponding to the first entry at a
947 * last level table.
948 * - An address corresponding to the last entry at a
949 * last level table.
950 * - An address corresponding to an intermediate entry
951 * at a last level table.
952 *
953 * The test also tests a negative case wherein it tries to get
954 * the TTE via xlat_get_tte() for a lower than the base VA for
955 * the last level table.
956 ***************************************************************/
957
958 /*
959 * Pick up a base and end levels for the translation tables.
960 * The leves are arbitrary. Just to have a VA space enough
961 * for the tests.
962 */
963 base_lvl = 0U;
964 end_lvl = 3U;
965
966 for (int i = 0U; i < VA_REGIONS; i++) {
967 va_region = (xlat_addr_region_id_t)i;
968
969 /* Clean the data structures */
970 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
971 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
972 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
973 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
974
975 /* VA space boundaries */
976 start_va = xlat_test_helpers_get_start_va(va_region,
977 MAX_VIRT_ADDR_SPACE_SIZE);
978
979 /* Generate the mmap regions */
980 retval = gen_mmap_array_by_level(&init_mmap[0U],
981 &tbl_idx[0U],
982 3U, base_lvl, end_lvl,
983 &granularity,
984 start_va, true);
985
986 /* Ensure that so far the test setup is OK */
987 CHECK_TRUE(retval == 0);
988
989 retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U], 3U,
990 MAX_VIRT_ADDR_SPACE_SIZE);
991
992 /* Ensure that so far the test setup is OK */
993 CHECK_TRUE(retval == 0);
994
995 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
996 xlat_test_helpers_tbls(),
997 XLAT_TESTS_MAX_TABLES);
998
999 /* Ensure that so far the test setup is OK */
1000 CHECK_TRUE(retval == 0);
1001
1002 /* Get the xlat_llt_info structure used to look for TTEs */
1003 test_va = ctx.cfg->base_va + init_mmap[0].base_va;
1004 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1005
1006 /* Ensure that so far the test setup is OK */
1007 CHECK_TRUE(retval == 0);
1008
1009 /*
1010 * Iterate over test VAs of all 3 mmap regions to
1011 * test xlat_get_tte_ptr().
1012 */
1013 VERBOSE("\n");
1014 for (unsigned int i = 0U; i < 3U; i++) {
1015 /*
1016 * Get the xlat_llt_info structure used
1017 * to look for TTEs.
1018 */
1019 test_va = ctx.cfg->base_va + init_mmap[i].base_va;
1020 retval = xlat_get_llt_from_va(&tbl_info,
1021 &ctx, test_va);
1022
1023 /* Ensure that so far the test setup is OK */
1024 CHECK_TRUE(retval == 0);
1025
1026 /*
1027 * Add a random offset to the current 'test_va'
1028 * to be used for the tests.
1029 */
1030 test_va += test_helpers_get_rand_in_range(0,
1031 PAGE_SIZE - 1);
1032
1033 /*
1034 * Perform a table walk to get the table containing
1035 * the tte we are insterested in as well as the
1036 * index of that tte in the table.
1037 */
1038 retval = xlat_test_helpers_table_walk(&ctx, test_va,
1039 &tte, &table,
1040 &level, &index);
1041 /* Ensure that so far the test setup is OK */
1042 CHECK_TRUE(retval == 0);
1043
1044 /* Get a pointer to the expected tte */
1045 val_tte = &table[index];
1046
1047 /* Test xlat_get_tte_ptr() */
1048 tte_ptr = xlat_get_tte_ptr(&tbl_info, test_va);
1049
1050 /* Validate the output */
1051 CHECK_VERBOSE((val_tte == tte_ptr),
1052 "Testing VA 0x%lx", test_va);
1053 }
1054
1055 /*
1056 * test xlat_get_tte_ptr() agains a VA below the minimum
1057 * VA mapped by 'tbl_info'. Use init_mmap[1] for this test.
1058 */
1059 test_va = ctx.cfg->base_va + init_mmap[1U].base_va;
1060 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1061
1062 /* Ensure that so far the test setup is OK */
1063 CHECK_TRUE(retval == 0);
1064
1065 test_va = tbl_info.llt_base_va;
1066 test_va -= test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
1067
1068 tte_ptr = xlat_get_tte_ptr(&tbl_info, test_va);
1069
1070
1071 /* Validate the output */
1072 CHECK_VERBOSE((tte_ptr == NULL),
1073 "Check address 0x%lx against TT at VA 0x%lx",
1074 test_va, tbl_info.llt_base_va);
1075
1076 VERBOSE("\n");
1077 }
1078}
1079
ASSERT_TEST(xlat_tests_G2, xlat_get_tte_ptr_TC2)
{
	/***************************************************************
	 * TEST CASE 2:
	 *
	 * Try to get a tte using xlat_get_tte_ptr() with a NULL
	 * xlat_llt_info structure. The call is expected to trigger
	 * an assertion failure.
	 ***************************************************************/

	test_helpers_expect_assert_fail(true);
	(void)xlat_get_tte_ptr(NULL, 0ULL);
	test_helpers_fail_if_no_assert_failed();
}
1093
TEST(xlat_tests_G2, xlat_unmap_memory_page_TC1)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	uint64_t start_va;
	size_t va_size, granularity;
	unsigned int mmap_count;
	xlat_addr_region_id_t va_region;
	int retval;
	struct xlat_mmap_region init_mmap[3U];
	unsigned int tbl_idx[3U];
	unsigned int base_lvl, end_lvl;

	/***************************************************************
	 * TEST CASE 1:
	 *
	 * For each possible end lookup level, create a set of transient
	 * valid random mappings.
	 *
	 * For each possible (va_region, end_lvl) tuple, there will be
	 * three mmap regions created:
	 *
	 *	- First region mapped at the beginning of a table whose
	 *	  final lookup level is 'end_lvl'
	 *	- Second region mapped at a random tte of a table whose
	 *	  final lookup level is 'end_lvl'
	 *	- Third region mapped at the end of a table whose
	 *	  final lookup level is 'end_lvl'
	 *
	 * Then verify that the tables can be unmapped and that the
	 * resulting tte will contain a transient invalid entry.
	 ***************************************************************/

	mmap_count = 3U;
	base_lvl = 0U;

	/* The first look-up level that supports blocks is L1 */
	for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
		for (int i = 0U; i < VA_REGIONS; i++) {
			va_region = (xlat_addr_region_id_t)i;

			/* VA space sized so translation starts at base_lvl */
			start_va = gen_va_space_params_by_lvl(base_lvl,
							      va_region,
							      &va_size);

			retval = gen_mmap_array_by_level(&init_mmap[0U],
							 &tbl_idx[0U],
							 mmap_count,
							 base_lvl,
							 end_lvl,
							 &granularity,
							 start_va,
							 false);

			/* Verify that the test setup is correct so far */
			CHECK_TRUE(retval == 0);

			/* Clean the data structures */
			memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
			memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
			memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));

			/* Initialize the test structure */
			retval = xlat_ctx_cfg_init(&cfg, va_region,
						   &init_mmap[0U],
						   mmap_count, va_size);

			/* Verify that the test setup is correct so far */
			CHECK_TRUE(retval == 0);

			retval = xlat_ctx_init(&ctx, &cfg, &tbls,
					       xlat_test_helpers_tbls(),
					       XLAT_TESTS_MAX_TABLES);

			/* Verify that the test setup is correct so far */
			CHECK_TRUE(retval == 0);

			/*
			 * For each one of the mmap regions:
			 *	- get the TTE of a random VA and make it transient
			 *	- call xlat_unmap_memory_page() over the same VA
			 *	- verify that the TTE is now transient invalid.
			 */
			for (unsigned j = 0U; j < mmap_count; j++) {
				uint64_t tte;
				uint64_t *tbl_ptr;
				unsigned int tte_idx, tte_lvl;
				struct xlat_llt_info tbl_info;
				/* Random offset within the page/block */
				uint64_t offset =
					test_helpers_get_rand_in_range(0,
							PAGE_SIZE - 1);
				uint64_t test_va = init_mmap[j].base_va +
						ctx.cfg->base_va + offset;

				/*
				 * Perform a table walk to retrieve the table
				 * where the VA is mapped along with the index
				 * of the TTE within the table.
				 */
				retval = xlat_test_helpers_table_walk(&ctx,
							test_va, &tte,
							&tbl_ptr, &tte_lvl,
							&tte_idx);

				/*
				 * Verify that the test setup is correct so far
				 */
				CHECK_TRUE(retval == 0);

				/*
				 * The TTE is expected to be valid. Make it
				 * transient valid within the table.
				 */
				tbl_ptr[tte_idx] |=
						(1ULL << TRANSIENT_FLAG_SHIFT);

				/*
				 * Retrieve the xlat_llt_info structure needed
				 * to feed xlat_unmap_memory_page()
				 */
				retval = xlat_get_llt_from_va(&tbl_info, &ctx,
							      test_va);

				/*
				 * Verify that the test setup is correct so far
				 */
				CHECK_TRUE(retval == 0);

				/*
				 * Try to unmap the page/block
				 * containing `test_va`
				 */
				retval = xlat_unmap_memory_page(&tbl_info,
								test_va);

				/* Verify that the return is as expected */
				CHECK_TRUE(retval == 0);

				/*
				 * Verify that the TTE is marked as transient
				 * invalid.
				 */
				CHECK_VERBOSE((tbl_ptr[tte_idx] ==
							TRANSIENT_DESC),
					"Verifying TTE for VA 0x%lx is marked as Transient Invalid",
					test_va);
			}
			VERBOSE("\n");
		}
	}
}
1246
1247TEST(xlat_tests_G2, xlat_unmap_memory_page_TC2)
1248{
1249 struct xlat_ctx ctx;
1250 struct xlat_ctx_cfg cfg;
1251 struct xlat_ctx_tbls tbls;
1252 uint64_t start_va, test_va;
1253 size_t va_size, granularity;
1254 unsigned int mmap_count;
1255 unsigned int tte_idx, tte_lvl;
1256 xlat_addr_region_id_t va_region;
1257 int retval;
1258 struct xlat_mmap_region init_mmap[3U];
1259 unsigned int tbl_idx[3U];
1260 struct xlat_llt_info tbl_info;
1261 uint64_t tte, val_tte;
1262 uint64_t *tbl_ptr;
1263 unsigned int base_lvl, end_lvl;
1264
1265 /***************************************************************
1266 * TEST CASE 2:
1267 *
1268 * Generate a mmap region with a set of transient valid
1269 * mappings. Then run a set of negative tests:
1270 *
1271 * - Try addresses below and above the range mapped by the
1272 * xlat_llt_info structure on a transient-valid entry.
1273 * - Try unmapping from a valid non-transient entry.
1274 * - Try unmapping from an invalid entry.
1275 ***************************************************************/
1276
1277 /*
1278 * Pick up a base and end levels for the translation tables.
1279 * The leves are arbitrary. Just to have a VA space enough
1280 * for the tests.
1281 */
1282 base_lvl = 0U;
1283 end_lvl = 3U;
1284
1285 mmap_count = 3U;
1286
1287 for (int i = 0U; i < VA_REGIONS; i++) {
1288 va_region = (xlat_addr_region_id_t)i;
1289
1290 start_va = gen_va_space_params_by_lvl(base_lvl,
1291 va_region, &va_size);
1292
1293 /*
1294 * We generate the mmap regions to use. We will be interested
1295 * in init_mmap[1].
1296 */
1297 retval = gen_mmap_array_by_level(&init_mmap[0U], &tbl_idx[0U],
1298 mmap_count, base_lvl, end_lvl,
1299 &granularity,
1300 start_va, false);
1301
1302 /* Verify that the test setup is correct so far */
1303 CHECK_TRUE(retval == 0);
1304
1305 /* Clean the data structures */
1306 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1307 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1308 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1309
1310 /* Initialize the test structure */
1311 retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U],
1312 mmap_count, va_size);
1313
1314 /* Verify that the test setup is correct so far */
1315 CHECK_TRUE(retval == 0);
1316
1317 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1318 xlat_test_helpers_tbls(),
1319 XLAT_TESTS_MAX_TABLES);
1320
1321 /* Verify that the test setup is correct so far */
1322 CHECK_TRUE(retval == 0);
1323
1324 /*
1325 * Make the TTEs of the mapped region, which is expected
1326 * to be valid, transient valid.
1327 */
1328 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1329
1330 /*
1331 * Perform a table walk to retrieve the table where the VA
1332 * is mapped along with the index of the TTE within the table.
1333 */
1334 retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
1335 &tbl_ptr, &tte_lvl,
1336 &tte_idx);
1337
1338 /* Verify that the test setup is correct so far */
1339 CHECK_TRUE(retval == 0);
1340
1341 /*
1342 * The TTE is expected to be valid. Make it
1343 * transient valid within the table.
1344 */
1345 tbl_ptr[tte_idx] |= (1ULL << TRANSIENT_FLAG_SHIFT);
1346 val_tte = tbl_ptr[tte_idx];
1347
1348 /*
1349 * Retrieve the xlat_llt_info structure needed to feed
1350 * xlat_unmap_memory_page().
1351 */
1352 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
1353 init_mmap[1U].base_pa + ctx.cfg->base_va);
1354
1355 /* Verify that the test setup is correct so far */
1356 CHECK_TRUE(retval == 0);
1357
1358 /*
1359 * Test xlat_unmmap_memory_page() with a valid address
1360 * below the start of init_mmap[0U]. This gives us an address
1361 * below the range mapped by table we retrieved.
1362 */
1363 test_va = init_mmap[0U].base_va + ctx.cfg->base_va;
1364 test_va -= test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
1365
1366 /* Try to unmap the page/block containing `test_va` */
1367 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1368
1369 /* Verify that the return is as expected */
1370 CHECK_VERBOSE((retval == -EFAULT),
1371 "Testing VA 0x%lx on TTE for VA 0x%lx",
1372 test_va,
1373 init_mmap[1U].base_va + ctx.cfg->base_va);
1374
1375 /* Verify that the TTE remains unchanged */
1376 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1377
1378 /*
1379 * Repeat the process, this time with an address on a page
1380 * after the one mapped by init_mmap[2U]. This gives us an
1381 * address over the range mapped by table we retrieved.
1382 */
1383 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1384 test_va += PAGE_SIZE;
1385 test_va += test_helpers_get_rand_in_range(0,
1386 PAGE_SIZE - 1);
1387
1388 /* Try to unmap the page/block containing `test_va` */
1389 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1390
1391 /* Verify that the return is as expected */
1392 CHECK_VERBOSE((retval == -EFAULT),
1393 "Testing VA 0x%lx on TTE for VA 0x%lx",
1394 test_va,
1395 init_mmap[2U].base_va + ctx.cfg->base_va);
1396
1397 /* Verify that the TTE remains unchanged */
1398 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1399
1400 /*
1401 * Try to unmap an address marked as non-transient
1402 */
1403 tbl_ptr[tte_idx] &= ~(MASK(TRANSIENT_FLAG));
1404 val_tte = tbl_ptr[tte_idx];
1405
1406 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1407 test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);
1408
1409 /* Try to unmap the page/block containing `test_va` */
1410 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1411
1412 /* Verify that the return is as expected */
1413 CHECK_VERBOSE((retval == -EFAULT),
1414 "Testing VA 0x%lx on a non-transient valid TTE",
1415 test_va);
1416
1417 /* Verify that the TTE remains unchanged */
1418 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1419
1420 /*
1421 * Try to unmap an address marked as invalid.
1422 */
1423 tbl_ptr[tte_idx] = INVALID_DESC;
1424 val_tte = tbl_ptr[tte_idx];
1425
1426 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1427 test_va += test_helpers_get_rand_in_range(0,
1428 PAGE_SIZE - 1);
1429
1430 /* Try to unmap the page/block containing `test_va` */
1431 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1432
1433 /* Verify that the return is as expected */
1434 CHECK_VERBOSE((retval == -EFAULT),
1435 "Testing VA 0x%lx on a ninvalid TTE",
1436 test_va);
1437
1438 /* Verify that the TTE remains unchanged */
1439 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1440 VERBOSE("\n");
1441 }
1442}
1443
ASSERT_TEST(xlat_tests_G2, xlat_unmap_memory_page_TC3)
{
	/***************************************************************
	 * TEST CASE 3:
	 *
	 * Try calling xlat_unmap_memory_page with a NULL
	 * xlat_llt_info structure. The call is expected to trigger
	 * an assertion failure.
	 ***************************************************************/

	test_helpers_expect_assert_fail(true);
	(void)xlat_unmap_memory_page(NULL, 0ULL);
	test_helpers_fail_if_no_assert_failed();
}
1457
1458TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC1)
1459{
1460 struct xlat_ctx ctx;
1461 struct xlat_ctx_cfg cfg;
1462 struct xlat_ctx_tbls tbls;
1463 uint64_t start_va;
1464 size_t va_size, granularity;
1465 unsigned int mmap_count;
1466 xlat_addr_region_id_t va_region;
1467 int retval;
1468 struct xlat_mmap_region init_mmap[3U];
1469 unsigned int tbl_idx[3U];
1470 unsigned int base_lvl, end_lvl;
1471
1472 /***************************************************************
1473 * TEST CASE 1:
1474 *
1475 * For each possible end lookup level, create a set transient
1476 * random mappings.
1477 *
1478 * For each possible (va_region, end_lvl) tuple, there will be three
1479 * mmap regions created:
1480 *
1481 * - First region mapped at the beginning of a table whose
1482 * final lookup level is 'end_lvl'
1483 * - Second region mapped at a random index of a table whose
1484 * final lookup level is 'end_lvl'
1485 * - Third region mapped at the end of a table whose
1486 * final lookup level is 'end_lvl'
1487 *
1488 * Then verify that we can map PA areas into the transient
1489 * entries using random attributes and that the generated
1490 * entry is valid.
1491 ***************************************************************/
1492
1493 mmap_count = 3U;
1494 base_lvl = 0U;
1495
1496 /* The first look-up level that supports blocks is L1 */
1497 for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
1498 for (int i = 0U; i < VA_REGIONS; i++) {
1499 va_region = (xlat_addr_region_id_t)i;
1500
1501 start_va = gen_va_space_params_by_lvl(base_lvl,
1502 va_region,
1503 &va_size);
1504
1505 retval = gen_mmap_array_by_level(&init_mmap[0U],
1506 &tbl_idx[0U],
1507 mmap_count,
1508 base_lvl,
1509 end_lvl,
1510 &granularity,
1511 start_va,
1512 false);
1513
1514 /* Verify that the test setup is correct so far */
1515 CHECK_TRUE(retval == 0);
1516
1517 /* Force all the mmap regions to be TRANSIENT */
1518 for (unsigned int j = 0U; j < mmap_count; j++) {
1519 init_mmap[j].attr = MT_TRANSIENT;
1520 }
1521
1522 /* Clean the data structures */
1523 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1524 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1525 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1526
1527 /* Initialize the test structure */
1528 retval = xlat_ctx_cfg_init(&cfg, va_region,
1529 &init_mmap[0U],
1530 mmap_count, va_size);
1531
1532 /* Verify that the test setup is correct so far */
1533 CHECK_TRUE(retval == 0);
1534
1535 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1536 xlat_test_helpers_tbls(),
1537 XLAT_TESTS_MAX_TABLES);
1538
1539 /* Verify that the test setup is correct so far */
1540 CHECK_TRUE(retval == 0);
1541
1542 /*
1543 * For each one of the mmap regions:
1544 * - Generate a random VA within the mmap VA space.
1545 * - generate a set of random attributes.
1546 * - Map a random PA to the generated VA and with
1547 * the generated attributes.
1548 * - call xlat_unmap_memory_page_map_with_attrs() to
1549 * create the mapping.
1550 * - verify that the new entry is valid.
1551 */
1552 for (unsigned j = 0U; j < mmap_count; j++) {
1553 uint64_t tte, val_tte, attrs, pa, type;
1554 uint64_t *tbl_ptr;
1555 unsigned int tte_idx, tte_lvl;
1556 struct xlat_llt_info tbl_info;
1557 uint64_t offset =
1558 test_helpers_get_rand_in_range(0,
1559 init_mmap[i].size - 1);
1560 uint64_t test_va = init_mmap[j].base_va +
1561 ctx.cfg->base_va + offset;
1562
1563 /*
1564 * Perform a table walk to retrieve the table
1565 * where the VA is mapped along with the index
1566 * of the TTE within the table.
1567 */
1568 retval = xlat_test_helpers_table_walk(&ctx,
1569 test_va, &tte,
1570 &tbl_ptr, &tte_lvl,
1571 &tte_idx);
1572
1573 /*
1574 * Verify that the test setup is correct so far
1575 */
1576 CHECK_TRUE(retval == 0);
1577
1578 /* Generate a random set of attributes. */
1579 do {
1580 attrs = xlat_test_helpers_rand_mmap_attrs();
1581 } while (attrs == MT_TRANSIENT);
1582
1583 /*
1584 * Generate the validation TTE. For convenience,
1585 * create an identity mapping.
1586 */
1587 retval = xlat_test_helpers_gen_attrs(&val_tte,
1588 attrs);
1589 pa = init_mmap[j].base_va & XLAT_TESTS_PA_MASK;
1590
1591 /*
1592 * Add an arbitrary offset to PA to be passed to
1593 * xlat_map_memory_page_with_attrs()
1594 */
1595 pa += test_helpers_get_rand_in_range(1,
1596 XLAT_BLOCK_SIZE(end_lvl) - 1);
1597 val_tte |= pa & XLAT_ADDR_MASK(end_lvl);
1598
1599 /* The TTE will be a transient one */
1600 val_tte |= (1ULL <<
1601 TRANSIENT_FLAG_SHIFT);
1602
1603 /* TTE type */
1604 type = (end_lvl == XLAT_TABLE_LEVEL_MAX) ?
1605 PAGE_DESC :
1606 BLOCK_DESC;
1607 val_tte |= type;
1608
1609 /* Verify the test setup */
1610 CHECK_TRUE(retval == 0);
1611
1612 /*
1613 * Retrieve the xlat_llt_info structure needed
1614 * to feed xlat_map_memory_page_with_attrs()
1615 */
1616 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
1617 test_va);
1618
1619 /*
1620 * Verify that the test setup is correct so far
1621 */
1622 CHECK_TRUE(retval == 0);
1623
1624 /*
1625 * Try to map the PA with the attributes to the
1626 * `test_va`
1627 */
1628 retval = xlat_map_memory_page_with_attrs(
1629 &tbl_info,
1630 test_va, pa, attrs);
1631
1632 /* Verify that the return is as expected */
1633 CHECK_VERBOSE((retval == 0),
1634 "Mapping PA 0x%.16lx to VA 0x%.16lx with attrs 0x%lx",
1635 pa, test_va, attrs);
1636 CHECK_TRUE(retval == 0);
1637
1638 /*
1639 * Verify that the generated TTE matches
1640 * the validation one.
1641 */
1642 CHECK_VERBOSE((val_tte == tbl_ptr[tte_idx]),
1643 "Verifying TTE 0x%.16lx against 0x%.16lx",
1644 tbl_ptr[tte_idx], val_tte);
1645 }
1646 VERBOSE("\n");
1647 }
1648 }
1649}
1650
TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC2)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	uint64_t start_va, test_va, test_pa;
	size_t va_size, granularity;
	unsigned int mmap_count;
	unsigned int tte_idx, tte_lvl;
	xlat_addr_region_id_t va_region;
	int retval;
	struct xlat_mmap_region init_mmap[3U];
	unsigned int tbl_idx[3U];
	struct xlat_llt_info tbl_info;
	uint64_t tte, val_tte;
	uint64_t *tbl_ptr;
	unsigned int base_lvl, end_lvl;
	/* Possible PA widths as encoded by ID_AA64MMFR0_EL1.PARange */
	unsigned int pa_range_bits_arr[] = {
		PARANGE_0000_WIDTH, PARANGE_0001_WIDTH, PARANGE_0010_WIDTH,
		PARANGE_0011_WIDTH, PARANGE_0100_WIDTH, PARANGE_0101_WIDTH,
	};
	unsigned int parange_index = test_helpers_get_rand_in_range(0,
		sizeof(pa_range_bits_arr)/sizeof(pa_range_bits_arr[0]) - 1U);


	/***************************************************************
	 * TEST CASE 2:
	 *
	 * Generate a mmap region with a set of transient invalid
	 * mappings. Then run a set of negative tests:
	 *
	 *	- Try addresses below and above the range mapped by the
	 *	  xlat_llt_info structure on a transient-invalid entry.
	 *	- Try mapping a PA larger than the maximum supported PA
	 *	  to a transient-invalid entry.
	 *	- Try mapping to a transient-valid entry.
	 *	- Try mapping to a valid entry.
	 *	- Try mapping to an invalid entry.
	 ***************************************************************/

	/*
	 * Pick up a base and end levels for the translation tables.
	 * The levels are arbitrary. Just to have a VA space enough
	 * for the tests.
	 */
	base_lvl = 0U;
	end_lvl = 3U;

	mmap_count = 3U;

	for (int i = 0U; i < VA_REGIONS; i++) {
		va_region = (xlat_addr_region_id_t)i;

		start_va = gen_va_space_params_by_lvl(base_lvl,
						      va_region, &va_size);

		/*
		 * We generate the mmap regions to use. We will be interested
		 * in init_mmap[1] for the transient-invalid tests and in
		 * init_mmap[2] for the rest of tests.
		 */
		retval = gen_mmap_array_by_level(&init_mmap[0U], &tbl_idx[0U],
						 mmap_count, base_lvl, end_lvl,
						 &granularity,
						 start_va, false);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		/* Force init_mmap[1] to be TRANSIENT */
		init_mmap[1U].attr = MT_TRANSIENT;

		/* Clean the data structures */
		memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
		memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
		memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));

		/* Initialize the test structure */
		retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U],
					   mmap_count, va_size);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		retval = xlat_ctx_init(&ctx, &cfg, &tbls,
				       xlat_test_helpers_tbls(),
				       XLAT_TESTS_MAX_TABLES);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		test_va = init_mmap[1U].base_va + ctx.cfg->base_va;

		/*
		 * Retrieve the xlat_llt_info structure needed to feed
		 * xlat_map_memory_page_with_attrs().
		 */
		retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		/*
		 * Test xlat_map_memory_page_with_attrs() with a valid address
		 * within init_mmap[0]. This gives us an address
		 * below the range mapped by table we retrieved (which belongs
		 * to init_mmap[1]). For simplicity, set the attributes and
		 * the PA both to 0x0.
		 */
		test_va = init_mmap[0U].base_va + ctx.cfg->base_va;
		test_va -= test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);

		/* Try to map to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 0ULL, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing VA 0x%.16lx on TTE for VA 0x%.16lx",
			      test_va,
			      init_mmap[1U].base_va + ctx.cfg->base_va);

		/*
		 * Repeat the process, this time with an address on a page
		 * mapped by init_mmap[2]. This gives us an
		 * address over the range mapped by table we retrieved.
		 */
		test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
		test_va += test_helpers_get_rand_in_range(0,
							  PAGE_SIZE - 1);

		/* Try to map to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 0ULL, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing VA 0x%.16lx on TTE for VA 0x%.16lx",
			      test_va,
			      init_mmap[2U].base_va + ctx.cfg->base_va);

		/*
		 * Test with a PA larger than the maximum PA supported.
		 */

		/* Configure a random maximum PA supported */
		host_write_sysreg("id_aa64mmfr0_el1",
				  INPLACE(ID_AA64MMFR0_EL1_PARANGE,
					  parange_index));
		/* First PA past the configured maximum supported range */
		test_pa =
			(1ULL << pa_range_bits_arr[parange_index]) + PAGE_SIZE;

		test_va = init_mmap[1U].base_va + ctx.cfg->base_va;

		/*
		 * Perform a table walk to retrieve the table where the VA
		 * is mapped along with the index of the TTE within the table.
		 */
		retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
						      &tbl_ptr, &tte_lvl,
						      &tte_idx);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		/*
		 * Take a snapshot of the TTE. This will be used to verify
		 * that the TTE hasn't been altered.
		 */
		val_tte = tbl_ptr[tte_idx];

		/* Get a random address to test */
		test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);

		/* Try to map the PA to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 test_pa, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing PA 0x%.16lx on with a max supported PA of 0x%.16llx",
			      test_pa,
			      (1ULL << pa_range_bits_arr[parange_index]) - 1ULL);

		/* Verify that the TTE remains unchanged */
		CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);

		/* Restore the maximum supported PA size for next tests */
		host_write_sysreg("id_aa64mmfr0_el1",
				  INPLACE(ID_AA64MMFR0_EL1_PARANGE, 5U));

		/* The rest of the tests will be based on init_mmap[2] */
		test_va = init_mmap[2U].base_va + ctx.cfg->base_va;

		/*
		 * Perform a table walk to retrieve the table where the VA
		 * is mapped along with the index of the TTE within the table.
		 */
		retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
						      &tbl_ptr, &tte_lvl,
						      &tte_idx);

		/* Verify that the test setup is correct so far */
		CHECK_TRUE(retval == 0);

		/*
		 * Make the TTEs of the mapped region, which is expected
		 * to be valid, transient valid.
		 */
		tbl_ptr[tte_idx] |= (1ULL << TRANSIENT_FLAG_SHIFT);

		/*
		 * Take a snapshot of the TTE. This will be used to verify
		 * that the TTE hasn't been altered.
		 */
		val_tte = tbl_ptr[tte_idx];

		/*
		 * Now try to map to a valid VA. In this case the associated
		 * TTE will contain a transient valid mapping.
		 */
		test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
		test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);

		/* Try to map to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 0ULL, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing VA 0x%.16lx on a transient valid TTE",
			      test_va);

		/* Verify that the TTE remains unchanged */
		CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);

		/*
		 * Repeat the last test but after clearing the TRANSIENT
		 * flag from the TTE. This will test the behaviour with
		 * a non transient TTE.
		 */
		tbl_ptr[tte_idx] &= ~(1ULL << TRANSIENT_FLAG_SHIFT);
		val_tte = tbl_ptr[tte_idx];

		test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
		test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);

		/* Try to map to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 0ULL, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing VA 0x%.16lx on a valid TTE",
			      test_va);

		/* Verify that the TTE remains unchanged */
		CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);

		/*
		 * Repeat the last test on an INVALID TTE.
		 */
		tbl_ptr[tte_idx] = 0ULL;
		val_tte = 0ULL;

		test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
		test_va += test_helpers_get_rand_in_range(0,
							  PAGE_SIZE - 1);

		/* Try to map to the page/block containing `test_va` */
		retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
							 0ULL, 0ULL);

		/* Verify that the return is as expected */
		CHECK_VERBOSE((retval == -EFAULT),
			      "Testing VA 0x%.16lx on an invalid TTE",
			      test_va);

		/* Verify that the TTE remains unchanged */
		CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);

		VERBOSE("\n");
	}
}
1935
ASSERT_TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC3)
{
	/***************************************************************
	 * TEST CASE 3:
	 *
	 * Try calling xlat_map_memory_page_with_attrs with a NULL
	 * xlat_llt_info structure. The call is expected to trigger
	 * an assertion failure.
	 ***************************************************************/

	test_helpers_expect_assert_fail(true);
	(void)xlat_map_memory_page_with_attrs(NULL, 0ULL, 0ULL, 0ULL);
	test_helpers_fail_if_no_assert_failed();
}
1949
1950/* Helper function to validate ttbrx_el2 registers */
1951static void validate_ttbrx_el2(struct xlat_ctx *ctx)
1952{
1953 uint64_t expected_ttbrx, ttbrx;
1954 xlat_addr_region_id_t va_region;
1955
1956 assert(ctx != NULL);
1957
1958 va_region = ctx->cfg->region;
1959
1960 /* BADDR */
1961 expected_ttbrx = ((uint64_t)&ctx->tbls->tables[0U]) &
1962 MASK(TTBRx_EL2_BADDR);
1963
1964 ttbrx = read_ttbr1_el2();
1965 if(va_region == VA_LOW_REGION) {
1966 ttbrx = read_ttbr0_el2();
1967
1968 /*
1969 * CnP bit. It is expected that the xlat library will
1970 * automatically set this bit for the low region.
1971 */
1972 expected_ttbrx |= (1ULL << TTBRx_EL2_CnP_SHIFT);
1973 }
1974
1975 CHECK_VERBOSE((expected_ttbrx == ttbrx),
1976 "Expected TTBR%c_EL2: 0x%lx - Received: 0x%lx",
1977 (unsigned int)va_region + '0',
1978 expected_ttbrx, ttbrx);
1979}
1980
1981/* Helper function to validate TCR_EL2 register */
1982static void validate_tcr_el2(struct xlat_ctx *low_ctx,
1983 struct xlat_ctx *high_ctx)
1984{
1985 uint64_t exp_tcr, tcr;
1986 size_t t0sz, t1sz;
1987 unsigned int parange;
1988
1989 tcr = read_tcr_el2();
1990
1991 /*
1992 * Calculate the VA space size for both contexts based on
1993 * the TCR_EL2 register.
1994 */
1995 t0sz = ((size_t)1) << (64U - EXTRACT(TCR_EL2_T0SZ, tcr));
1996 t1sz = ((size_t)1) << (64U - EXTRACT(TCR_EL2_T1SZ, tcr));
1997
1998 /* Validate the VA space size of the contexts */
1999 CHECK_VERBOSE((t0sz == low_ctx->cfg->max_va_size),
2000 "Check VA space size for Low Region: 0x%lx == 0x%lx",
2001 t0sz, low_ctx->cfg->max_va_size);
2002 CHECK_VERBOSE((t1sz == high_ctx->cfg->max_va_size),
2003 "Check VA space size for High Region: 0x%lx == 0x%lx",
2004 t1sz, high_ctx->cfg->max_va_size);
2005
2006 /* Mask out TxSZ fields. We have already validated them */
2007 tcr &= ~(MASK(TCR_EL2_T0SZ) | MASK(TCR_EL2_T1SZ));
2008
2009 /*
2010 * Inner and outher cacheability attributes as expected by RMM
2011 * for all the contexts.
2012 */
2013 exp_tcr = TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA;
2014 exp_tcr |= TCR_EL2_IRGN1_WBWA | TCR_EL2_ORGN1_WBWA;
2015
2016 /* Shareability as expected by RMM for all the contexts */
2017 exp_tcr |= TCR_EL2_SH0_IS | TCR_EL2_SH1_IS;
2018
2019 /* Granule size for all the contexts. Only 4KB supported */
2020 exp_tcr |= TCR_EL2_TG0_4K | TCR_EL2_TG1_4K;
2021
2022 /* Hierarchical permissions */
2023 exp_tcr |= TCR_EL2_AS | TCR_EL2_HPD0 | TCR_EL2_HPD1;
2024
2025 /*
2026 * Xlat library configures TCR_EL2.IPS to the max
2027 * supported by the PE.
2028 */
2029 parange = EXTRACT(ID_AA64MMFR0_EL1_PARANGE, read_id_aa64mmfr0_el1());
2030 exp_tcr |= INPLACE(TCR_EL2_IPS, parange);
2031
2032 /* Validate tcr_el2*/
2033 CHECK_VERBOSE((exp_tcr == tcr),
2034 "Validate TCR_EL2 against expected value: Read 0x%.16lx - Expected 0x%.16lx",
2035 tcr, exp_tcr);
2036}
2037
TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC1)
{
	struct xlat_ctx ctx[2U];
	struct xlat_ctx_cfg cfg[2U];
	struct xlat_ctx_tbls tbls[2U];
	uint64_t *base_tbl[2U], *xlat_tables;
	uint64_t start_va, end_va;
	xlat_addr_region_id_t va_region;
	int retval;
	struct xlat_mmap_region init_mmap[2U];
	/* Possible PA widths as encoded by ID_AA64MMFR0_EL1.PARange */
	unsigned int pa_range_bits_arr[] = {
		PARANGE_0000_WIDTH, PARANGE_0001_WIDTH, PARANGE_0010_WIDTH,
		PARANGE_0011_WIDTH, PARANGE_0100_WIDTH, PARANGE_0101_WIDTH,
	};
	unsigned int pa_index = test_helpers_get_rand_in_range(0,
		sizeof(pa_range_bits_arr)/sizeof(pa_range_bits_arr[0]) - 1U);

	/***************************************************************
	 * TEST CASE 1:
	 *
	 * Generate a translation context for each region and configure
	 * the MMU registers based on both contexts. Verify that the
	 * right parameters have been configured.
	 ***************************************************************/

	/* Clean the data structures */
	memset((void *)&ctx, 0, sizeof(struct xlat_ctx) * 2U);
	memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg) * 2U);
	memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls) * 2U);

	/* Configure a random maximum PA supported */
	host_write_sysreg("id_aa64mmfr0_el1",
			  INPLACE(ID_AA64MMFR0_EL1_PARANGE, pa_index));

	for (int i = 0U; i < VA_REGIONS; i++) {
		va_region = (xlat_addr_region_id_t)i;

		xlat_tables = xlat_test_helpers_tbls();
		/* Use half of the available tables for each region */
		base_tbl[i] = &xlat_tables[(i * XLAT_TESTS_MAX_TABLES *
					    XLAT_TABLE_ENTRIES) >> 1U];
		/* VA space boundaries */
		start_va = xlat_test_helpers_get_start_va(va_region,
						MAX_VIRT_ADDR_SPACE_SIZE);
		end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;

		/* Generate only a single mmap region for each region */
		xlat_test_helpers_rand_mmap_array(&init_mmap[i], 1U, start_va, end_va);

		retval = xlat_ctx_cfg_init(&cfg[i], va_region, &init_mmap[i],
					   1U, MAX_VIRT_ADDR_SPACE_SIZE);
		CHECK_TRUE(retval == 0);

		retval = xlat_ctx_init(&ctx[i], &cfg[i], &tbls[i],
				       base_tbl[i], XLAT_TESTS_MAX_TABLES >> 1U);
		CHECK_TRUE(retval == 0);

		/* Initialize MMU for the given context */
		retval = xlat_arch_setup_mmu_cfg(&ctx[i]);

		/* Verify that the MMU has been configured */
		CHECK_TRUE(retval == 0);

		/* Validate TTBR_EL2 for each context */
		validate_ttbrx_el2(&ctx[i]);
	}

	/* Validate TCR_EL2 for both contexts at the same time */
	validate_tcr_el2(&ctx[0U], &ctx[1U]);
}
2108
TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC2)
{
	struct xlat_ctx ctx;
	struct xlat_ctx_cfg cfg;
	struct xlat_ctx_tbls tbls;
	uint64_t start_va, end_va;
	int retval;
	struct xlat_mmap_region init_mmap;

	/***************************************************************
	 * TEST CASE 2:
	 *
	 * Generate a valid translation context for one of the regions
	 * and overwrite it to test different failure conditions on
	 * xlat_arch_setup_mmu_cfg():
	 *
	 * - Call xlat_arch_setup_mmu_cfg() with the MMU enabled.
	 * - Call xlat_arch_setup_mmu_cfg() with an uninitialized
	 *   context configuration.
	 ***************************************************************/

	/* Clean the data structures */
	memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
	memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
	memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));

	/* VA space boundaries */
	start_va = xlat_test_helpers_get_start_va(VA_LOW_REGION,
						MAX_VIRT_ADDR_SPACE_SIZE);
	end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;

	/* Generate only a single mmap region for the low VA region */
	xlat_test_helpers_rand_mmap_array(&init_mmap, 1U, start_va, end_va);

	retval = xlat_ctx_cfg_init(&cfg, VA_LOW_REGION, &init_mmap,
				   1U, MAX_VIRT_ADDR_SPACE_SIZE);
	CHECK_TRUE(retval == 0);

	retval = xlat_ctx_init(&ctx, &cfg, &tbls,
			       xlat_test_helpers_tbls(),
			       XLAT_TESTS_MAX_TABLES);
	CHECK_TRUE(retval == 0);

	/* Force the MMU enablement */
	xlat_enable_mmu_el2();

	/* Try to initialize MMU for the given context */
	retval = xlat_arch_setup_mmu_cfg(&ctx);

	/* Setup must be rejected while the MMU is already enabled */
	CHECK_TRUE(retval == -EPERM);

	/* Restore SCTLR_EL2 to disable the MMU */
	write_sctlr_el2(0ULL);

	/* Force the context to be uninitialized */
	ctx.cfg->initialized = false;

	/* Try to initialize MMU for the given context */
	retval = xlat_arch_setup_mmu_cfg(&ctx);

	/* Setup must be rejected for an uninitialized configuration */
	CHECK_TRUE(retval == -EINVAL);
}
2173
ASSERT_TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC3)
{
	/***************************************************************
	 * TEST CASE 3:
	 *
	 * Test xlat_arch_setup_mmu_cfg() with a NULL context and
	 * verify that it asserts.
	 ***************************************************************/

	test_helpers_expect_assert_fail(true);
	(void)xlat_arch_setup_mmu_cfg(NULL);
	test_helpers_fail_if_no_assert_failed();
}
2186
IGNORE_TEST(xlat_tests_G2, xlat_write_tte_TC1)
{
	/*
	 * xlat_write_tte() is implemented as an assembler function
	 * for the target AArch64 Architecture. There is a C stub for
	 * the fake_host platform which we do not need to test.
	 *
	 * This test can therefore be ignored.
	 */

	TEST_EXIT;
}
2199
IGNORE_TEST(xlat_tests_G2, xlat_read_tte_TC1)
{
	/*
	 * xlat_read_tte() is implemented as an assembler function
	 * for the target AArch64 Architecture. There is a C stub for
	 * the fake_host platform which we do not need to test.
	 *
	 * This test can therefore be ignored.
	 */

	TEST_EXIT;
}
2212
IGNORE_TEST(xlat_tests_G2, xlat_enable_mmu_el2_TC1)
{
	/*
	 * xlat_enable_mmu_el2() is implemented as an assembler function
	 * for the target AArch64 Architecture. There is a C stub for
	 * the fake_host platform which we do not need to test.
	 *
	 * This test can therefore be ignored.
	 */

	TEST_EXIT;
}