1/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
4 */
5
6#include <CppUTest/CommandLineTestRunner.h>
7#include <CppUTest/TestHarness.h>
8
9extern "C" {
10#include <arch_helpers.h>
11#include <debug.h>
12#include <host_utils.h>
13#include <stdlib.h>
14#include <string.h>
15#include <test_helpers.h>
16#include <utils_def.h>
17#include <xlat_contexts.h> /* API to test */
18#include <xlat_defs.h>
19#include <xlat_tables.h> /* API to test */
20#include <xlat_test_defs.h>
21#include <xlat_test_helpers.h>
22}
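
/*
 * Second group of unit tests for the high level APIs of the xlat library
 * (xlat_contexts.h and xlat_tables.h), exercised through the host test
 * helpers.
 */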
23
24TEST_GROUP(xlat_tests_G2) {
25 TEST_SETUP()
26 {
27 test_helpers_init();
28 xlat_test_hepers_arch_init();
29 }
30
31 TEST_TEARDOWN()
32 {}
33};
34
35/*
36 * Generate VA space parameters given a walk start level and a region.
37 * The VA returned will fit in a single table of level `level`, so that
38 * the translation can start at that given level.
39 */
40static unsigned long long gen_va_space_params_by_lvl(unsigned int level,
41 xlat_addr_region_id_t region,
42 size_t *va_size)
43{
44 assert(level <= XLAT_TABLE_LEVEL_MAX);
45 assert(va_size != NULL);
46
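	/*
	 * The generated VA space covers exactly one full translation table at
	 * 'level': XLAT_TABLE_ENTRIES entries, each mapping a block of
	 * XLAT_BLOCK_SIZE(level) bytes. For example, assuming a 4KB granule,
	 * 'level' == 3 would yield 512 * 4KB = 2MB of VA space.
	 */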
47 *va_size = (1ULL << (XLAT_ADDR_SHIFT(level) +
48 XLAT_TABLE_ENTRIES_SHIFT));
49
50 return xlat_test_helpers_get_start_va(region, *va_size);
51}
52
53/*
54 * Generate a mmap array containing a set of mmap regions defined by
55 * 'start_va', 'last_lvl' and 'offset'. The mmap array will have
56 * three regions:
57 *
58 * - First region mapped at the beginning of a table whose final
 59 * lookup level is 'last_lvl'. This region will be a descendant of
60 * an entry at the beginning of a table at level 'first_lvl'.
61 * - Second region mapped at a random index of a table whose final
 62 * lookup level is 'last_lvl'. This region will be a descendant of
63 * an entry at a random index of a table at level 'first_lvl'.
64 * - Third region mapped at the end of a table whose final
 65 * lookup level is 'last_lvl'. This region will be a descendant of
 66 * the final entry of a table at level 'first_lvl'.
67 *
68 * ┌──────────┐
69 * ┌───────────────┤ First │
70 * │ │ Region │
71 * │ ├──────────┤
72 * │ │ │
73 * │ │ │
74 * │ │ │
75 * │ │ │
76 * │ │ │
77 * │ │ │
78 * │ │ │
79 * ┌──────────────┐ │ │ │
80 * │ │ │ │ │
81 * │ First entry ├───────┘ │ │
82 * ├──────────────┤ │ │
83 * │ Second entry │ │ │
84 * │ (Reserved) │ └──────────┘
85 * │ │
86 * ├──────────────┤
87 * │ │ ┌──────────┐
88 * │ │ │ │
89 * │ │ │ │
90 * │ │ │ │
91 * ├──────────────┤ ├──────────┤
92 * │ Second │ │ Second │
93 * │ Region ├────────────────────────┤ Region │
94 * ├──────────────┤ ├──────────┤
95 * │ │ │ │
96 * │ │ │ │
97 * │ │ │ │
98 * │ │ │ │
99 * │ │ │ │
100 * │ │ │ │
101 * ├──────────────┤ └──────────┘
102 * │ │
103 * │ Third Region ├───────┐
104 * └──────────────┘ │ ┌─────────┐
105 * First Level │ │ │
106 * │ │ │
107 * │ │ │
108 * │ │ │
109 * │ │ │
110 * │ │ │
111 * │ │ │
112 * │ │ │
113 * │ │ │
114 * │ │ │
115 * │ │ │
116 * │ │ │
117 * │ │ │
118 * │ ├─────────┤
119 * └─────────────────┤ Third |
120 * | region │
121 * └─────────┘
122 * Last level
123 *
124 * For all the mmap regions, the granularity (returned in *granularity) is
125 * set to the minimum granularity needed to map a block at level 'last_lvl'.
126 * The size of each mmap region is set to the same value as the granularity.
127 *
128 * This function also returns:
129 * - An array ('tbl_idxs') with the expected indexes at which the
130 * regions are mapped in the last level table.
131 */
132static int gen_mmap_array_by_level(xlat_mmap_region *mmap,
133 unsigned int *tbl_idxs,
134 unsigned int mmap_size,
135 unsigned int first_lvl,
136 unsigned int last_lvl,
137 size_t *granularity,
138 unsigned long long start_va,
139 bool allow_transient)
140{
141 uint64_t attrs;
142 unsigned long long mmap_start_va = start_va;
143
144 assert(mmap_size >= 3U);
145 assert(last_lvl > 0U);
146 assert(last_lvl <= XLAT_TABLE_LEVEL_MAX);
147 assert(first_lvl <= last_lvl);
148 assert(mmap != NULL);
149 assert(tbl_idxs != NULL);
150 assert(granularity != NULL);
151
152 /* Generate a mapping at the beginning of the table */
153 tbl_idxs[0U] = 0U;
154
155 /*
156 * Generate a mapping at a random position in the table.
157 * The entry after the first one will always be left intentionally
158 * unused.
159 */
160 tbl_idxs[1U] = test_helpers_get_rand_in_range(2,
161 (XLAT_TABLE_ENTRIES - 2));
162
163 /* Generate a mapping at the end of the table */
164 tbl_idxs[2U] = XLAT_TABLE_ENTRIES - 1U;
165
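	/*
	 * Pick random attributes for the mmap regions. If transient mappings
	 * are not allowed, retry until a non-transient set of attributes is
	 * generated.
	 */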
166 do {
167 attrs = xlat_test_helpers_rand_mmap_attrs();
168 } while ((attrs == MT_TRANSIENT) && (allow_transient == false));
169
170 *granularity = XLAT_BLOCK_SIZE(last_lvl);
171
172 for (unsigned i = 0U; i < 3U; i++) {
173 mmap[i].base_va = mmap_start_va;
174 if (first_lvl < last_lvl)
175 {
176 /*
177 * Add an offset to the mmap region base VA so that
178 * this region will be mapped to a TTE in the
179 * `first_lvl` table at the same index as specified
180 * in tbl_idxs[].
181 */
182 mmap[i].base_va += tbl_idxs[i] *
183 XLAT_BLOCK_SIZE(first_lvl);
184 }
185
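		/*
		 * Place the region at entry tbl_idxs[i] of its last level
		 * table by advancing the base VA by that many granules.
		 */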
186 mmap[i].base_va += (tbl_idxs[i] * (*granularity));
187
188 /*
189 * PA can be any address (as long as there are no overlaps,
190 * for which there is a specific test). For simplicity,
191 * create an identity mapping using the base_va for the PA.
192 */
193 mmap[i].base_pa = mmap[i].base_va & XLAT_TESTS_PA_MASK;
194 mmap[i].size = *granularity;
195 mmap[i].attr = attrs;
196 mmap[i].granularity = *granularity;
197 }
198
199 return 0;
200}
201
202/*
203 * Given a context and a set of expected indexes and levels for the last walk,
204 * validate that the translation tables in the context are valid.
205 * Note that this function expects a valid and initialized context.
206 */
207static void validate_xlat_tables(xlat_ctx *ctx, unsigned int *expected_idxs,
208 unsigned int expected_level)
209{
210 uint64_t tte, attrs, upper_attrs, lower_attrs, type;
211 uint64_t exp_upper_attrs, exp_lower_attrs;
212 unsigned int level, index, granularity, addr_offset;
213 unsigned long long test_va, pa, pa_mask;
214 unsigned int retval;
215
216 assert(ctx != NULL);
217 assert(expected_idxs != NULL);
218
219 for (unsigned int i = 0U; i < ctx->cfg->mmap_regions; i++) {
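		/*
		 * Pick a random VA inside the mmap region and derive the PA it
		 * is expected to translate to.
		 */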
220 granularity = ctx->cfg->mmap[i].granularity;
221 addr_offset = test_helpers_get_rand_in_range(0,
222 granularity - 1U);
223 test_va = ctx->cfg->base_va + ctx->cfg->mmap[i].base_va +
224 addr_offset;
225 pa = ctx->cfg->mmap[i].base_pa + addr_offset;
226
227 /* Perform a table walk */
228 retval = xlat_test_helpers_table_walk(ctx, test_va,
229 &tte, NULL, &level,
230 &index);
231
232 /* Return value */
233 CHECK_VERBOSE((retval == 0),
234 "Perform table walk for addr 0x%llx", test_va);
235
236 /* Last table level */
237 CHECK_EQUAL(expected_level, level);
238
239 /* tte index on the page */
240 CHECK_EQUAL(expected_idxs[i], index);
241
242 /* Expected tte attributes */
243 retval = xlat_test_helpers_get_attrs_for_va(ctx, test_va,
244 &attrs);
245
246 /* Return value */
247 CHECK_EQUAL(0, retval);
248
249 upper_attrs = EXTRACT(UPPER_ATTRS, attrs);
250 exp_upper_attrs = EXTRACT(UPPER_ATTRS, tte);
251 lower_attrs = EXTRACT(LOWER_ATTRS, attrs);
252 exp_lower_attrs = EXTRACT(LOWER_ATTRS, tte);
253
254 /* Validate that the attributes are as expected */
255 CHECK_VERBOSE((exp_upper_attrs == upper_attrs),
256 "Validate Upper Attrs: Read 0x%lx - Expected 0x%lx",
257 exp_upper_attrs, upper_attrs);
258
259 CHECK_VERBOSE((exp_lower_attrs == lower_attrs),
260 "Validate Lower Attrs: Read 0x%lx - Expected 0x%lx",
261 exp_lower_attrs, lower_attrs);
262
263 /* Validate the PA */
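		/*
		 * The tte only encodes the output address down to the
		 * block/page boundary at 'level', so mask out the in-block
		 * offset bits of the expected PA before comparing.
		 */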
264 pa_mask = (1ULL << XLAT_ADDR_SHIFT(level)) - 1ULL;
265 CHECK_EQUAL((tte & TABLE_ADDR_MASK), (pa & ~pa_mask));
266
267 /* Validate the descriptor type */
268 type = (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC :
269 BLOCK_DESC;
270 CHECK_EQUAL(type, (tte & DESC_MASK));
271 }
272}
273
274TEST(xlat_tests_G2, xlat_ctx_init_TC6)
275{
276 struct xlat_ctx ctx;
277 struct xlat_ctx_cfg cfg;
278 struct xlat_ctx_tbls tbls;
279 uint64_t start_va;
280 size_t va_size, granularity;
281 unsigned int mmap_count;
282 xlat_addr_region_id_t va_region;
283 int retval;
284 struct xlat_mmap_region init_mmap[3U];
285 unsigned int tbl_idx[3U];
286 unsigned int base_lvl, end_lvl;
287
288 /**********************************************************************
289 * TEST CASE 6:
290 *
291 * For each possible base level, create sets of mmap regions
292 * whose final lookup level ranges from level 1 (the lowest level
293 * at which a valid walk can finish) to XLAT_TABLE_LEVEL_MAX.
294 *
295 * For each possible (va_region, base_lvl, end_lvl) triplet,
296 * three mmap regions will be created:
297 *
298 * - First region mapped at the beginning of a table whose final
299 * lookup level is 'end_lvl'. This region will be a descendant of
300 * an entry at the beginning of a 'base_lvl' table.
301 * - Second region mapped at a random index of a table whose final
302 * lookup level is 'end_lvl'. This region will be a descendant of
303 * an entry at a random index of a 'base_lvl' table.
304 * - Third region mapped at the end of a table whose final
305 * lookup level is 'end_lvl'. This region will be a descendant of
306 * an entry at the end of a 'base_lvl' table.
307 *
308 * Then verify that the tables can be walked and that the levels,
309 * offsets and attributes on the ttes are as expected.
310 *
311 * This test validates that the xlat library is able to create
312 * tables starting on any valid initial lookup level and
313 * finishing on any valid level as well.
314 *********************************************************************/
315
316 mmap_count = 3U;
317
318 /* The first level that supports blocks is L1 */
319 for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
320 for (int i = 0U; i < VA_REGIONS; i++) {
321 va_region = (xlat_addr_region_id_t)i;
322
323 for (base_lvl = 0U;
324 base_lvl <= end_lvl;
325 base_lvl++) {
326
327 start_va = gen_va_space_params_by_lvl(base_lvl,
328 va_region,
329 &va_size);
330
331 retval = gen_mmap_array_by_level(&init_mmap[0U],
332 &tbl_idx[0U],
333 mmap_count,
334 base_lvl,
335 end_lvl,
336 &granularity,
337 start_va,
338 false);
339 /*
340 * verify that the test setup is correct so far
341 */
342 CHECK_TRUE(retval == 0);
343
344 /* Clean the data structures */
345 memset((void *)&ctx, 0,
346 sizeof(struct xlat_ctx));
347 memset((void *)&cfg, 0,
348 sizeof(struct xlat_ctx_cfg));
349 memset((void *)&tbls, 0,
350 sizeof(struct xlat_ctx_tbls));
351
352 /* Initialize the test structure */
353 retval = xlat_ctx_cfg_init(&cfg, va_region,
354 &init_mmap[0U],
355 mmap_count, va_size);
356
357 /*
358 * verify that the test setup is correct so far
359 */
360 CHECK_TRUE(retval == 0);
361
362 /* Test xlat_ctx_init() */
363 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
364 xlat_test_helpers_tbls(),
365 XLAT_TESTS_MAX_TABLES);
366
367 /*
368 * verify that the test setup is correct so far
369 */
370 CHECK_TRUE(retval == 0);
371
372 validate_xlat_tables(&ctx, &tbl_idx[0U],
373 end_lvl);
374 }
375 }
376 }
377}
378
379TEST(xlat_tests_G2, xlat_get_llt_from_va_TC1)
380{
381 struct xlat_ctx ctx;
382 struct xlat_ctx_cfg cfg;
383 struct xlat_ctx_tbls tbls;
384 struct xlat_llt_info tbl_info, tbl_val;
385 struct xlat_mmap_region init_mmap[3U];
386 uint64_t start_va;
387 size_t va_size, granularity;
388 unsigned int mmap_count, index;
389 xlat_addr_region_id_t va_region;
390 int retval;
391 unsigned int tbl_idx[3U];
392 unsigned int base_lvl, end_lvl;
393 unsigned int mmap_idx;
394 uint64_t tte;
395 unsigned long long test_va;
396
397 /***************************************************************
398 * TEST CASE 1:
399 *
400 * For each possible base level, create sets of mmap regions
401 * whose final lookup level ranges from level 1 (the lowest level
402 * at which a valid walk can finish) to XLAT_TABLE_LEVEL_MAX.
403 *
404 * For each possible (va_region, base_lvl, end_lvl) triplet,
405 * create 3 mappings that will each correspond to a tte in the
406 * last level table. Then verify that the call to
407 * xlat_get_llt_from_va() is able to return the right
408 * xlat_llt_info structure with the expected values.
409 ***************************************************************/
410
411 mmap_count = 3U;
412 va_region = (xlat_addr_region_id_t)test_helpers_get_rand_in_range(
413 0, VA_REGIONS - 1);
414
415 for (end_lvl = 1U;
416 end_lvl <= XLAT_TABLE_LEVEL_MAX;
417 end_lvl++) {
418
419 for (base_lvl = 0U;
420 base_lvl <= end_lvl;
421 base_lvl++) {
422
423 /* Clean the data structures */
424 memset((void *)&ctx, 0,
425 sizeof(struct xlat_ctx));
426 memset((void *)&cfg, 0,
427 sizeof(struct xlat_ctx_cfg));
428 memset((void *)&tbls, 0,
429 sizeof(struct xlat_ctx_tbls));
430 memset((void *)&tbl_info, 0,
431 sizeof(struct xlat_llt_info));
432 memset((void *)&tbl_val, 0,
433 sizeof(struct xlat_llt_info));
434
435 start_va = gen_va_space_params_by_lvl(base_lvl,
436 va_region,
437 &va_size);
438
439 /*
440 * Use gen_mmap_array_by_level() to generate
441 * the mmap array.
442 */
443 retval = gen_mmap_array_by_level(&init_mmap[0U],
444 &tbl_idx[0U],
445 mmap_count,
446 base_lvl,
447 end_lvl,
448 &granularity,
449 start_va,
450 true);
451
452 /* Ensure that so far the test setup is OK */
453 CHECK_TRUE(retval == 0);
454
455 retval = xlat_ctx_cfg_init(&cfg, va_region,
456 &init_mmap[0U],
457 mmap_count, va_size);
458
459 /* Ensure that so far the test setup is OK */
460 CHECK_TRUE(retval == 0);
461
462 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
463 xlat_test_helpers_tbls(),
464 XLAT_TESTS_MAX_TABLES);
465
466 /* Ensure that so far the test setup is OK */
467 CHECK_TRUE(retval == 0);
468
469 for (mmap_idx = 0U; mmap_idx < mmap_count; mmap_idx++) {
470 /*
471 * For each mmap region, pick up a
472 * random address for the test.
473 */
474 test_va = init_mmap[mmap_idx].base_va
475 + ctx.cfg->base_va;
476 test_va +=
477 test_helpers_get_rand_in_range(0,
478 init_mmap[mmap_idx].size - 1);
479
480 /*
481 * Perform a table walk to retrieve
482 * table info. Store the expected values
483 * inside the validation xlat_llt_info
484 * structure.
485 */
486 retval = xlat_test_helpers_table_walk(&ctx,
487 test_va,
488 &tte,
489 &(tbl_val.table),
490 &(tbl_val.level),
491 &index);
492
493 /*
494 * Calculate the expected base VA for the llt.
495 */
496 tbl_val.llt_base_va = start_va;
497 tbl_val.llt_base_va += (base_lvl < end_lvl) ?
498 (XLAT_BLOCK_SIZE(base_lvl) *
499 tbl_idx[mmap_idx]) : 0;
500
501
502 /* Ensure that so far the test setup is OK */
503 CHECK_TRUE(retval == 0);
504
505 VERBOSE("\nTesting VA 0x%llx", test_va);
506
507 /* Test xlat_get_llt_from_va */
508 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
509 test_va);
510
511 /* Check the return value */
512 CHECK_TRUE(retval == 0);
513
514 /*
515 * Validate the structure returned by
516 * xlat_get_llt_from_va
517 */
518 MEMCMP_EQUAL((void *)&tbl_val,
519 (void *)&tbl_info,
520 sizeof(struct xlat_llt_info));
521 VERBOSE(" : PASS\n\n");
522 }
523 }
524 }
525}
526
527TEST(xlat_tests_G2, xlat_get_llt_from_va_TC2)
528{
529 struct xlat_ctx ctx;
530 struct xlat_ctx_cfg cfg;
531 struct xlat_ctx_tbls tbls;
532 struct xlat_llt_info tbl_info;
533 struct xlat_mmap_region init_mmap[3U];
534 unsigned int tbl_idx[3U];
535 size_t va_size, granularity;
536 uint64_t start_va, test_va;
537 xlat_addr_region_id_t va_region;
538 unsigned int base_lvl, end_lvl;
539 int retval;
540
541 /***************************************************************
542 * TEST CASE 2:
543 *
544 * Test xlat_get_llt_from_va() with VAs outside
545 * of the context VA space.
546 ***************************************************************/
547
548 /*
549 * Pick base and end levels for the translation tables.
550 * The levels are arbitrary; they just need to provide a
551 * VA space large enough for the tests.
552 */
553 base_lvl = 2U;
554 end_lvl = 3U;
555
556 for (int i = 0U; i < VA_REGIONS; i++) {
557 va_region = (xlat_addr_region_id_t)i;
558
559 /*
560 * For the low region, the test will be executed
561 * only once, for a VA above the VA space limits.
562 *
563 * For the high region, the test will be executed twice:
564 * - Once for VA below the VA space.
565 * - Once for a VA above the VA space.
566 */
567 for (unsigned int j = 0; j < (i + 1U); j++) {
568
569 /* Clean the data structures */
570 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
571 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
572 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
573 memset((void *)&tbl_info, 0,
574 sizeof(struct xlat_llt_info));
575
576 /* Get VA space limits for Level 2 */
577 start_va = gen_va_space_params_by_lvl(base_lvl, va_region,
578 &va_size);
579
580 /*
581 * use gen_mmap_array_by_level() to generate
582 * the mmap for convenience.
583 */
584 retval = gen_mmap_array_by_level(&init_mmap[0U],
585 &tbl_idx[0U],
586 3U, base_lvl, end_lvl,
587 &granularity,
588 start_va,
589 true);
590
591 /* Ensure that so far the test setup is OK */
592 CHECK_TRUE(retval == 0);
593
594 retval = xlat_ctx_cfg_init(&cfg, va_region,
595 &init_mmap[0U], 3U,
596 MAX_VIRT_ADDR_SPACE_SIZE);
597 CHECK_TRUE(retval == 0);
598
599 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
600 xlat_test_helpers_tbls(),
601 XLAT_TESTS_MAX_TABLES);
602 CHECK_TRUE(retval == 0);
603
604 VERBOSE("\n");
605
606 if (j == 0U) {
607 /*
608 * VA above the VA space.
609 * The upper range of the address is arbitrary.
610 */
611 test_va = (ctx.cfg->max_va_size) +
612 test_helpers_get_rand_in_range(0,
613 XLAT_BLOCK_SIZE(base_lvl) - 1);
614 } else {
615 /*
616 * VA below the VA space.
617 * The upper range of the address is arbitrary.
618 */
619 test_va = test_helpers_get_rand_in_range(0,
620 XLAT_BLOCK_SIZE(base_lvl) - 1);
621 }
622
623 /* Test xlat_get_llt_from_va */
624 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
625
626 /* Check the return value */
627 CHECK_VERBOSE((retval == -EFAULT),
628 "Testing VA 0x%lx", test_va);
629 VERBOSE("\n");
630 }
631 }
632}
633
634TEST(xlat_tests_G2, xlat_get_llt_from_va_TC3)
635{
636 struct xlat_ctx ctx;
637 struct xlat_ctx_cfg cfg;
638 struct xlat_ctx_tbls tbls;
639 struct xlat_llt_info tbl_info;
640 struct xlat_mmap_region init_mmap[3U];
641 unsigned int tbl_idx[3U];
642 size_t va_size, granularity;
643 uint64_t start_va, test_va;
644 xlat_addr_region_id_t va_region;
645 unsigned int base_lvl, end_lvl;
646 int retval;
647
648 /***************************************************************
649 * TEST CASE 3:
650 *
651 * Test xlat_get_llt_from_va() with an unmapped VA belonging to
652 * the context VA space.
653 ***************************************************************/
654
655 /*
656 * Pick base and end levels for the translation tables.
657 * The levels are arbitrary; they just need to provide a
658 * VA space large enough for the tests.
659 */
660 base_lvl = 0U;
661 end_lvl = 3U;
662
663 for (int i = 0U; i < VA_REGIONS; i++) {
664 va_region = (xlat_addr_region_id_t)i;
665
666 /* Clean the data structures */
667 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
668 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
669 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
670 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
671
672 /* VA space boundaries */
673 start_va = gen_va_space_params_by_lvl(base_lvl, va_region,
674 &va_size);
675
676 /*
677 * use gen_mmap_array_by_level() to generate
678 * the mmap for convenience, although we will
679 * only use one of the mmap regions (init_mmap[0]).
680 */
681 retval = gen_mmap_array_by_level(&init_mmap[0U],
682 &tbl_idx[0U],
683 3U, base_lvl, end_lvl,
684 &granularity,
685 start_va,
686 true);
687
688 /* Ensure that so far the test setup is OK */
689 CHECK_TRUE(retval == 0);
690
691 retval = xlat_ctx_cfg_init(&cfg, va_region,
692 &init_mmap[0U], 3U,
693 MAX_VIRT_ADDR_SPACE_SIZE);
694 CHECK_TRUE(retval == 0);
695
696 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
697 xlat_test_helpers_tbls(),
698 XLAT_TESTS_MAX_TABLES);
699 CHECK_TRUE(retval == 0);
700
701 VERBOSE("\n");
702
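		/*
		 * Generate a VA right after the end of init_mmap[0U]. That
		 * area is inside the context VA space but is intentionally
		 * left unmapped by gen_mmap_array_by_level().
		 */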
703 test_va = ctx.cfg->base_va;
704 test_va += (init_mmap[0U].base_va + init_mmap[0U].size);
705 test_va += test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
706
707 /* Test xlat_get_llt_from_va */
708 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
709
710 /* Check the return value */
711 CHECK_VERBOSE((retval == 0),
712 "Testing VA 0x%lx", test_va);
713 VERBOSE("\n");
714 }
715}
716
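/*
 * Helper for the assertion tests below: initialize a context with a single
 * random mmap region on a random VA region, so that xlat_get_llt_from_va()
 * can then be invoked with selectively corrupted arguments.
 */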
717void xlat_get_llt_from_va_prepare_assertion(struct xlat_ctx *ctx,
718 struct xlat_ctx_cfg *cfg,
719 struct xlat_ctx_tbls *tbls,
720 struct xlat_mmap_region *init_mmap)
721{
722 uint64_t start_va, end_va;
723 xlat_addr_region_id_t va_region;
724
725 assert(ctx != NULL);
726 assert(cfg != NULL);
727 assert(tbls != NULL);
728 assert(init_mmap != NULL);
729
730 va_region = (xlat_addr_region_id_t)test_helpers_get_rand_in_range(0,
731 VA_REGIONS - 1U);
732
733 /* Clean the data structures */
734 memset((void *)ctx, 0, sizeof(struct xlat_ctx));
735 memset((void *)cfg, 0, sizeof(struct xlat_ctx_cfg));
736 memset((void *)tbls, 0, sizeof(struct xlat_ctx_tbls));
737
738 /* VA space boundaries */
739 start_va = xlat_test_helpers_get_start_va(va_region,
740 MAX_VIRT_ADDR_SPACE_SIZE);
741 end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;
742
743 /* Generate a random mmap area */
744 xlat_test_helpers_rand_mmap_array(init_mmap, 1U, start_va, end_va);
745
746 (void)xlat_ctx_cfg_init(cfg, va_region, init_mmap, 1U,
747 MAX_VIRT_ADDR_SPACE_SIZE);
748
749 (void)xlat_ctx_init(ctx, cfg, tbls,
750 xlat_test_helpers_tbls(),
751 XLAT_TESTS_MAX_TABLES);
752}
753
754ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC4)
755{
756
757 struct xlat_ctx ctx;
758 struct xlat_ctx_cfg cfg;
759 struct xlat_ctx_tbls tbls;
760 struct xlat_mmap_region init_mmap;
761 uint64_t test_va;
762
763 /***************************************************************
764 * TEST CASE 4:
765 *
766 * Try calling xlat_get_llt_from_va() with a NULL
767 * xlat_llt_info structure
768 ***************************************************************/
769
770 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
771
772 test_va = ctx.cfg->base_va + init_mmap.base_va;
773
774 /* Test xlat_get_llt_from_va */
775 test_helpers_expect_assert_fail(true);
776 (void)xlat_get_llt_from_va(NULL, &ctx, test_va);
777 test_helpers_fail_if_no_assert_failed();
778}
779
780ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC5)
781{
782 struct xlat_llt_info tbl_info;
783
784 /***************************************************************
785 * TEST CASE 5:
786 *
787 * Try calling xlat_get_llt_from_va() with a NULL
788 * xlat_ctx structure.
789 ***************************************************************/
790
791 /* Clean the data structures */
792 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
793
794 /* Test xlat_get_llt_from_va: NULL xlat_ctx */
795 test_helpers_expect_assert_fail(true);
796 (void)xlat_get_llt_from_va(&tbl_info, NULL, 0ULL);
797 test_helpers_fail_if_no_assert_failed();
798}
799
800ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC6)
801{
802 struct xlat_ctx ctx;
803 struct xlat_ctx_cfg cfg;
804 struct xlat_ctx_tbls tbls;
805 struct xlat_llt_info tbl_info;
806 struct xlat_mmap_region init_mmap;
807 uint64_t test_va;
808
809 /***************************************************************
810 * TEST CASE 6:
811 *
812 * Try calling xlat_get_llt_from_va() with a NULL
813 * xlat_ctx_cfg structure.
814 ***************************************************************/
815
816 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
817 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
818
819 test_va = ctx.cfg->base_va + init_mmap.base_va;
820
821 /* Test xlat_get_llt_from_va: NULL xlat_ctx.cfg */
822 ctx.cfg = NULL;
823 test_helpers_expect_assert_fail(true);
824 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
825 test_helpers_fail_if_no_assert_failed();
826}
827
828ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC7)
829{
830 struct xlat_ctx ctx;
831 struct xlat_ctx_cfg cfg;
832 struct xlat_ctx_tbls tbls;
833 struct xlat_llt_info tbl_info;
834 struct xlat_mmap_region init_mmap;
835 uint64_t test_va;
836
837 /***************************************************************
838 * TEST CASE 7:
839 *
840 * Try calling xlat_get_llt_from_va() with a NULL
841 * xlat_ctx_tbls structure.
842 ***************************************************************/
843
844 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
845 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
846
847 test_va = ctx.cfg->base_va + init_mmap.base_va;
848
849 /* Test xlat_get_llt_from_va: NULL xlat_ctx.tbls */
850 ctx.tbls = NULL;
851 test_helpers_expect_assert_fail(true);
852 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
853 test_helpers_fail_if_no_assert_failed();
854}
855
856ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC8)
857{
858 struct xlat_ctx ctx;
859 struct xlat_ctx_cfg cfg;
860 struct xlat_ctx_tbls tbls;
861 struct xlat_llt_info tbl_info;
862 struct xlat_mmap_region init_mmap;
863 uint64_t test_va;
864
865 /***************************************************************
866 * TEST CASE 8:
867 *
868 * Try calling xlat_get_llt_from_va() with an uninitialized
869 * xlat_ctx_cfg structure.
870 * Perform a full initialization of the context and then force
871 * 'ctx.cfg->initialized' to 'false' so we can ensure that
872 * this is what is actually being tested.
873 ***************************************************************/
874
875 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
876 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
877
878 test_va = ctx.cfg->base_va + init_mmap.base_va;
879
880 /* Mark the cfg structure as not initialized */
881 cfg.initialized = false;
882
883 test_helpers_expect_assert_fail(true);
884 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
885 test_helpers_fail_if_no_assert_failed();
886}
887
888ASSERT_TEST(xlat_tests_G2, xlat_get_llt_from_va_TC9)
889{
890 struct xlat_ctx ctx;
891 struct xlat_ctx_cfg cfg;
892 struct xlat_ctx_tbls tbls;
893 struct xlat_llt_info tbl_info;
894 struct xlat_mmap_region init_mmap;
895 uint64_t test_va;
896
897 /***************************************************************
898 * TEST CASE 9:
899 *
900 * Try calling xlat_get_llt_from_va() with an uninitialized
901 * xlat_ctx_tbls structure.
902 * Perform a full initialization of the context and then force
903 * 'ctx.tbls->initialized' to 'false' so we can ensure that
904 * this is what is actually being tested.
905 ***************************************************************/
906
907 xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
908 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
909
910 test_va = ctx.cfg->base_va + init_mmap.base_va;
911
912 /* Mark the tbls structure as not initialized */
913 tbls.initialized = false;
914
915 test_helpers_expect_assert_fail(true);
916 (void)xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
917 test_helpers_fail_if_no_assert_failed();
918}
919
920TEST(xlat_tests_G2, xlat_get_tte_ptr_TC1)
921{
922 struct xlat_ctx ctx;
923 struct xlat_ctx_cfg cfg;
924 struct xlat_ctx_tbls tbls;
925 struct xlat_llt_info tbl_info;
926 struct xlat_mmap_region init_mmap[3U];
927 unsigned int tbl_idx[3U];
928 uint64_t start_va, test_va;
929 xlat_addr_region_id_t va_region;
930 unsigned int level, index;
931 uint64_t *tte_ptr, *val_tte, *table;
932 uint64_t tte;
933 size_t granularity;
934 unsigned int base_lvl, end_lvl;
935 int retval;
936
937 /***************************************************************
938 * TEST CASE 1:
939 *
940 * Initialize a translation context with a given VA space and
941 * 3 mmap regions at level 3. Then get a tte using
942 * xlat_get_tte_ptr() and verify that it is the correct entry.
943 *
944 * This test tries three different mmap areas per VA region:
945 *
946 * - An address corresponding to the first entry at a
947 * last level table.
948 * - An address corresponding to the last entry at a
949 * last level table.
950 * - An address corresponding to an intermediate entry
951 * at a last level table.
952 *
953 * The test also covers 2 negative cases:
954 * 1. It tries to get the TTE via xlat_get_tte_ptr() for a lower
955 * VA than the base VA.
956 * 2. It tries to get the TTE for a higher VA than is mapped
957 * by the last level table.
958 ***************************************************************/
959
960 /*
961 * Pick base and end levels for the translation tables.
962 * The levels are arbitrary; they just need to provide a
963 * VA space large enough for the tests.
964 */
965 base_lvl = 0U;
966 end_lvl = 3U;
967
968 for (int i = 0U; i < VA_REGIONS; i++) {
969 va_region = (xlat_addr_region_id_t)i;
970
971 /* Clean the data structures */
972 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
973 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
974 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
975 memset((void *)&tbl_info, 0, sizeof(struct xlat_llt_info));
976
977 /* VA space boundaries */
978 start_va = xlat_test_helpers_get_start_va(va_region,
979 MAX_VIRT_ADDR_SPACE_SIZE);
980
981 /* Generate the mmap regions */
982 retval = gen_mmap_array_by_level(&init_mmap[0U],
983 &tbl_idx[0U],
984 3U, base_lvl, end_lvl,
985 &granularity,
986 start_va, true);
987
988 /* Ensure that so far the test setup is OK */
989 CHECK_TRUE(retval == 0);
990
991 retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U], 3U,
992 MAX_VIRT_ADDR_SPACE_SIZE);
993
994 /* Ensure that so far the test setup is OK */
995 CHECK_TRUE(retval == 0);
996
997 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
998 xlat_test_helpers_tbls(),
999 XLAT_TESTS_MAX_TABLES);
1000
1001 /* Ensure that so far the test setup is OK */
1002 CHECK_TRUE(retval == 0);
1003
1004 /* Get the xlat_llt_info structure used to look for TTEs */
1005 test_va = ctx.cfg->base_va + init_mmap[0].base_va;
1006 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1007
1008 /* Ensure that so far the test setup is OK */
1009 CHECK_TRUE(retval == 0);
1010
1011 /*
1012 * Iterate over test VAs of all 3 mmap regions to
1013 * test xlat_get_tte_ptr().
1014 */
1015 VERBOSE("\n");
1016 for (unsigned int i = 0U; i < 3U; i++) {
1017 /*
1018 * Get the xlat_llt_info structure used
1019 * to look for TTEs.
1020 */
1021 test_va = ctx.cfg->base_va + init_mmap[i].base_va;
1022 retval = xlat_get_llt_from_va(&tbl_info,
1023 &ctx, test_va);
1024
1025 /* Ensure that so far the test setup is OK */
1026 CHECK_TRUE(retval == 0);
1027
1028 /*
1029 * Add a random offset to the current 'test_va'
1030 * to be used for the tests.
1031 */
1032 test_va += test_helpers_get_rand_in_range(0,
1033 PAGE_SIZE - 1);
1034
1035 /*
1036 * Perform a table walk to get the table containing
1037 * the tte we are interested in as well as the
1038 * index of that tte in the table.
1039 */
1040 retval = xlat_test_helpers_table_walk(&ctx, test_va,
1041 &tte, &table,
1042 &level, &index);
1043 /* Ensure that so far the test setup is OK */
1044 CHECK_TRUE(retval == 0);
1045
1046 /* Get a pointer to the expected tte */
1047 val_tte = &table[index];
1048
1049 /* Test xlat_get_tte_ptr() */
1050 tte_ptr = xlat_get_tte_ptr(&tbl_info, test_va);
1051
1052 /* Validate the output */
1053 CHECK_VERBOSE((val_tte == tte_ptr),
1054 "Testing VA 0x%lx", test_va);
1055 }
1056
1057 /*
1058 * test xlat_get_tte_ptr() against a VA below the minimum
1059 * VA mapped by 'tbl_info'. Use init_mmap[1] for this test.
1060 */
1061 test_va = ctx.cfg->base_va + init_mmap[1U].base_va;
1062 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1063
1064 /* Ensure that so far the test setup is OK */
1065 CHECK_TRUE(retval == 0);
1066
1067 test_va = tbl_info.llt_base_va;
1068 test_va -= test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
1069
1070 tte_ptr = xlat_get_tte_ptr(&tbl_info, test_va);
1071
1072
1073 /* Validate the output */
1074 CHECK_VERBOSE((tte_ptr == NULL),
1075 "Check address 0x%lx against TT at VA 0x%lx",
1076 test_va, tbl_info.llt_base_va);
1077
1078 /*
1079 * test xlat_get_tte_ptr() against a VA above the max
1080 * VA mapped by 'tbl_info'. Use init_mmap[0] for this test.
1081 */
1082 test_va = ctx.cfg->base_va + init_mmap[0U].base_va;
1083 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1084
1085 /* Ensure that so far the test setup is OK */
1086 CHECK_TRUE(retval == 0);
1087
1088 test_va = tbl_info.llt_base_va + XLAT_BLOCK_SIZE(tbl_info.level - 1);
1089 test_va += test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
1090
1091 tte_ptr = xlat_get_tte_ptr(&tbl_info, test_va);
1092
1093 /* Validate the output */
1094 CHECK_VERBOSE((tte_ptr == NULL),
1095 "Check address 0x%lx against TT at VA 0x%lx",
1096 test_va, tbl_info.llt_base_va);
1097
1098 VERBOSE("\n");
1099 }
1100}
1101
1102ASSERT_TEST(xlat_tests_G2, xlat_get_tte_ptr_TC2)
1103{
1104 /***************************************************************
1105 * TEST CASE 2:
1106 *
1107 * Try to get a tte using xlat_get_tte_ptr() with a NULL
1108 * xlat_llt_info structure.
1109 ***************************************************************/
1110
1111 test_helpers_expect_assert_fail(true);
1112 (void)xlat_get_tte_ptr(NULL, 0ULL);
1113 test_helpers_fail_if_no_assert_failed();
1114}
1115
1116TEST(xlat_tests_G2, xlat_unmap_memory_page_TC1)
1117{
1118 struct xlat_ctx ctx;
1119 struct xlat_ctx_cfg cfg;
1120 struct xlat_ctx_tbls tbls;
1121 uint64_t start_va;
1122 size_t va_size, granularity;
1123 unsigned int mmap_count;
1124 xlat_addr_region_id_t va_region;
1125 int retval;
1126 struct xlat_mmap_region init_mmap[3U];
1127 unsigned int tbl_idx[3U];
1128 unsigned int base_lvl, end_lvl;
1129
1130 /***************************************************************
1131 * TEST CASE 1:
1132 *
1133 * For each possible end lookup level, create a set of transient
1134 * valid random mappings.
1135 *
1136 * For each possible (va_region, end_lvl) tuple, there will be
1137 * three mmap regions created:
1138 *
1139 * - First region mapped at the beginning of a table whose
1140 * final lookup level is 'end_lvl'
1141 * - Second region mapped at a random tte of a table whose
1142 * final lookup level is 'end_lvl'
1143 * - Third region mapped at the end of a table whose
1144 * final lookup level is 'end_lvl'
1145 *
1146 * Then verify that the tables can be unmapped and that the
1147 * resulting tte will contain a transient invalid entry.
1148 ***************************************************************/
1149
1150 mmap_count = 3U;
1151 base_lvl = 0U;
1152
1153 /* The first look-up level that supports blocks is L1 */
1154 for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
1155 for (int i = 0U; i < VA_REGIONS; i++) {
1156 va_region = (xlat_addr_region_id_t)i;
1157
1158 start_va = gen_va_space_params_by_lvl(base_lvl,
1159 va_region,
1160 &va_size);
1161
1162 retval = gen_mmap_array_by_level(&init_mmap[0U],
1163 &tbl_idx[0U],
1164 mmap_count,
1165 base_lvl,
1166 end_lvl,
1167 &granularity,
1168 start_va,
1169 false);
1170
1171 /* Verify that the test setup is correct so far */
1172 CHECK_TRUE(retval == 0);
1173
1174 /* Clean the data structures */
1175 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1176 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1177 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1178
1179 /* Initialize the test structure */
1180 retval = xlat_ctx_cfg_init(&cfg, va_region,
1181 &init_mmap[0U],
1182 mmap_count, va_size);
1183
1184 /* Verify that the test setup is correct so far */
1185 CHECK_TRUE(retval == 0);
1186
1187 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1188 xlat_test_helpers_tbls(),
1189 XLAT_TESTS_MAX_TABLES);
1190
1191 /* Verify that the test setup is correct so far */
1192 CHECK_TRUE(retval == 0);
1193
1194 /*
1195 * For each one of the mmap regions:
1196 * - get the TTE of a random VA and make it transient
1197 * - call xlat_unmap_memory_page() over the same VA
1198 * - verify that the TTE is now transient invalid.
1199 */
1200 for (unsigned j = 0U; j < mmap_count; j++) {
1201 uint64_t tte;
1202 uint64_t *tbl_ptr;
1203 unsigned int tte_idx, tte_lvl;
1204 struct xlat_llt_info tbl_info;
1205 uint64_t offset =
1206 test_helpers_get_rand_in_range(0,
1207 PAGE_SIZE - 1);
1208 uint64_t test_va = init_mmap[j].base_va +
1209 ctx.cfg->base_va + offset;
1210
1211 /*
1212 * Perform a table walk to retrieve the table
1213 * where the VA is mapped along with the index
1214 * of the TTE within the table.
1215 */
1216 retval = xlat_test_helpers_table_walk(&ctx,
1217 test_va, &tte,
1218 &tbl_ptr, &tte_lvl,
1219 &tte_idx);
1220
1221 /*
1222 * Verify that the test setup is correct so far
1223 */
1224 CHECK_TRUE(retval == 0);
1225
1226 /*
1227 * The TTE is expected to be valid. Make it
1228 * transient valid within the table.
1229 */
1230 tbl_ptr[tte_idx] |=
1231 (1ULL << TRANSIENT_FLAG_SHIFT);
1232
1233 /*
1234 * Retrieve the xlat_llt_info structure needed
1235 * to feed xlat_unmap_memory_page()
1236 */
1237 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
1238 test_va);
1239
1240 /*
1241 * Verify that the test setup is correct so far
1242 */
1243 CHECK_TRUE(retval == 0);
1244
1245 /*
1246 * Try to unmap the page/block
1247 * containing `test_va`
1248 */
1249 retval = xlat_unmap_memory_page(&tbl_info,
1250 test_va);
1251
1252 /* Verify that the return is as expected */
1253 CHECK_TRUE(retval == 0);
1254
1255 /*
1256 * Verify that the TTE is marked as transient
1257 * invalid.
1258 */
1259 CHECK_VERBOSE((tbl_ptr[tte_idx] ==
1260 TRANSIENT_DESC),
1261 "Verifying TTE for VA 0x%lx is marked as Transient Invalid",
1262 test_va);
1263 }
1264 VERBOSE("\n");
1265 }
1266 }
1267}
1268
1269TEST(xlat_tests_G2, xlat_unmap_memory_page_TC2)
1270{
1271 struct xlat_ctx ctx;
1272 struct xlat_ctx_cfg cfg;
1273 struct xlat_ctx_tbls tbls;
1274 uint64_t start_va, test_va;
1275 size_t va_size, granularity;
1276 unsigned int mmap_count;
1277 unsigned int tte_idx, tte_lvl;
1278 xlat_addr_region_id_t va_region;
1279 int retval;
1280 struct xlat_mmap_region init_mmap[3U];
1281 unsigned int tbl_idx[3U];
1282 struct xlat_llt_info tbl_info;
1283 uint64_t tte, val_tte;
1284 uint64_t *tbl_ptr;
1285 unsigned int base_lvl, end_lvl;
1286
1287 /***************************************************************
1288 * TEST CASE 2:
1289 *
1290 * Generate a mmap region with a set of transient valid
1291 * mappings. Then run a set of negative tests:
1292 *
1293 * - Try addresses below and above the range mapped by the
1294 * xlat_llt_info structure on a transient-valid entry.
1295 * - Try unmapping from a valid non-transient entry.
1296 * - Try unmapping from an invalid entry.
1297 ***************************************************************/
1298
1299 /*
1300 * Pick base and end levels for the translation tables.
1301 * The levels are arbitrary; they just need to provide a
1302 * VA space large enough for the tests.
1303 */
1304 base_lvl = 0U;
1305 end_lvl = 3U;
1306
1307 mmap_count = 3U;
1308
1309 for (int i = 0U; i < VA_REGIONS; i++) {
1310 va_region = (xlat_addr_region_id_t)i;
1311
1312 start_va = gen_va_space_params_by_lvl(base_lvl,
1313 va_region, &va_size);
1314
1315 /*
1316 * We generate the mmap regions to use. We will be interested
1317 * in init_mmap[1].
1318 */
1319 retval = gen_mmap_array_by_level(&init_mmap[0U], &tbl_idx[0U],
1320 mmap_count, base_lvl, end_lvl,
1321 &granularity,
1322 start_va, false);
1323
1324 /* Verify that the test setup is correct so far */
1325 CHECK_TRUE(retval == 0);
1326
1327 /* Clean the data structures */
1328 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1329 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1330 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1331
1332 /* Initialize the test structure */
1333 retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U],
1334 mmap_count, va_size);
1335
1336 /* Verify that the test setup is correct so far */
1337 CHECK_TRUE(retval == 0);
1338
1339 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1340 xlat_test_helpers_tbls(),
1341 XLAT_TESTS_MAX_TABLES);
1342
1343 /* Verify that the test setup is correct so far */
1344 CHECK_TRUE(retval == 0);
1345
1346 /*
1347 * Make the TTEs of the mapped region, which is expected
1348 * to be valid, transient valid.
1349 */
1350 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1351
1352 /*
1353 * Perform a table walk to retrieve the table where the VA
1354 * is mapped along with the index of the TTE within the table.
1355 */
1356 retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
1357 &tbl_ptr, &tte_lvl,
1358 &tte_idx);
1359
1360 /* Verify that the test setup is correct so far */
1361 CHECK_TRUE(retval == 0);
1362
1363 /*
1364 * The TTE is expected to be valid. Make it
1365 * transient valid within the table.
1366 */
1367 tbl_ptr[tte_idx] |= (1ULL << TRANSIENT_FLAG_SHIFT);
1368 val_tte = tbl_ptr[tte_idx];
1369
1370 /*
1371 * Retrieve the xlat_llt_info structure needed to feed
1372 * xlat_unmap_memory_page().
1373 */
1374 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
1375 init_mmap[1U].base_pa + ctx.cfg->base_va);
1376
1377 /* Verify that the test setup is correct so far */
1378 CHECK_TRUE(retval == 0);
1379
1380 /*
1381 * Test xlat_unmap_memory_page() with a valid address
1382 * below the start of init_mmap[0U]. This gives us an address
1383 * below the range mapped by the table we retrieved.
1384 */
1385 test_va = init_mmap[0U].base_va + ctx.cfg->base_va;
1386 test_va -= test_helpers_get_rand_in_range(1, PAGE_SIZE - 1);
1387
1388 /* Try to unmap the page/block containing `test_va` */
1389 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1390
1391 /* Verify that the return is as expected */
1392 CHECK_VERBOSE((retval == -EFAULT),
1393 "Testing VA 0x%lx on TTE for VA 0x%lx",
1394 test_va,
1395 init_mmap[1U].base_va + ctx.cfg->base_va);
1396
1397 /* Verify that the TTE remains unchanged */
1398 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1399
1400 /*
1401 * Repeat the process, this time with an address on a page
1402 * after the one mapped by init_mmap[2U]. This gives us an
1403 * address over the range mapped by the table we retrieved.
1404 */
1405 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1406 test_va += PAGE_SIZE;
1407 test_va += test_helpers_get_rand_in_range(0,
1408 PAGE_SIZE - 1);
1409
1410 /* Try to unmap the page/block containing `test_va` */
1411 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1412
1413 /* Verify that the return is as expected */
1414 CHECK_VERBOSE((retval == -EFAULT),
1415 "Testing VA 0x%lx on TTE for VA 0x%lx",
1416 test_va,
1417 init_mmap[2U].base_va + ctx.cfg->base_va);
1418
1419 /* Verify that the TTE remains unchanged */
1420 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1421
1422 /*
1423 * Try to unmap an address marked as non-transient
1424 */
1425 tbl_ptr[tte_idx] &= ~(MASK(TRANSIENT_FLAG));
1426 val_tte = tbl_ptr[tte_idx];
1427
1428 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1429 test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);
1430
1431 /* Try to unmap the page/block containing `test_va` */
1432 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1433
1434 /* Verify that the return is as expected */
1435 CHECK_VERBOSE((retval == -EFAULT),
1436 "Testing VA 0x%lx on a non-transient valid TTE",
1437 test_va);
1438
1439 /* Verify that the TTE remains unchanged */
1440 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1441
1442 /*
1443 * Try to unmap an address marked as invalid.
1444 */
1445 tbl_ptr[tte_idx] = INVALID_DESC;
1446 val_tte = tbl_ptr[tte_idx];
1447
1448 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1449 test_va += test_helpers_get_rand_in_range(0,
1450 PAGE_SIZE - 1);
1451
1452 /* Try to unmap the page/block containing `test_va` */
1453 retval = xlat_unmap_memory_page(&tbl_info, test_va);
1454
1455 /* Verify that the return is as expected */
1456 CHECK_VERBOSE((retval == -EFAULT),
1457 "Testing VA 0x%lx on a ninvalid TTE",
1458 test_va);
1459
1460 /* Verify that the TTE remains unchanged */
1461 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1462 VERBOSE("\n");
1463 }
1464}
1465
1466ASSERT_TEST(xlat_tests_G2, xlat_unmap_memory_page_TC3)
1467{
1468 /***************************************************************
1469 * TEST CASE 3:
1470 *
1471 * Try calling xlat_unmap_memory_page with a NULL
1472 * xlat_llt_info structure.
1473 ***************************************************************/
1474
1475 test_helpers_expect_assert_fail(true);
1476 (void)xlat_unmap_memory_page(NULL, 0ULL);
1477 test_helpers_fail_if_no_assert_failed();
1478}
1479
1480TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC1)
1481{
1482 struct xlat_ctx ctx;
1483 struct xlat_ctx_cfg cfg;
1484 struct xlat_ctx_tbls tbls;
1485 uint64_t start_va;
1486 size_t va_size, granularity;
1487 unsigned int mmap_count;
1488 xlat_addr_region_id_t va_region;
1489 int retval;
1490 struct xlat_mmap_region init_mmap[3U];
1491 unsigned int tbl_idx[3U];
1492 unsigned int base_lvl, end_lvl;
1493
1494 /***************************************************************
1495 * TEST CASE 1:
1496 *
1497 * For each possible end lookup level, create a set of transient
1498 * random mappings.
1499 *
1500 * For each possible (va_region, end_lvl) tuple, there will be three
1501 * mmap regions created:
1502 *
1503 * - First region mapped at the beginning of a table whose
1504 * final lookup level is 'end_lvl'
1505 * - Second region mapped at a random index of a table whose
1506 * final lookup level is 'end_lvl'
1507 * - Third region mapped at the end of a table whose
1508 * final lookup level is 'end_lvl'
1509 *
1510 * Then verify that we can map PA areas into the transient
1511 * entries using random attributes and that the generated
1512 * entry is valid.
1513 ***************************************************************/
1514
1515 mmap_count = 3U;
1516 base_lvl = 0U;
1517
1518 /* The first look-up level that supports blocks is L1 */
1519 for (end_lvl = 1U; end_lvl <= XLAT_TABLE_LEVEL_MAX; end_lvl++) {
1520 for (int i = 0U; i < VA_REGIONS; i++) {
1521 va_region = (xlat_addr_region_id_t)i;
1522
1523 start_va = gen_va_space_params_by_lvl(base_lvl,
1524 va_region,
1525 &va_size);
1526
1527 retval = gen_mmap_array_by_level(&init_mmap[0U],
1528 &tbl_idx[0U],
1529 mmap_count,
1530 base_lvl,
1531 end_lvl,
1532 &granularity,
1533 start_va,
1534 false);
1535
1536 /* Verify that the test setup is correct so far */
1537 CHECK_TRUE(retval == 0);
1538
1539 /* Force all the mmap regions to be TRANSIENT */
1540 for (unsigned int j = 0U; j < mmap_count; j++) {
1541 init_mmap[j].attr = MT_TRANSIENT;
1542 }
1543
1544 /* Clean the data structures */
1545 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1546 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1547 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1548
1549 /* Initialize the test structure */
1550 retval = xlat_ctx_cfg_init(&cfg, va_region,
1551 &init_mmap[0U],
1552 mmap_count, va_size);
1553
1554 /* Verify that the test setup is correct so far */
1555 CHECK_TRUE(retval == 0);
1556
1557 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1558 xlat_test_helpers_tbls(),
1559 XLAT_TESTS_MAX_TABLES);
1560
1561 /* Verify that the test setup is correct so far */
1562 CHECK_TRUE(retval == 0);
1563
1564 /*
1565 * For each one of the mmap regions:
1566 * - Generate a random VA within the mmap VA space.
1567 * - generate a set of random attributes.
1568 * - Map a random PA to the generated VA and with
1569 * the generated attributes.
1570 * - call xlat_map_memory_page_with_attrs() to
1571 * create the mapping.
1572 * - verify that the new entry is valid.
1573 */
1574 for (unsigned j = 0U; j < mmap_count; j++) {
1575 uint64_t tte, val_tte, attrs, pa, type;
1576 uint64_t *tbl_ptr;
1577 unsigned int tte_idx, tte_lvl;
1578 struct xlat_llt_info tbl_info;
1579 uint64_t offset =
1580 test_helpers_get_rand_in_range(0,
1581 init_mmap[i].size - 1);
1582 uint64_t test_va = init_mmap[j].base_va +
1583 ctx.cfg->base_va + offset;
1584
1585 /*
1586 * Perform a table walk to retrieve the table
1587 * where the VA is mapped along with the index
1588 * of the TTE within the table.
1589 */
1590 retval = xlat_test_helpers_table_walk(&ctx,
1591 test_va, &tte,
1592 &tbl_ptr, &tte_lvl,
1593 &tte_idx);
1594
1595 /*
1596 * Verify that the test setup is correct so far
1597 */
1598 CHECK_TRUE(retval == 0);
1599
1600 /* Generate a random set of attributes. */
1601 do {
1602 attrs = xlat_test_helpers_rand_mmap_attrs();
1603 } while (attrs == MT_TRANSIENT);
1604
1605 /*
1606 * Generate the validation TTE. For convenience,
1607 * create an identity mapping.
1608 */
1609 retval = xlat_test_helpers_gen_attrs(&val_tte,
1610 attrs);
1611 pa = init_mmap[j].base_va & XLAT_TESTS_PA_MASK;
1612
1613 /*
1614 * Add an arbitrary offset to PA to be passed to
1615 * xlat_map_memory_page_with_attrs()
1616 */
1617 pa += test_helpers_get_rand_in_range(1,
1618 XLAT_BLOCK_SIZE(end_lvl) - 1);
1619 val_tte |= pa & XLAT_ADDR_MASK(end_lvl);
1620
1621 /* The TTE will be a transient one */
1622 val_tte |= (1ULL <<
1623 TRANSIENT_FLAG_SHIFT);
1624
1625 /* TTE type */
1626 type = (end_lvl == XLAT_TABLE_LEVEL_MAX) ?
1627 PAGE_DESC :
1628 BLOCK_DESC;
1629 val_tte |= type;
1630
1631 /* Verify the test setup */
1632 CHECK_TRUE(retval == 0);
1633
1634 /*
1635 * Retrieve the xlat_llt_info structure needed
1636 * to feed xlat_map_memory_page_with_attrs()
1637 */
1638 retval = xlat_get_llt_from_va(&tbl_info, &ctx,
1639 test_va);
1640
1641 /*
1642 * Verify that the test setup is correct so far
1643 */
1644 CHECK_TRUE(retval == 0);
1645
1646 /*
1647 * Try to map the PA with the attributes to the
1648 * `test_va`
1649 */
1650 retval = xlat_map_memory_page_with_attrs(
1651 &tbl_info,
1652 test_va, pa, attrs);
1653
1654 /* Verify that the return is as expected */
1655 CHECK_VERBOSE((retval == 0),
1656 "Mapping PA 0x%.16lx to VA 0x%.16lx with attrs 0x%lx",
1657 pa, test_va, attrs);
1658 CHECK_TRUE(retval == 0);
1659
1660 /*
1661 * Verify that the generated TTE matches
1662 * the validation one.
1663 */
1664 CHECK_VERBOSE((val_tte == tbl_ptr[tte_idx]),
1665 "Verifying TTE 0x%.16lx against 0x%.16lx",
1666 tbl_ptr[tte_idx], val_tte);
1667 }
1668 VERBOSE("\n");
1669 }
1670 }
1671}
1672
1673TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC2)
1674{
1675 struct xlat_ctx ctx;
1676 struct xlat_ctx_cfg cfg;
1677 struct xlat_ctx_tbls tbls;
1678 uint64_t start_va, test_va, test_pa;
1679 size_t va_size, granularity;
1680 unsigned int mmap_count;
1681 unsigned int tte_idx, tte_lvl;
1682 xlat_addr_region_id_t va_region;
1683 int retval;
1684 struct xlat_mmap_region init_mmap[3U];
1685 unsigned int tbl_idx[3U];
1686 struct xlat_llt_info tbl_info;
1687 uint64_t tte, val_tte;
1688 uint64_t *tbl_ptr;
1689 unsigned int base_lvl, end_lvl;
1690 unsigned int pa_range_bits_arr[] = {
1691 PARANGE_0000_WIDTH, PARANGE_0001_WIDTH, PARANGE_0010_WIDTH,
1692 PARANGE_0011_WIDTH, PARANGE_0100_WIDTH, PARANGE_0101_WIDTH,
1693 };
1694 unsigned int parange_index = test_helpers_get_rand_in_range(0,
1695 sizeof(pa_range_bits_arr)/sizeof(pa_range_bits_arr[0]) - 1U);
1696
1697
1698 /***************************************************************
1699 * TEST CASE 2:
1700 *
1701 * Generate a mmap region with a set of transient invalid
1702 * mappings. Then run a set of negative tests:
1703 *
1704 * - Try addresses below and above the range mapped by the
1705 * xlat_llt_info structure on a transient-invalid entry.
1706 * - Try mapping a PA larger than the maximum supported PA
1707 * to a transient-invalid entry.
1708 * - Try mapping to a transient-valid entry.
1709 * - Try mapping to a valid entry.
1710 * - Try mapping to an invalid entry.
1711 ***************************************************************/
1712
1713 /*
1714 * Pick base and end levels for the translation tables.
1715 * The levels are arbitrary; they just need to provide a
1716 * VA space large enough for the tests.
1717 */
1718 base_lvl = 0U;
1719 end_lvl = 3U;
1720
1721 mmap_count = 3U;
1722
1723 for (int i = 0U; i < VA_REGIONS; i++) {
1724 va_region = (xlat_addr_region_id_t)i;
1725
1726 start_va = gen_va_space_params_by_lvl(base_lvl,
1727 va_region, &va_size);
1728
1729 /*
1730 * We generate the mmap regions to use. We will be interested
1731 * in init_mmap[1] for the transient-invalid tests and in
1732 * init_mmap[2] for the rest of the tests.
1733 */
1734 retval = gen_mmap_array_by_level(&init_mmap[0U], &tbl_idx[0U],
1735 mmap_count, base_lvl, end_lvl,
1736 &granularity,
1737 start_va, false);
1738
1739 /* Verify that the test setup is correct so far */
1740 CHECK_TRUE(retval == 0);
1741
1742 /* Force init_mmap[1] to be TRANSIENT */
1743 init_mmap[1U].attr = MT_TRANSIENT;
1744
1745 /* Clean the data structures */
1746 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
1747 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
1748 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
1749
1750 /* Initialize the test structure */
1751 retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap[0U],
1752 mmap_count, va_size);
1753
1754 /* Verify that the test setup is correct so far */
1755 CHECK_TRUE(retval == 0);
1756
1757 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
1758 xlat_test_helpers_tbls(),
1759 XLAT_TESTS_MAX_TABLES);
1760
1761 /* Verify that the test setup is correct so far */
1762 CHECK_TRUE(retval == 0);
1763
1764 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1765
1766 /*
1767 * Retrieve the xlat_llt_info structure needed to feed
1768 * xlat_map_memory_page_with_attrs().
1769 */
1770 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1771
1772 /* Verify that the test setup is correct so far */
1773 CHECK_TRUE(retval == 0);
1774
1775 /*
1776 * Test xlat_map_memory_page_with_attrs() with a valid address
1777 * within init_mmap[0]. This gives us an address
1778 * below the range mapped by the table we retrieved (which belongs
1779 * to init_mmap[1]). For simplicity, set the attributes and
1780 * the PA both to 0x0.
1781 */
1782 test_va = init_mmap[0U].base_va + ctx.cfg->base_va;
1783 test_va += test_helpers_get_rand_in_range(0, init_mmap[0U].size - 1);
1784
1785 /* Try to map to the page/block containing `test_va` */
1786 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1787 0ULL, 0ULL);
1788
1789 /* Verify that the return is as expected */
1790 CHECK_VERBOSE((retval == -EFAULT),
1791 "Testing VA 0x%.16lx on TTE for VA 0x%.16lx",
1792 test_va,
1793 init_mmap[1U].base_va + ctx.cfg->base_va);
1794
1795 /*
1796 * Repeat the process, this time with an address on a page
1797 * mapped by init_mmap[2]. This gives us an
1798 * address over the range mapped by the table we retrieved.
1799 */
1800 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1801 test_va += test_helpers_get_rand_in_range(0,
1802 PAGE_SIZE - 1);
1803
1804 /* Try to map to the page/block containing `test_va` */
1805 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1806 0ULL, 0ULL);
1807
1808 /* Verify that the return is as expected */
1809 CHECK_VERBOSE((retval == -EFAULT),
1810 "Testing VA 0x%.16lx on TTE for VA 0x%.16lx",
1811 test_va,
1812 init_mmap[2U].base_va + ctx.cfg->base_va);
1813
1814 /*
1815 * Test with a PA larger than the maximum PA supported.
1816 */
1817
1818 /* Configure a random maximum PA supported */
1819 xlat_test_helpers_set_parange(parange_index);
1820 test_pa =
1821 (1ULL << pa_range_bits_arr[parange_index]) + PAGE_SIZE;
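		/*
		 * 'test_pa' lies above the maximum PA allowed by the
		 * configured PARANGE, so the mapping attempt must fail.
		 */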
1822
1823 test_va = init_mmap[1U].base_va + ctx.cfg->base_va;
1824
1825 /*
1826 * Perform a table walk to retrieve the table where the VA
1827 * is mapped along with the index of the TTE within the table.
1828 */
1829 retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
1830 &tbl_ptr, &tte_lvl,
1831 &tte_idx);
1832
1833 /* Verify that the test setup is correct so far */
1834 CHECK_TRUE(retval == 0);
1835
1836 /*
1837 * Take a snapshot of the TTE. This will be used to verify
1838 * that the TTE hasn't been altered.
1839 */
1840 val_tte = tbl_ptr[tte_idx];
1841
1842 /* Get a random address to test */
1843 test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);
1844
1845 /* Try to map the PA to the page/block containing `test_va` */
1846 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1847 test_pa, 0ULL);
1848
1849 /* Verify that the return is as expected */
1850 CHECK_VERBOSE((retval == -EFAULT),
1851 "Testing PA 0x%.16lx on with a max supported PA of 0x%.16llx",
1852 test_pa,
1853 (1ULL << pa_range_bits_arr[parange_index]) - 1ULL);
1854
1855 /* Verify that the TTE remains unchanged */
1856 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1857
1858		/* Restore the maximum supported PA size (PARange 0b0101, 48 bits) for the next tests */
1859 host_write_sysreg("id_aa64mmfr0_el1",
1860 INPLACE(ID_AA64MMFR0_EL1_PARANGE, 5U));
1861
1862 /* The rest of the tests will be based on init_mmap[2] */
1863 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1864
1865 /*
1866 * Perform a table walk to retrieve the table where the VA
1867 * is mapped along with the index of the TTE within the table.
1868 */
1869 retval = xlat_test_helpers_table_walk(&ctx, test_va, &tte,
1870 &tbl_ptr, &tte_lvl,
1871 &tte_idx);
1872
1873 /* Verify that the test setup is correct so far */
1874 CHECK_TRUE(retval == 0);
1875
1876 /*
1877		 * Retrieve the xlat_llt_info structure needed to feed
1878 * xlat_map_memory_page_with_attrs().
1879 */
1880 retval = xlat_get_llt_from_va(&tbl_info, &ctx, test_va);
1881
1882 /* Verify that the test setup is correct so far */
1883 CHECK_TRUE(retval == 0);
1884
1885 /*
1886		 * Make the TTE of the mapped region, which is expected
1887 * to be valid, transient valid.
1888 */
1889 tbl_ptr[tte_idx] |= (1ULL << TRANSIENT_FLAG_SHIFT);
1890
1891 /*
1892 * Take a snapshot of the TTE. This will be used to verify
1893 * that the TTE hasn't been altered.
1894 */
1895 val_tte = tbl_ptr[tte_idx];
1896
1897 /*
1898		 * Now try to map a valid VA. In this case the associated
1899		 * TTE already contains a transient valid mapping.
1900 */
1901 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1902 test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);
1903
1904 /* Try to map to the page/block containing `test_va` */
1905 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1906 0ULL, 0ULL);
1907
1908 /* Verify that the return is as expected */
1909 CHECK_VERBOSE((retval == -EFAULT),
1910 "Testing VA 0x%.16lx on a transient valid TTE",
1911 test_va);
1912
1913 /* Verify that the TTE remains unchanged */
1914 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1915
1916 /*
1917		 * Repeat the last test after clearing the TRANSIENT
1918		 * flag from the TTE. This tests the behaviour with
1919		 * a non-transient valid TTE.
1920 */
1921 tbl_ptr[tte_idx] &= ~(1ULL << TRANSIENT_FLAG_SHIFT);
1922 val_tte = tbl_ptr[tte_idx];
1923
1924 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1925 test_va += test_helpers_get_rand_in_range(0, PAGE_SIZE - 1);
1926
1927 /* Try to map to the page/block containing `test_va` */
1928 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1929 0ULL, 0ULL);
1930
1931 /* Verify that the return is as expected */
1932 CHECK_VERBOSE((retval == -EFAULT),
1933 "Testing VA 0x%.16lx on a valid TTE",
1934 test_va);
1935
1936 /* Verify that the TTE remains unchanged */
1937 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1938
1939 /*
1940 * Repeat the last test on an INVALID TTE.
1941 */
1942 tbl_ptr[tte_idx] = 0ULL;
1943 val_tte = 0ULL;
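		/* An all-zero descriptor has the valid bit clear, i.e. it is an invalid TTE */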
1944
1945 test_va = init_mmap[2U].base_va + ctx.cfg->base_va;
1946 test_va += test_helpers_get_rand_in_range(0,
1947 PAGE_SIZE - 1);
1948
1949 /* Try to map to the page/block containing `test_va` */
1950 retval = xlat_map_memory_page_with_attrs(&tbl_info, test_va,
1951 0ULL, 0ULL);
1952
1953 /* Verify that the return is as expected */
1954 CHECK_VERBOSE((retval == -EFAULT),
1955 "Testing VA 0x%.16lx on an invalid TTE",
1956 test_va);
1957
1958 /* Verify that the TTE remains unchanged */
1959 CHECK_EQUAL(val_tte, tbl_ptr[tte_idx]);
1960
1961 VERBOSE("\n");
1962 }
1963}
1964
1965ASSERT_TEST(xlat_tests_G2, xlat_map_memory_page_with_attrs_TC3)
1966{
1967 /***************************************************************
1968 * TEST CASE 3:
1969 *
1970 * Try calling xlat_map_memory_page_with_attrs with a NULL
1971 * xlat_llt_info structure.
1972 ***************************************************************/
1973
1974 test_helpers_expect_assert_fail(true);
1975 (void)xlat_map_memory_page_with_attrs(NULL, 0ULL, 0ULL, 0ULL);
1976 test_helpers_fail_if_no_assert_failed();
1977}
1978
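/*
 * For reference, a minimal sketch (illustrative only, not part of the test
 * code) of the mapping sequence exercised by the tests above. `ctx`, `va`,
 * `pa` and `attrs` are placeholders:
 *
 *	struct xlat_llt_info llt;
 *
 *	// Cache the last level table that translates `va`.
 *	int ret = xlat_get_llt_from_va(&llt, &ctx, va);
 *
 *	if (ret == 0) {
 *		// Map `pa` with `attrs` on the TTE covering `va`. As the
 *		// tests above show, this fails with -EFAULT when `va` falls
 *		// outside the range covered by `llt`, when `pa` is larger
 *		// than the maximum supported PA, or when the target TTE is
 *		// valid (transient or not) or a non-transient invalid one.
 *		ret = xlat_map_memory_page_with_attrs(&llt, va, pa, attrs);
 *	}
 */
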
1979/* Helper function to validate ttbrx_el2 registers */
1980static void validate_ttbrx_el2(struct xlat_ctx *ctx)
1981{
1982 uint64_t expected_ttbrx, ttbrx;
1983 xlat_addr_region_id_t va_region;
1984
1985 assert(ctx != NULL);
1986
1987 va_region = ctx->cfg->region;
1988
1989	/* BADDR: expected to point to the context's base translation table */
1990 expected_ttbrx = ((uint64_t)&ctx->tbls->tables[0U]) &
1991 MASK(TTBRx_EL2_BADDR);
1992
1993 ttbrx = read_ttbr1_el2();
1994	if (va_region == VA_LOW_REGION) {
1995 ttbrx = read_ttbr0_el2();
1996
1997 /*
1998 * CnP bit. It is expected that the xlat library will
1999 * automatically set this bit for the low region.
2000 */
2001 expected_ttbrx |= (1ULL << TTBRx_EL2_CnP_SHIFT);
2002 }
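	/* For the high region, the test expects TTBR1_EL2 and no CnP bit */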
2003
2004 CHECK_VERBOSE((expected_ttbrx == ttbrx),
2005 "Expected TTBR%c_EL2: 0x%lx - Received: 0x%lx",
2006 (unsigned int)va_region + '0',
2007 expected_ttbrx, ttbrx);
2008}
2009
2010/* Helper function to validate TCR_EL2 register */
2011static void validate_tcr_el2(struct xlat_ctx *low_ctx,
2012 struct xlat_ctx *high_ctx)
2013{
2014 uint64_t exp_tcr, tcr;
2015 size_t t0sz, t1sz;
2016 unsigned int parange;
2017
2018 tcr = read_tcr_el2();
2019
2020 /*
2021 * Calculate the VA space size for both contexts based on
2022	 * the TCR_EL2 register (the VA space size is 2^(64 - TxSZ)).
2023 */
2024 t0sz = ((size_t)1) << (64U - EXTRACT(TCR_EL2_T0SZ, tcr));
2025 t1sz = ((size_t)1) << (64U - EXTRACT(TCR_EL2_T1SZ, tcr));
2026
2027 /* Validate the VA space size of the contexts */
2028 CHECK_VERBOSE((t0sz == low_ctx->cfg->max_va_size),
2029 "Check VA space size for Low Region: 0x%lx == 0x%lx",
2030 t0sz, low_ctx->cfg->max_va_size);
2031 CHECK_VERBOSE((t1sz == high_ctx->cfg->max_va_size),
2032 "Check VA space size for High Region: 0x%lx == 0x%lx",
2033 t1sz, high_ctx->cfg->max_va_size);
2034
2035 /* Mask out TxSZ fields. We have already validated them */
2036 tcr &= ~(MASK(TCR_EL2_T0SZ) | MASK(TCR_EL2_T1SZ));
2037
2038 /*
2039	 * Inner and outer cacheability attributes as expected by RMM
2040 * for all the contexts.
2041 */
2042 exp_tcr = TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA;
2043 exp_tcr |= TCR_EL2_IRGN1_WBWA | TCR_EL2_ORGN1_WBWA;
2044
2045 /* Shareability as expected by RMM for all the contexts */
2046 exp_tcr |= TCR_EL2_SH0_IS | TCR_EL2_SH1_IS;
2047
2048 /* Granule size for all the contexts. Only 4KB supported */
2049 exp_tcr |= TCR_EL2_TG0_4K | TCR_EL2_TG1_4K;
2050
2051	/* ASID size (AS) and hierarchical permission disables (HPD0/HPD1) */
2052 exp_tcr |= TCR_EL2_AS | TCR_EL2_HPD0 | TCR_EL2_HPD1;
2053
2054 /*
2055 * Xlat library configures TCR_EL2.IPS to the max
2056 * supported by the PE.
2057 */
2058 parange = EXTRACT(ID_AA64MMFR0_EL1_PARANGE, read_id_aa64mmfr0_el1());
2059 exp_tcr |= INPLACE(TCR_EL2_IPS, parange);
2060
2061	/* Validate TCR_EL2 */
2062 CHECK_VERBOSE((exp_tcr == tcr),
2063 "Validate TCR_EL2 against expected value: Read 0x%.16lx - Expected 0x%.16lx",
2064 tcr, exp_tcr);
2065}
2066
2067TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC1)
2068{
2069 struct xlat_ctx ctx[2U];
2070 struct xlat_ctx_cfg cfg[2U];
2071 struct xlat_ctx_tbls tbls[2U];
2072 uint64_t *base_tbl[2U], *xlat_tables;
2073 uint64_t start_va, end_va;
2074 xlat_addr_region_id_t va_region;
2075 int retval;
2076 struct xlat_mmap_region init_mmap[2U];
2077 unsigned int pa_range_bits_arr[] = {
2078 PARANGE_0000_WIDTH, PARANGE_0001_WIDTH, PARANGE_0010_WIDTH,
2079 PARANGE_0011_WIDTH, PARANGE_0100_WIDTH, PARANGE_0101_WIDTH,
2080 };
2081 unsigned int pa_index = test_helpers_get_rand_in_range(0,
2082 sizeof(pa_range_bits_arr)/sizeof(pa_range_bits_arr[0]) - 1U);
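	/* pa_index selects one of the PARange encodings covered by pa_range_bits_arr */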
2083
2084 /***************************************************************
2085 * TEST CASE 1:
2086 *
2087 * Generate a translation context for each region and configure
2088 * the MMU registers based on both contexts. Verify that the
2089 * right parameters have been configured.
2090 ***************************************************************/
2091
2092 /* Clean the data structures */
2093 memset((void *)&ctx, 0, sizeof(struct xlat_ctx) * 2U);
2094 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg) * 2U);
2095 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls) * 2U);
2096
2097 /* Configure a random maximum PA supported */
2098	xlat_test_helpers_set_parange(pa_index);
2099
2100 for (int i = 0U; i < VA_REGIONS; i++) {
2101 va_region = (xlat_addr_region_id_t)i;
2102
2103 xlat_tables = xlat_test_helpers_tbls();
2104 /* Use half of the available tables for each region */
2105 base_tbl[i] = &xlat_tables[(i * XLAT_TESTS_MAX_TABLES *
2106 XLAT_TABLE_ENTRIES) >> 1U];
2107 /* VA space boundaries */
2108 start_va = xlat_test_helpers_get_start_va(va_region,
2109 MAX_VIRT_ADDR_SPACE_SIZE);
2110 end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;
2111
2112 /* Generate only a single mmap region for each region */
2113 xlat_test_helpers_rand_mmap_array(&init_mmap[i], 1U, start_va, end_va);
2114
2115 retval = xlat_ctx_cfg_init(&cfg[i], va_region, &init_mmap[i],
2116 1U, MAX_VIRT_ADDR_SPACE_SIZE);
2117 CHECK_TRUE(retval == 0);
2118
2119 retval = xlat_ctx_init(&ctx[i], &cfg[i], &tbls[i],
2120 base_tbl[i], XLAT_TESTS_MAX_TABLES >> 1U);
2121 CHECK_TRUE(retval == 0);
2122
2123 /* Initialize MMU for the given context */
2124 retval = xlat_arch_setup_mmu_cfg(&ctx[i]);
2125
2126 /* Verify that the MMU has been configured */
2127 CHECK_TRUE(retval == 0);
2128
2129 /* Validate TTBR_EL2 for each context */
2130 validate_ttbrx_el2(&ctx[i]);
2131 }
2132
2133 /* Validate TCR_EL2 for both contexts at the same time */
2134 validate_tcr_el2(&ctx[0U], &ctx[1U]);
2135}
2136
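/*
 * For reference, a plausible MMU bring-up order using the APIs exercised in
 * TC1 above (illustrative sketch only; the arguments are placeholders):
 *
 *	(void)xlat_ctx_cfg_init(&cfg, region, &mmap, 1U, va_size);
 *	(void)xlat_ctx_init(&ctx, &cfg, &tbls, base_table, ntables);
 *	(void)xlat_arch_setup_mmu_cfg(&ctx);	// Program TTBRx_EL2 and TCR_EL2
 *	xlat_enable_mmu_el2();			// Then enable the MMU at EL2
 */
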
2137TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC2)
2138{
2139 struct xlat_ctx ctx;
2140 struct xlat_ctx_cfg cfg;
2141 struct xlat_ctx_tbls tbls;
2142 uint64_t start_va, end_va;
2143 int retval;
2144 struct xlat_mmap_region init_mmap;
2145
2146 /***************************************************************
2147	 * TEST CASE 2:
2148	 *
2149 * Generate a valid translation context for one of the regions
2150 * and overwrite it to test different failure conditions on
2151 * xlat_arch_setup_mmu_cfg():
2152 *
2153 * - Call xlat_arch_setup_mmu_cfg() with the MMU enabled.
2154 * - Call xlat_arch_setup_mmu_cfg() with an uninitialized
2155 * context configuration.
2156	 * - Call xlat_arch_setup_mmu_cfg() for a CPU which
2157 * does not have support for 4KB granularity.
2158	 ***************************************************************/
2159
2160 /* Clean the data structures */
2161 memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
2162 memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
2163 memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
2164
2165 /* VA space boundaries */
2166 start_va = xlat_test_helpers_get_start_va(VA_LOW_REGION,
2167 MAX_VIRT_ADDR_SPACE_SIZE);
2168 end_va = start_va + MAX_VIRT_ADDR_SPACE_SIZE - 1ULL;
2169
2170	/* Generate a single mmap region for the low VA region */
2171 xlat_test_helpers_rand_mmap_array(&init_mmap, 1U, start_va, end_va);
2172
2173 retval = xlat_ctx_cfg_init(&cfg, VA_LOW_REGION, &init_mmap,
2174 1U, MAX_VIRT_ADDR_SPACE_SIZE);
2175 CHECK_TRUE(retval == 0);
2176
2177 retval = xlat_ctx_init(&ctx, &cfg, &tbls,
2178 xlat_test_helpers_tbls(),
2179 XLAT_TESTS_MAX_TABLES);
2180 CHECK_TRUE(retval == 0);
2181
2182	/* Force MMU enablement */
2183 xlat_enable_mmu_el2();
2184
2185 /* Try to initialize MMU for the given context */
2186 retval = xlat_arch_setup_mmu_cfg(&ctx);
2187
2188 /* Verify that the MMU has failed to be initialized */
2189 CHECK_TRUE(retval == -EPERM);
2190
2191 /* Restore SCTLR_EL2 to disable the MMU */
2192 write_sctlr_el2(0ULL);
2193
2194 /* Force the context to be uninitialized */
2195 ctx.cfg->initialized = false;
2196
2197 /* Try to initialize MMU for the given context */
2198 retval = xlat_arch_setup_mmu_cfg(&ctx);
2199
2200 /* Verify that the MMU has failed to be initialized */
2201 CHECK_TRUE(retval == -EINVAL);
2202
2203 /* Restore the context initialized flag */
2204 ctx.cfg->initialized = true;
2205
2206 /* Force the architecture to report 4K granularity as not available */
2207 host_write_sysreg("id_aa64mmfr0_el1",
2208 INPLACE(ID_AA64MMFR0_EL1_PARANGE, 5U) |
2209 INPLACE(ID_AA64MMFR0_EL1_TGRAN4,
2210 ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED));
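	/*
	 * Note that the write above replaces the whole emulated register,
	 * so a valid PARange field is programmed alongside the unsupported
	 * TGRAN4 value.
	 */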
2211
2212 /* Try to initialize MMU for the given context */
2213 retval = xlat_arch_setup_mmu_cfg(&ctx);
2214
2215 /* Verify that the MMU has failed to be initialized */
2216 CHECK_TRUE(retval == -EPERM);
2217}
2218
2219ASSERT_TEST(xlat_tests_G2, xlat_arch_setup_mmu_cfg_TC3)
2220{
2221 /***************************************************************
2222 * TEST CASE 3:
2223 *
2224 * Test xlat_arch_setup_mmu_cfg() with a NULL context.
2225 ***************************************************************/
2226
2227 test_helpers_expect_assert_fail(true);
2228 (void)xlat_arch_setup_mmu_cfg(NULL);
2229 test_helpers_fail_if_no_assert_failed();
2230}
2231
2232IGNORE_TEST(xlat_tests_G2, xlat_write_tte_TC1)
2233{
2234 /*
2235 * xlat_write_tte() is implemented as an assembler function
2236	 * for the target AArch64 architecture. There is a C stub for the
2237 * fake_host platform which we do not need to test.
2238 *
2239 * This test can therefore be ignored.
2240 */
2241
2242 TEST_EXIT;
2243}
2244
2245IGNORE_TEST(xlat_tests_G2, xlat_read_tte_TC1)
2246{
2247 /*
2248 * xlat_read_tte() is implemented as an assembler function
2249	 * for the target AArch64 architecture. There is a C stub for the
2250 * fake_host platform which we do not need to test.
2251 *
2252 * This test can therefore be ignored.
2253 */
2254
2255 TEST_EXIT;
2256}
2257
2258IGNORE_TEST(xlat_tests_G2, xlat_enable_mmu_el2_TC1)
2259{
2260 /*
2261 * xlat_enable_mmu_el2() is implemented as an assembler function
2262	 * for the target AArch64 architecture. There is a C stub for the
2263 * fake_host platform which we do not need to test.
2264 *
2265 * This test can therefore be ignored.
2266 */
2267
2268 TEST_EXIT;
2269}