/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>

extern "C" {
#include <buffer.h>		/* Interface to exercise */
#include <buffer_private.h>
#include <cpuid.h>
#include <granule.h>
#include <host_harness.h>
#include <host_utils.h>
#include <realm_test_utils.h>
#include <stdlib.h>
#include <string.h>
#include <test_harness.h>
#include <test_helpers.h>
#include <time.h>
#include <xlat_tables.h>
}
/*
 * Size of a chunk of memory within a granule, used for random
 * reads and writes.
 */
#define GRANULE_BLOCK_SIZE	(GRANULE_SIZE >> 2U)
#define GRANULE_BLOCKS		(GRANULE_SIZE/GRANULE_BLOCK_SIZE)
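
/*
 * Note: GRANULE_BLOCK_SIZE is a quarter of a granule, so GRANULE_BLOCKS
 * always evaluates to 4 (e.g. 1KB blocks for a typical 4KB granule).
 */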

/*
 * Function to get a random address within the granules' range.
 * The returned address is aligned to granule size.
 */
static inline uintptr_t get_rand_granule_addr(void)
{
	uintptr_t addr;
	int random_granule = test_helpers_get_rand_in_range(0,
				test_helpers_get_nr_granules() - 1);

	/* Cast before multiplying to avoid a possible 'int' overflow */
	addr = ((uintptr_t)random_granule * GRANULE_SIZE) +
				host_util_get_granule_base();

	return addr;
}

/*
 * Helper function to generate an array of random granule addresses
 * in which no address repeats.
 */
static void get_rand_granule_array(uintptr_t *arr, unsigned int count)
{
	for (unsigned int i = 0U; i < count; i++) {
		arr[i] = get_rand_granule_addr();
		if (i > 0U) {
			bool match;

			do {
				/* Check for duplicates so far */
				match = false;
				for (unsigned int j = 0U; j < i; j++) {
					if (arr[j] == arr[i]) {
						arr[i] = get_rand_granule_addr();
						match = true;
						break;
					}
				}
			} while (match);
		}
	}
}
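
/*
 * Usage example (illustrative): get_rand_granule_array(addrs, 3U) fills
 * 'addrs' with three distinct, granule-aligned addresses within the
 * granules' range.
 */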

TEST_GROUP(slot_buffer) {
	/*
	 * For this test group, TEST_SETUP() initializes RMM, which includes
	 * translation table and slot buffer mechanism initialization.
	 * Therefore, all the tests assume that the slot buffer mechanism
	 * has been properly initialized.
	 */
	TEST_SETUP()
	{
		static int random_seed = 0;

		/* Enable the platform with support for multiple PEs */
		test_helpers_rmm_start(true);

		/* Make sure the current CPU id is 0 (primary processor) */
		host_util_set_cpuid(0U);

		/* Initialize the random seed (only once per test run) */
		while (random_seed == 0) {
			random_seed = (int)time(NULL);
			srand(random_seed);
		}

		test_helpers_expect_assert_fail(false);
	}

	TEST_TEARDOWN()
	{
		/*
		 * Unregister any existing callbacks that might
		 * have been installed.
		 */
		(void)test_helpers_unregister_cb(CB_BUFFER_MAP);
		(void)test_helpers_unregister_cb(CB_BUFFER_UNMAP);
	}
};
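
/*
 * For reference, a minimal sketch of how the interface under test is
 * typically driven (the slot name and 'pa' are illustrative only):
 *
 *	struct granule *g = addr_to_granule(pa);
 *	void *va = granule_map(g, SLOT_DELEGATED);
 *	... access the granule contents through 'va' ...
 *	buffer_unmap(va);
 */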

TEST(slot_buffer, granule_map_buffer_unmap_TC1)
{
	uintptr_t slot_va, expected_va, granule_addr;
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For all possible slot buffer types and all possible CPUs, try to
	 * map a random granule. Then unmap it.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = addr_to_granule(granule_addr);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);
		for (unsigned int j = 0U; j < NR_CPU_SLOTS; j++) {
			if (j == SLOT_NS) {
				/* Not supported. granule_map() would assert */
				continue;
			}
			slot_va = (uintptr_t)granule_map(test_granule,
							 (enum buffer_slot)j);
			expected_va = slot_to_va((enum buffer_slot)j);

			/* Test the return value from granule_map() */
			POINTERS_EQUAL(expected_va, slot_va);

			/*
			 * Test that the granule is actually mapped to the
			 * expected VA in the Stage 1 xlat tables as per
			 * aarch64 VMSA.
			 */
			POINTERS_EQUAL(expected_va,
				realm_test_util_slot_va_from_pa(granule_addr));

			/* Unmap the buffer */
			buffer_unmap((void *)slot_va);

			/*
			 * realm_test_util_slot_va_from_pa() returns NULL
			 * if the address passed to it is not mapped to any
			 * slot buffer.
			 */
			POINTERS_EQUAL(NULL,
				realm_test_util_slot_va_from_pa(granule_addr));

		} /* For each slot type */
	} /* For each CPU */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC2)
{
	uintptr_t mapped_pa;
	struct granule *test_granule;
	uintptr_t granules_per_cpu[MAX_CPUS];
	void *slot_va[MAX_CPUS];
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For each possible slot buffer type, map a different random
	 * granule on each one of the available CPUs. Then validate that
	 * the same PA is not mapped to two different CPUs.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	get_rand_granule_array(granules_per_cpu, MAX_CPUS);
	for (unsigned int i = 0U; i < NR_CPU_SLOTS; i++) {
		if (i == SLOT_NS) {
			/* Not supported. granule_map() would assert */
			continue;
		}

		/* Map a granule on each CPU for the same slot */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			test_granule = addr_to_granule(granules_per_cpu[j]);
			slot_va[j] = granule_map(test_granule,
						 (enum buffer_slot)i);
		}

		/*
		 * Iterate over all CPUs, ensuring that the granules are mapped
		 * into the slots of the right CPU.
		 */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			/* Get the PA mapped to slot 'i' for CPU 'j' */
			host_util_set_cpuid(j);
			mapped_pa = realm_test_util_slot_to_pa(
							(enum buffer_slot)i);

			/*
			 * Check that the PA mapped to slot 'i' for CPU 'j'
			 * is only mapped on the same slot for the same CPU.
			 * For the rest of the CPUs, the PAs should not match.
			 */
			for (unsigned int k = 0U; k < MAX_CPUS; k++) {
				if (j == k) {
					POINTERS_EQUAL(granules_per_cpu[k],
						       mapped_pa);
				} else {
					CHECK_FALSE(granules_per_cpu[k] ==
								mapped_pa);
				}
			}
		}

		/* Unmap the granules */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			buffer_unmap((void *)slot_va[j]);
		}
	} /* NR_CPU_SLOTS */

	/*
	 * granule_map() asserts if the granule address is not aligned, so
	 * skip that test.
	 */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC3)
{
	/******************************************************************
	 * TEST CASE 3:
	 *
	 * Test that buffer_unmap() exits gracefully when an unmapped VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)slot_to_va(SLOT_NS));
	TEST_EXIT;
}

TEST(slot_buffer, granule_map_buffer_unmap_TC4)
{
	/******************************************************************
	 * TEST CASE 4:
	 *
	 * Test that buffer_unmap() exits gracefully when an invalid VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)NULL);
	TEST_EXIT;
}

TEST(slot_buffer, ns_buffer_write_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it through several calls to ns_buffer_write().
	 * Then verify that each call to ns_buffer_write() copies the data
	 * properly, without affecting other areas of the destination
	 * granule.
	 ******************************************************************/
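
	/*
	 * As exercised below, ns_buffer_write(slot, granule, offset, size,
	 * src) copies 'size' bytes from 'src' into the NS granule mapped
	 * at 'slot', starting at byte 'offset' within the granule.
	 */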

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as dest for the write operations
	 *		     (SLOT_NS).
	 * granule_addrs[1]: Will hold a copy of the data to transfer, so we
	 *		     can verify later.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule to test zeroes against */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		/* Fill the source granule with random data */
		for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
			*((int *)granule_addrs[1] + j) = rand();
		}

		/* Clean the granule to test */
		(void)memset((void *)granule_addrs[0], 0, GRANULE_SIZE);

		host_util_set_cpuid(i);

		/*
		 * Copy block by block, verifying that each copied block
		 * doesn't affect anything written before nor a block yet
		 * to be written.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_write(SLOT_NS, test_granule,
					GRANULE_BLOCK_SIZE * j,
					GRANULE_BLOCK_SIZE,
					(void *)(granule_addrs[1] +
						(GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been written yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[0] +
					((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
					GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_write_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_write() does not alter the
	 * source.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: Will contain the original data to write.
	 * granule_addrs[1]: Will hold a copy of the source granule to
	 *		     compare against.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data */
	for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[2]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_write(SLOT_NS, test_granule, 0U,
				GRANULE_SIZE, (void *)granule_addrs[0]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_write_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long pattern[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule to their SLOT_NS, then
	 * copy different random data to it. Verify that the data from one
	 * CPU's SLOT_NS has not leaked into the other CPU's SLOT_NS.
	 * This test helps to validate that ns_buffer_write() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for each CPU to test */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two different random CPUs on which to run the tests */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Get two different patterns of data to copy */
	do {
		pattern[0] = (long)rand();
		pattern[1] = (long)rand();
	} while (pattern[0] == pattern[1]);

	/* Copy the patterns into the destination granules */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
				sizeof(long), (void *)&pattern[i]);
	}

	/*
	 * Verify that the granule for the first CPU doesn't contain the
	 * pattern written by the second one.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == pattern[1]);

	/* Repeat the same check, this time for the second CPU */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == pattern[0]);

	/*
	 * ns_buffer_write() will assert if:
	 * - The slot is not a non-secure one.
	 * - The granule to write to is NULL.
	 * - The size is not aligned to a byte size.
	 * - The offset is not aligned to a byte size.
	 * - The source is not aligned to a byte size.
	 * - The offset + size overflows the granule size.
	 * So skip the tests for these cases.
	 */
}

TEST(slot_buffer, ns_buffer_read_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it. Then verify that the data is properly read and
	 * that the source has not been altered.
	 ******************************************************************/
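
	/*
	 * As exercised below, ns_buffer_read(slot, granule, offset, size,
	 * dest) is the inverse of ns_buffer_write(): it copies 'size' bytes,
	 * starting at byte 'offset' within the NS granule mapped at 'slot',
	 * into 'dest'.
	 */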

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as src for the read operations
	 *		     (SLOT_NS).
	 * granule_addrs[1]: Will be the dest granule for the ns_buffer_read()
	 *		     operations.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule to test zeroes against */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		/* Generate random data */
		for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
			*((int *)granule_addrs[0] + j) = rand();
		}

		/* Clean the dest granule */
		(void)memset((void *)granule_addrs[1], 0, GRANULE_SIZE);

		/*
		 * Read block by block, verifying that each copied block
		 * doesn't affect anything read before nor a block yet
		 * to be read.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_read(SLOT_NS, test_granule,
				       GRANULE_BLOCK_SIZE * j,
				       GRANULE_BLOCK_SIZE,
				       (void *)(granule_addrs[1] +
						(GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been read yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[1] +
					((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
					GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_read_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_read() does not alter the
	 * source.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as src for the read operations
	 *		     (SLOT_NS).
	 * granule_addrs[1]: Will hold a copy of the source granule to
	 *		     compare against.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data */
	for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_read(SLOT_NS, test_granule, 0U,
			       GRANULE_SIZE, (void *)granule_addrs[2]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_read_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long dest[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule with random data to
	 * their SLOT_NS, then read the SLOT_NS on each CPU and ensure that
	 * the destination buffers contain the data from their own CPU's
	 * SLOT_NS only and that no leak from the other CPU has happened.
	 * This test helps to validate that ns_buffer_read() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get a random granule for each CPU to use */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two different random CPUs on which to run the tests */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Store random data at the beginning of each granule */
	*(long *)granule_addrs[0] = (long)rand();
	*(long *)granule_addrs[1] = (long)rand();

	/* Read the granules and store the results in dest */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
			       sizeof(long), (void *)&dest[i]);
	}

	/*
	 * Verify that the data read by the second CPU does not match the
	 * pattern in the first CPU's granule.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == dest[1]);

	/* Repeat the same check, this time for the first CPU */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == dest[0]);

	/*
	 * ns_buffer_read() will assert if:
	 * - The slot is not a non-secure one.
	 * - The granule to read from is NULL.
	 * - The size is not aligned to a byte size.
	 * - The offset is not aligned to a byte size.
	 * - The dest is not aligned to a byte size.
	 * - The offset + size overflows the granule size.
	 * So skip the tests for these cases.
	 */
}

TEST(slot_buffer, slot_buf_setup_xlat_TC1)
{
	/*
	 * slot_buf_setup_xlat() has already been used during initialization
	 * for all tests, so skip it.
	 */
}

TEST(slot_buffer, slot_buf_init_TC1)
{
	/*
	 * slot_buf_init() has already been used during initialization
	 * for all tests, so skip it.
	 */
}