/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>

extern "C" {
#include <buffer.h>		/* Interface to exercise */
#include <buffer_private.h>
#include <cpuid.h>
#include <granule.h>
#include <host_defs.h>
#include <host_harness.h>
#include <host_utils.h>
#include <realm_test_utils.h>
#include <stdlib.h>
#include <string.h>
#include <test_harness.h>
#include <test_helpers.h>
#include <time.h>
#include <xlat_tables.h>
}

/*
 * Size of a chunk of memory within a granule, used for random
 * reads and writes.
 */
#define GRANULE_BLOCK_SIZE	(GRANULE_SIZE >> 2U)
#define GRANULE_BLOCKS		(GRANULE_SIZE / GRANULE_BLOCK_SIZE)

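/*
 * Compile-time sanity check (a hedged addition, not part of the original
 * test logic): the block-wise read/write tests below assume that
 * GRANULE_BLOCK_SIZE divides GRANULE_SIZE exactly, so that GRANULE_BLOCKS
 * blocks tile a whole granule with no remainder.
 */
static_assert((GRANULE_BLOCK_SIZE * GRANULE_BLOCKS) == GRANULE_SIZE,
	      "GRANULE_BLOCK_SIZE must divide GRANULE_SIZE exactly");
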
/*
 * Function to get a random granule address within the valid address range.
 */
static inline uintptr_t get_rand_granule_addr(void)
{
	uintptr_t addr;
	int random_granule = test_helpers_get_rand_in_range(0,
					test_helpers_get_nr_granules() - 1);

	addr = (uintptr_t)(random_granule * GRANULE_SIZE)
					+ host_util_get_granule_base();

	return addr;
}
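
/*
 * Illustrative usage (an informal sketch, not exercised as a test on its
 * own): an address returned by get_rand_granule_addr() always falls within
 * the valid granule range, so the tests below convert it to its granule
 * descriptor with addr_to_granule(), e.g.:
 *
 *	uintptr_t addr = get_rand_granule_addr();
 *	struct granule *g = addr_to_granule(addr);
 */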

/*
 * Helper function to generate an array of random granule addresses
 * in which none of them repeat.
 */
static void get_rand_granule_array(uintptr_t *arr, unsigned int count)
{
	for (unsigned int i = 0U; i < count; i++) {
		arr[i] = get_rand_granule_addr();
		if (i > 0U) {
			bool match;

			do {
				/* Check for duplicates so far */
				match = false;
				for (unsigned int j = 0U; j < i; j++) {
					if (arr[j] == arr[i]) {
						arr[i] =
							get_rand_granule_addr();
						match = true;
						break;
					}
				}
			} while (match);
		}
	}
}

TEST_GROUP(slot_buffer) {
	/*
	 * For this test group, TEST_SETUP() initializes RMM, which includes
	 * the translation tables and the slot buffer mechanism.
	 * Therefore, all the tests assume that the slot buffer mechanism
	 * has been properly initialized.
	 */
	TEST_SETUP()
	{
		static int random_seed = 0;

		/* Enable the platform with support for multiple PEs */
		test_helpers_rmm_start(true);

		/* Make sure the current CPU id is 0 (primary processor) */
		host_util_set_cpuid(0U);

		/* Initialize the random seed */
		while (random_seed == 0) {
			random_seed = (int)time(NULL);
			srand(random_seed);
		}

		test_helpers_expect_assert_fail(false);
	}

	TEST_TEARDOWN()
	{
		/*
		 * Unregister any existing callbacks that might
		 * have been installed.
		 */
		(void)test_helpers_unregister_cb(CB_BUFFER_MAP);
		(void)test_helpers_unregister_cb(CB_BUFFER_UNMAP);
	}
};

TEST(slot_buffer, granule_map_buffer_unmap_TC1)
{
	uintptr_t slot_va, expected_va, granule_addr;
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For all possible slot buffer types and all possible CPUs, try to
	 * map a random granule. Then unmap it.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = addr_to_granule(granule_addr);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);
		for (unsigned int j = 0U; j < NR_CPU_SLOTS; j++) {
			if (j == SLOT_NS) {
				/* Not supported. granule_map() would assert */
				continue;
			}
			slot_va = (uintptr_t)granule_map(test_granule,
							 (enum buffer_slot)j);
			expected_va = slot_to_va((enum buffer_slot)j);

			/* Test the return value from granule_map() */
			POINTERS_EQUAL(expected_va, slot_va);

			/*
			 * Test that the granule is actually mapped to the
			 * expected VA in the Stage 1 xlat tables as per
			 * the aarch64 VMSA.
			 */
			POINTERS_EQUAL(expected_va,
				realm_test_util_slot_va_from_pa(granule_addr));

			/* Unmap the buffer */
			buffer_unmap((void *)slot_va);

			/*
			 * realm_test_util_slot_va_from_pa() returns NULL
			 * if the address passed to it is not mapped to any
			 * slot buffer.
			 */
			POINTERS_EQUAL(NULL,
				realm_test_util_slot_va_from_pa(granule_addr));

		} /* For each slot type */
	} /* For each CPU */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC2)
{
	uintptr_t mapped_pa;
	struct granule *test_granule;
	uintptr_t granules_per_cpu[MAX_CPUS];
	void *slot_va[MAX_CPUS];
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For each possible slot buffer type, map a different random
	 * granule to each one of the available CPUs. Then validate that
	 * the same PA is not mapped to two different CPUs.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	get_rand_granule_array(granules_per_cpu, MAX_CPUS);
	for (unsigned int i = 0U; i < NR_CPU_SLOTS; i++) {
		if (i == SLOT_NS) {
			/* Not supported. granule_map() would assert */
			continue;
		}

		/* Map a granule on each CPU for the same slot */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			test_granule = addr_to_granule(granules_per_cpu[j]);
			slot_va[j] = granule_map(test_granule,
						 (enum buffer_slot)i);
		}

		/*
		 * Iterate over all CPUs, ensuring that the granules are mapped
		 * into the slots for the right CPU.
		 */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			/* Get the PA mapped to slot 'i' for CPU 'j' */
			host_util_set_cpuid(j);
			mapped_pa = realm_test_util_slot_to_pa(
						(enum buffer_slot)i);

			/*
			 * Check that the PA mapped to slot 'i' for CPU 'j'
			 * is only mapped on the same slot for the same CPU.
			 * For the rest of the CPUs, the PAs should not match.
			 */
			for (unsigned int k = 0U; k < MAX_CPUS; k++) {
				if (j == k) {
					POINTERS_EQUAL(granules_per_cpu[k],
						       mapped_pa);
				} else {
					CHECK_FALSE(granules_per_cpu[k] ==
						    mapped_pa);
				}
			}
		}

		/* Unmap the granules */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			buffer_unmap((void *)slot_va[j]);
		}
	} /* NR_CPU_SLOTS */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC3)
{
	/******************************************************************
	 * TEST CASE 3:
	 *
	 * Test that buffer_unmap() exits gracefully when an unmapped VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)slot_to_va(SLOT_NS));
	TEST_EXIT;
}

TEST(slot_buffer, granule_map_buffer_unmap_TC4)
{
	/******************************************************************
	 * TEST CASE 4:
	 *
	 * Test that buffer_unmap() exits gracefully when an invalid VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)NULL);
	TEST_EXIT;
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC5)
{
	uintptr_t granule_addr;
	struct granule *test_granule;
	union test_harness_cbs cb;
	unsigned int cpuid;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to map a random granule to a SLOT_NS buffer.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = addr_to_granule(granule_addr);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, SLOT_NS);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC6)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to map a NULL granule address to a random
	 * slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map((struct granule *)NULL, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC7)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;
	struct granule *test_granule;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to map a granule address below the start
	 * of the valid granule address range to a random slot type other
	 * than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	test_granule = realm_test_util_granule_struct_base() - 1U;
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC8)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;
	struct granule *test_granule;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to map a granule address past the end of
	 * the granules array to a random slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	test_granule = realm_test_util_granule_struct_base() +
							HOST_NR_GRANULES;
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC9)
{
	uintptr_t granule_addr;
	uintptr_t test_granule;
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to map an unaligned granule address to a
	 * random slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = (uintptr_t)addr_to_granule(granule_addr);
	test_granule += test_helpers_get_rand_in_range(1,
						sizeof(struct granule) - 1);

	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map((struct granule *)test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

TEST(slot_buffer, ns_buffer_write_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it through several calls to ns_buffer_write().
	 * Then verify that for each call to ns_buffer_write(), the data
	 * is properly copied without affecting other areas of the dest
	 * granule.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as the destination for the write
	 *		     operations (SLOT_NS).
	 * granule_addrs[1]: Will hold the data to transfer, kept as a
	 *		     reference so we can verify the copy later.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule used to test for zeroes */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {

		/* Fill the source granule with random data */
		for (unsigned int j = 0U; j < GRANULE_SIZE / sizeof(int); j++) {
			*((int *)granule_addrs[1] + j) = rand();
		}

		/* Clean the granule to test */
		(void)memset((void *)granule_addrs[0], 0, GRANULE_SIZE);

		host_util_set_cpuid(i);

		/*
		 * Copy block by block, verifying that each copied block
		 * doesn't affect anything written before it nor any block
		 * yet to be written.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_write(SLOT_NS, test_granule,
					GRANULE_BLOCK_SIZE * j,
					GRANULE_BLOCK_SIZE,
					(void *)(granule_addrs[1] +
						 (GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been written yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[0] +
					      ((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
							GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_write_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_write() does not alter the
	 * source.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: Will contain the original data to write.
	 * granule_addrs[1]: Will hold a copy of the source granule to
	 *		     compare against.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data */
	for (unsigned int j = 0U; j < GRANULE_SIZE / sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[2]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_write(SLOT_NS, test_granule, 0U,
				GRANULE_SIZE, (void *)granule_addrs[0]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_write_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long pattern[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule to their SLOT_NS, then
	 * copy different random data to it. Verify that the data from one
	 * CPU's SLOT_NS has not leaked into the other CPU's SLOT_NS.
	 * This test helps validate that ns_buffer_write() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for each CPU to test */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two different random CPUs on which to run the tests */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Get two different patterns of data to copy */
	do {
		pattern[0] = (long)rand();
		pattern[1] = (long)rand();
	} while (pattern[0] == pattern[1]);

	/* Copy the patterns into the destination granules */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
				sizeof(long), (void *)&pattern[i]);
	}

	/*
	 * Verify that the granule for the first CPU doesn't contain the
	 * pattern for the second one.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == pattern[1]);

	/* Repeat the same check, this time for the second CPU */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == pattern[0]);
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC4)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 4:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * random secure slot.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random slot. Secure slots are after SLOT_NS */
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(slot, addr_to_granule(granule_addrs[0]), 0U,
			(size_t)GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC5)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * NULL pointer to copy from.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addr), 0U,
			(size_t)GRANULE_SIZE, NULL);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC6)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * NULL granule to copy to.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, NULL, 0U,
			(size_t)GRANULE_SIZE, (void *)granule_addr);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC7)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	size_t size;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * size not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random size between 1 and 7 bytes */
	size = (size_t)test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
			size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC8)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	unsigned int offset;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to call ns_buffer_write() with an
	 * offset not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random offset between 1 and 7 bytes */
	offset = test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
			GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC9)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * source buffer not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * Misalign the address of the source. The misalignment added is at
	 * most 7 bytes, so the resulting address still lies within the
	 * boundaries of the selected granule.
	 */
	granule_addrs[1] += test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
			GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC10)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	size_t size;
	unsigned int offset;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 10:
	 *
	 * For a random CPU, try to call ns_buffer_write() with an
	 * offset + size higher than GRANULE_SIZE.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * offset + size = 1.5 * GRANULE_SIZE.
	 * Both parameters are properly aligned.
	 */
	offset = GRANULE_SIZE >> 1U;
	size = (size_t)GRANULE_SIZE;

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
			size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

TEST(slot_buffer, ns_buffer_read_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it. Then verify that the data is properly read and
	 * that the source has not been altered.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as the source for the read
	 *		     operations (SLOT_NS).
	 * granule_addrs[1]: Will be the destination granule for the
	 *		     ns_buffer_read() operation.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule used to test for zeroes */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		/* Generate random data */
		for (unsigned int j = 0U; j < GRANULE_SIZE / sizeof(int); j++) {
			*((int *)granule_addrs[0] + j) = rand();
		}

		/* Clean the destination granule */
		(void)memset((void *)granule_addrs[1], 0, GRANULE_SIZE);

		/*
		 * Read block by block, verifying that each copied block
		 * doesn't affect anything read before it nor any block
		 * yet to be read.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_read(SLOT_NS, test_granule,
				       GRANULE_BLOCK_SIZE * j,
				       GRANULE_BLOCK_SIZE,
				       (void *)(granule_addrs[1] +
						(GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been read yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[1] +
					      ((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
							GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_read_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_read() does not alter the
	 * source.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as the source for the read
	 *		     operations (SLOT_NS).
	 * granule_addrs[1]: Will hold a copy of the source granule to
	 *		     compare against.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data */
	for (unsigned int j = 0U; j < GRANULE_SIZE / sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_read(SLOT_NS, test_granule, 0U,
			       GRANULE_SIZE, (void *)granule_addrs[2]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_read_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long dest[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule with random data to
	 * their SLOT_NS, then read the SLOT_NS on each CPU and ensure that
	 * the destination buffers contain the data from their own CPU's
	 * SLOT_NS only and that no leak from the other CPU has happened.
	 * This test helps validate that ns_buffer_read() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get a random granule for each CPU to use */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two different random CPUs on which to run the tests */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Store random data at the beginning of each granule */
	*(long *)granule_addrs[0] = (long)rand();
	*(long *)granule_addrs[1] = (long)rand();

	/* Read the granules and store the results in dest */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
			       sizeof(long), (void *)&dest[i]);
	}

	/*
	 * Verify that the data read by the second CPU does not match the
	 * contents of the first CPU's granule.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == dest[1]);

	/* Repeat the same check, this time for the first CPU's read */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == dest[0]);
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC4)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 4:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * random secure slot.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random slot. Secure slots are after SLOT_NS */
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(slot, addr_to_granule(granule_addrs[0]), 0U,
		       (size_t)GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC5)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * NULL pointer to copy to.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addr), 0U,
		       (size_t)GRANULE_SIZE, NULL);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC6)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * NULL granule to copy from.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, NULL, 0U,
		       (size_t)GRANULE_SIZE, (void *)granule_addr);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC7)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	size_t size;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * size not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random size between 1 and 7 bytes */
	size = (size_t)test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
		       size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC8)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	unsigned int offset;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to call ns_buffer_read() with an
	 * offset not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random offset between 1 and 7 bytes */
	offset = test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
		       GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC9)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * destination buffer not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * Misalign the address of the destination. The misalignment added
	 * is at most 7 bytes, so the resulting address still lies within
	 * the boundaries of the selected granule.
	 */
	granule_addrs[1] += test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
		       GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC10)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	size_t size;
	unsigned int offset;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 10:
	 *
	 * For a random CPU, try to call ns_buffer_read() with an
	 * offset + size higher than GRANULE_SIZE.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register the harness callbacks to be used by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * offset + size = 1.5 * GRANULE_SIZE.
	 * Both parameters are properly aligned.
	 */
	offset = GRANULE_SIZE >> 1U;
	size = (size_t)GRANULE_SIZE;

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
		       size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

TEST(slot_buffer, slot_buf_setup_xlat_TC1)
{
	/*
	 * slot_buf_setup_xlat() has already been used during initialization
	 * for all tests, so skip it.
	 */
}

TEST(slot_buffer, slot_buf_finish_warmboot_init_TC1)
{
	/*
	 * slot_buf_finish_warmboot_init() has already been used during
	 * initialization for all tests, so skip it.
	 */
}