/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>

extern "C" {
#include <buffer.h>	/* Interface to exercise */
#include <buffer_private.h>
#include <cpuid.h>
#include <granule.h>
#include <host_defs.h>
#include <host_harness.h>
#include <host_utils.h>
#include <realm_test_utils.h>
#include <stdlib.h>
#include <string.h>
#include <test_harness.h>
#include <test_helpers.h>
#include <xlat_tables.h>
}

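/*
 * Unit tests for the slot buffer interface exposed by buffer.h:
 * granule_map()/buffer_unmap() and ns_buffer_read()/ns_buffer_write().
 * The tests run on the RMM host build and drive the buffer mappings
 * through the host harness callbacks registered by each test case.
 */
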
/*
 * Size of a chunk of memory within a granule, used for random
 * reads and writes.
 */
#define GRANULE_BLOCK_SIZE	(GRANULE_SIZE >> 2U)
#define GRANULE_BLOCKS		(GRANULE_SIZE / GRANULE_BLOCK_SIZE)

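/*
 * Note: with GRANULE_BLOCK_SIZE defined as a quarter of a granule,
 * GRANULE_BLOCKS evaluates to 4, so the read/write tests transfer each
 * granule in four equally sized chunks.
 */
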
/*
 * Function to get a random granule address within the valid address range.
 */
static inline uintptr_t get_rand_granule_addr(void)
{
	uintptr_t addr;

	int random_granule = test_helpers_get_rand_in_range(0,
				test_helpers_get_nr_granules() - 1);

	addr = (uintptr_t)(random_granule * GRANULE_SIZE)
				+ host_util_get_granule_base();

	return addr;
}

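/*
 * The addresses returned by the helper above are host virtual addresses
 * backing the granule region (offsets from host_util_get_granule_base()),
 * so the tests below can read and write them directly, e.g. through
 * memset() or rand() fills.
 */
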
/*
 * Helper function to generate an array of random granule addresses
 * in which none of them repeat.
 */
static void get_rand_granule_array(uintptr_t *arr, unsigned int count)
{
	for (unsigned int i = 0U; i < count; i++) {
		arr[i] = get_rand_granule_addr();
		if (i > 0U) {
			bool match;

			do {
				/* Check for duplicates so far */
				match = false;
				for (unsigned int j = 0U; j < i; j++) {
					if (arr[j] == arr[i]) {
						arr[i] =
							get_rand_granule_addr();
						match = true;
						break;
					}
				}
			} while (match == true);
		}
	}
}

TEST_GROUP(slot_buffer) {
	/*
	 * For this test group, TEST_SETUP() initializes RMM, which includes
	 * translation table and slot buffer mechanism initialization.
	 * Therefore, all the tests assume that the slot buffer mechanism
	 * has been properly initialized.
	 */
	TEST_SETUP()
	{
		test_helpers_init();

		/* Enable the platform with support for multiple PEs */
		test_helpers_rmm_start(true);

		/* Make sure the current CPU id is 0 (primary processor) */
		host_util_set_cpuid(0U);

		test_helpers_expect_assert_fail(false);
	}

	TEST_TEARDOWN()
	{
		/*
		 * Unregister any existing callback that might
		 * have been installed.
		 */
		(void)test_helpers_unregister_cb(CB_BUFFER_MAP);
		(void)test_helpers_unregister_cb(CB_BUFFER_UNMAP);
	}
};

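/*
 * Note on harness callbacks: the granule_map()/buffer_unmap() tests register
 * the aarch64 VMSA callbacks (test_buffer_map_aarch64_vmsa and
 * test_buffer_unmap_aarch64_vmsa) so that the resulting Stage 1 mappings can
 * be inspected, whereas the ns_buffer_read()/ns_buffer_write() tests register
 * the access callbacks (test_buffer_map_access and test_buffer_unmap_access),
 * which are used when the tests need to access the buffer contents.
 */
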
TEST(slot_buffer, granule_map_buffer_unmap_TC1)
{
	uintptr_t slot_va, expected_va, granule_addr;
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For all possible slot buffer types and all possible CPUs, try to
	 * map a random granule. Then unmap it.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = addr_to_granule(granule_addr);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);
		for (unsigned int j = 0U; j < NR_CPU_SLOTS; j++) {
			if (j == SLOT_NS) {
				/* Not supported. granule_map() would assert */
				continue;
			}
			slot_va = (uintptr_t)granule_map(test_granule,
							 (enum buffer_slot)j);
			expected_va = slot_to_va((enum buffer_slot)j);

			/* Test the return value from granule_map() */
			POINTERS_EQUAL(slot_va, expected_va);

			/*
			 * Test that the granule is actually mapped to the
			 * expected VA in the Stage 1 xlat tables as per
			 * aarch64 VMSA.
			 */
			POINTERS_EQUAL(expected_va,
				realm_test_util_slot_va_from_pa(granule_addr));

			/* Unmap the buffer */
			buffer_unmap((void *)slot_va);

			/*
			 * realm_test_util_slot_va_from_pa() returns NULL
			 * if the address passed to it is not mapped to any
			 * slot buffer.
			 */
			POINTERS_EQUAL(NULL,
				realm_test_util_slot_va_from_pa(granule_addr));

		} /* For each slot type */
	} /* For each CPU */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC2)
{
	uintptr_t mapped_pa;
	struct granule *test_granule;
	uintptr_t granules_per_cpu[MAX_CPUS];
	void *slot_va[MAX_CPUS];
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For each possible slot buffer type, map a different random
	 * granule to each one of the available CPUs. Then validate that
	 * the same PA is not mapped to two different CPUs.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	get_rand_granule_array(granules_per_cpu, MAX_CPUS);
	for (unsigned int i = 0U; i < NR_CPU_SLOTS; i++) {
		if (i == SLOT_NS) {
			/* Not supported. granule_map() would assert */
			continue;
		}

		/* Map a granule on each CPU for the same slot */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			test_granule = addr_to_granule(granules_per_cpu[j]);
			slot_va[j] = granule_map(test_granule,
						 (enum buffer_slot)i);
		}

		/*
		 * Iterate over all CPUs, ensuring that the granules are mapped
		 * into the slots for the right CPU.
		 */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			/*
			 * Get the PA mapped to the slot 'i' for CPU 'j'
			 */
			host_util_set_cpuid(j);
			mapped_pa = realm_test_util_slot_to_pa(
						(enum buffer_slot)i);

			/*
			 * Check that the PA mapped to slot 'i' for CPU 'j'
			 * is only mapped on the same slot for the same CPU.
			 * For the rest of the CPUs, the PAs should not match.
			 */
			for (unsigned int k = 0U; k < MAX_CPUS; k++) {
				if (j == k) {
					POINTERS_EQUAL(granules_per_cpu[k],
						       mapped_pa);
				} else {
					CHECK_FALSE(granules_per_cpu[k] ==
						    mapped_pa);
				}
			}
		}

		/* Unmap the granules. */
		for (unsigned int j = 0U; j < MAX_CPUS; j++) {
			host_util_set_cpuid(j);
			buffer_unmap((void *)slot_va[j]);
		}
	} /* NR_CPU_SLOTS */
}

TEST(slot_buffer, granule_map_buffer_unmap_TC3)
{
	/******************************************************************
	 * TEST CASE 3:
	 *
	 * Test that buffer_unmap() exits gracefully when an unmapped VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)slot_to_va(SLOT_NS));
	TEST_EXIT;
}

TEST(slot_buffer, granule_map_buffer_unmap_TC4)
{
	/******************************************************************
	 * TEST CASE 4:
	 *
	 * Test that buffer_unmap() exits gracefully when an invalid VA
	 * is used.
	 ******************************************************************/

	buffer_unmap((void *)NULL);
	TEST_EXIT;
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC5)
{
	uintptr_t granule_addr;
	struct granule *test_granule;
	union test_harness_cbs cb;
	unsigned int cpuid;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to map a random granule to a SLOT_NS buffer.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = addr_to_granule(granule_addr);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, SLOT_NS);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC6)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to map a NULL granule address to a random
	 * slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map((struct granule *)NULL, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC7)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;
	struct granule *test_granule;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to map a granule address below the start
	 * of the valid granule address range to a random slot type other
	 * than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	test_granule = realm_test_util_granule_struct_base() - 1U;
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC8)
{
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;
	struct granule *test_granule;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to map a granule address past the end of
	 * the granules array to a random slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	test_granule = realm_test_util_granule_struct_base() +
							HOST_NR_GRANULES;
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map(test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, granule_map_buffer_unmap_TC9)
{
	uintptr_t granule_addr;
	uintptr_t test_granule;
	union test_harness_cbs cb;
	unsigned int cpuid;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to map an unaligned granule address to a
	 * random slot type other than SLOT_NS.
	 * The operation should generate an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_aarch64_vmsa;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();
	test_granule = (uintptr_t)addr_to_granule(granule_addr);
	test_granule += test_helpers_get_rand_in_range(1,
						sizeof(struct granule) - 1);

	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);
	cpuid = (unsigned int)test_helpers_get_rand_in_range(0, MAX_CPUS - 1);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	(void)granule_map((struct granule *)test_granule, slot);
	test_helpers_fail_if_no_assert_failed();
}

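/*
 * ns_buffer_write() tests: TC1 to TC3 exercise the data path, while TC4 to
 * TC10 check that invalid arguments trigger an assertion failure.
 */
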
TEST(slot_buffer, ns_buffer_write_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it through several calls to ns_buffer_write().
	 * Then verify that for each call to ns_buffer_write(), the data
	 * is properly copied without affecting other areas of the dest
	 * granule.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as dest for the write operations
	 *		     (SLOT_NS).
	 * granule_addrs[1]: Will hold a copy of the data to transfer, so we
	 *		     can verify it later.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule to test zeroes */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {

		/* Fill the source granule with random data */
		for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
			*((int *)granule_addrs[1] + j) = rand();
		}

		/* Clean the granule to test */
		(void)memset((void *)granule_addrs[0], 0, GRANULE_SIZE);

		host_util_set_cpuid(i);

		/*
		 * Copy block by block, verifying that each copied block
		 * doesn't affect anything written before nor a block to be
		 * written yet.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_write(SLOT_NS, test_granule,
					GRANULE_BLOCK_SIZE * j,
					GRANULE_BLOCK_SIZE,
					(void *)(granule_addrs[1] +
						(GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been written yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[0] +
					((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
					GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_write_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_write() does not alter the
	 * source.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: Will contain the original data to write.
	 * granule_addrs[1]: Will hold a copy of the src granule to compare.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data. */
	for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[2]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_write(SLOT_NS, test_granule, 0U,
				GRANULE_SIZE, (void *)granule_addrs[0]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_write_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long pattern[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule to their SLOT_NS, then
	 * copy different random data to it. Verify that the data from one
	 * CPU's SLOT_NS hasn't leaked into the other CPU's SLOT_NS.
	 * This test helps validate that ns_buffer_write() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for each CPU to test. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two random CPUs where to run the tests. */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Get two different patterns of data to copy. */
	do {
		pattern[0] = (long)rand();
		pattern[1] = (long)rand();
	} while (pattern[0] == pattern[1]);

	/* Copy the patterns into the destination granules. */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
				sizeof(long), (void *)&pattern[i]);
	}

	/*
	 * Verify that the granule for the first CPU doesn't contain the
	 * pattern on the second one.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == pattern[1]);

	/*
	 * Repeat the same check, this time with the second CPU.
	 */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == pattern[0]);
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC4)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 4:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * random secure slot.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random slot. Secure slots are after SLOT_NS */
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(slot, addr_to_granule(granule_addrs[0]), 0U,
			(size_t)GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC5)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * NULL pointer to copy from.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addr), 0U,
			(size_t)GRANULE_SIZE, NULL);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC6)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * NULL granule to copy to.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, NULL, 0U,
			(size_t)GRANULE_SIZE, (void *)granule_addr);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC7)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	size_t size;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * size not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random size between 1 and 7 bytes */
	size = (size_t)test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
			size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC8)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	unsigned int offset;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to call ns_buffer_write() with an
	 * offset not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random offset between 1 and 7 */
	offset = test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
			GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC9)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to call ns_buffer_write() with a
	 * source not aligned to 8 bytes.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * Misalign the address of the source.
	 * test_helpers_get_rand_in_range() will never return an address for
	 * the last granule, so we are safe increasing the address.
	 */
	granule_addrs[1] += test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
			GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_write_TC10)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	size_t size;
	unsigned int offset;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 10:
	 *
	 * For a random CPU, try to call ns_buffer_write() with an
	 * offset + size higher than GRANULE_SIZE.
	 * ns_buffer_write() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * offset + size = 1.5 * GRANULE_SIZE.
	 * Both parameters are properly aligned.
	 */
	offset = GRANULE_SIZE >> 1U;
	size = (size_t)GRANULE_SIZE;

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_write(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
			size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

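/*
 * ns_buffer_read() tests: TC1 to TC3 exercise the data path, while TC4 to
 * TC10 check that invalid arguments trigger an assertion failure.
 */
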
TEST(slot_buffer, ns_buffer_read_TC1)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 1:
	 *
	 * For each CPU, map a random granule to SLOT_NS and copy random
	 * data into it. Then verify that the data is properly read and
	 * that the source has not been altered.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as src for read operations (SLOT_NS).
	 * granule_addrs[1]: Will be the dst granule for the ns_buffer_read
	 *		     operation.
	 * granule_addrs[2]: Just a zeroed granule to ease some tests.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Granule to test zeroes */
	(void)memset((void *)granule_addrs[2], 0, GRANULE_SIZE);

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		/* Generate random data. */
		for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
			*((int *)granule_addrs[0] + j) = rand();
		}

		/* Clean the dest granule */
		(void)memset((void *)granule_addrs[1], 0, GRANULE_SIZE);

		/*
		 * Read block by block, verifying that each copied block
		 * doesn't affect anything read before nor a block to be
		 * read yet.
		 */
		for (unsigned int j = 0U; j < GRANULE_BLOCKS; j++) {
			ns_buffer_read(SLOT_NS, test_granule,
				       GRANULE_BLOCK_SIZE * j,
				       GRANULE_BLOCK_SIZE,
				       (void *)(granule_addrs[1] +
						(GRANULE_BLOCK_SIZE * j)));

			MEMCMP_EQUAL((void *)granule_addrs[1],
				     (void *)granule_addrs[0],
				     (size_t)((j + 1U) * GRANULE_BLOCK_SIZE));

			/*
			 * Verify that any block that has not been read yet
			 * is still all zeros.
			 */
			MEMCMP_EQUAL((void *)granule_addrs[2],
				     (void *)(granule_addrs[1] +
					((j + 1U) * GRANULE_BLOCK_SIZE)),
				     (GRANULE_BLOCKS - (j + 1U)) *
					GRANULE_BLOCK_SIZE);
		}
	}
}

TEST(slot_buffer, ns_buffer_read_TC2)
{
	uintptr_t granule_addrs[3];
	struct granule *test_granule;
	union test_harness_cbs cb;
	int val;

	/******************************************************************
	 * TEST CASE 2:
	 *
	 * For every CPU, verify that ns_buffer_read() does not alter the
	 * source.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/*
	 * Get three random granules:
	 * granule_addrs[0]: To be used as src for read operations (SLOT_NS).
	 * granule_addrs[1]: Will hold a copy of the src granule to compare.
	 * granule_addrs[2]: Destination granule.
	 */
	get_rand_granule_array(granule_addrs, 3U);

	/* Generate random data. */
	for (unsigned int j = 0U; j < GRANULE_SIZE/sizeof(int); j++) {
		val = rand();
		*((int *)granule_addrs[0] + j) = val;
		*((int *)granule_addrs[1] + j) = val;
	}

	test_granule = addr_to_granule(granule_addrs[0]);

	for (unsigned int i = 0U; i < MAX_CPUS; i++) {
		host_util_set_cpuid(i);

		ns_buffer_read(SLOT_NS, test_granule, 0U,
			       GRANULE_SIZE, (void *)granule_addrs[2]);

		/* Verify that the source has not been altered */
		MEMCMP_EQUAL((void *)granule_addrs[1],
			     (void *)granule_addrs[0],
			     (size_t)GRANULE_SIZE);
	}
}

TEST(slot_buffer, ns_buffer_read_TC3)
{
	uintptr_t granule_addrs[2];
	unsigned int cpu[2];
	long dest[2];
	long val;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 3:
	 *
	 * For two random CPUs, map a random granule with random data to
	 * their SLOT_NS, then read the SLOT_NS on each CPU and ensure that
	 * the destination buffers contain the data from their CPU SLOT_NS
	 * only and no leak from the other CPU has happened.
	 * This test helps validate that ns_buffer_read() handles the
	 * translation contexts properly.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get a random granule for each CPU to use. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get two random CPUs where to run the tests. */
	do {
		cpu[0] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
		cpu[1] = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	} while (cpu[0] == cpu[1]);

	/* Store random data at the beginning of each granule */
	*(long *)granule_addrs[0] = (long)rand();
	*(long *)granule_addrs[1] = (long)rand();

	/* Read the granules and store the result in dest */
	for (unsigned int i = 0U; i < 2U; i++) {
		host_util_set_cpuid(cpu[i]);

		ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[i]), 0U,
			       sizeof(long), (void *)&dest[i]);
	}

	/*
	 * Verify that the data read by the second CPU does not match the
	 * data stored in the first CPU's granule.
	 */
	val = *(long *)granule_addrs[0];
	CHECK_FALSE(val == dest[1]);

	/*
	 * Repeat the same check, this time for the first CPU's read against
	 * the second CPU's granule.
	 */
	val = *(long *)granule_addrs[1];
	CHECK_FALSE(val == dest[0]);
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC4)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	enum buffer_slot slot;

	/******************************************************************
	 * TEST CASE 4:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * random secure slot.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random slot. Secure slots are after SLOT_NS */
	slot = (enum buffer_slot)test_helpers_get_rand_in_range(
						SLOT_NS + 1U, NR_CPU_SLOTS);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(slot, addr_to_granule(granule_addrs[0]), 0U,
		       (size_t)GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC5)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 5:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * NULL pointer to copy to.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addr), 0U,
		       (size_t)GRANULE_SIZE, NULL);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC6)
{
	uintptr_t granule_addr;
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 6:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * NULL granule to copy from.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	granule_addr = get_rand_granule_addr();

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, NULL, 0U,
		       (size_t)GRANULE_SIZE, (void *)granule_addr);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC7)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	size_t size;

	/******************************************************************
	 * TEST CASE 7:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * size not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random size between 1 and 7 bytes */
	size = (size_t)test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
		       size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC8)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;
	unsigned int offset;

	/******************************************************************
	 * TEST CASE 8:
	 *
	 * For a random CPU, try to call ns_buffer_read() with an
	 * offset not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/* Get a random offset between 1 and 7 */
	offset = test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
		       GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC9)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 9:
	 *
	 * For a random CPU, try to call ns_buffer_read() with a
	 * destination not aligned to 8 bytes.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * Misalign the address of the destination.
	 * test_helpers_get_rand_in_range() will never return an address for
	 * the last granule, so we are safe increasing the address.
	 */
	granule_addrs[1] += test_helpers_get_rand_in_range(1, 7);

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), 0U,
		       GRANULE_SIZE, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

ASSERT_TEST(slot_buffer, ns_buffer_read_TC10)
{
	uintptr_t granule_addrs[2];
	unsigned int cpuid;
	size_t size;
	unsigned int offset;
	union test_harness_cbs cb;

	/******************************************************************
	 * TEST CASE 10:
	 *
	 * For a random CPU, try to call ns_buffer_read() with an
	 * offset + size higher than GRANULE_SIZE.
	 * ns_buffer_read() should cause an assertion failure.
	 ******************************************************************/

	/* Register harness callbacks to use by this test */
	cb.buffer_map = test_buffer_map_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_MAP);
	cb.buffer_unmap = test_buffer_unmap_access;
	(void)test_helpers_register_cb(cb, CB_BUFFER_UNMAP);

	/* Get two random granules, one for destination and one for source. */
	get_rand_granule_array(granule_addrs, 2U);

	/*
	 * offset + size = 1.5 * GRANULE_SIZE.
	 * Both parameters are properly aligned.
	 */
	offset = GRANULE_SIZE >> 1U;
	size = (size_t)GRANULE_SIZE;

	cpuid = test_helpers_get_rand_in_range(0, MAX_CPUS - 1U);
	host_util_set_cpuid(cpuid);

	test_helpers_expect_assert_fail(true);
	ns_buffer_read(SLOT_NS, addr_to_granule(granule_addrs[0]), offset,
		       size, (void *)granule_addrs[1]);
	test_helpers_fail_if_no_assert_failed();
}

TEST(slot_buffer, slot_buf_finish_warmboot_init_TC1)
{
	/*
	 * slot_buf_finish_warmboot_init() has already been used during
	 * initialization for all tests, so skip it.
	 */
}