blob: aac2de477a796805fad371226591557e98de6f1b [file] [log] [blame]
J-Alves40618a32020-10-08 17:25:37 +01001/*
J-Alves3be0efa2023-10-02 19:11:11 +01002 * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
J-Alves40618a32020-10-08 17:25:37 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
J-Alves8083db22023-06-27 17:22:58 +01007#include "arch_features.h"
J-Alves3be0efa2023-10-02 19:11:11 +01008#include "arch_helpers.h"
9#include "ffa_helpers.h"
J-Alves8083db22023-06-27 17:22:58 +010010#include "ffa_svc.h"
J-Alves72f00292024-01-08 11:31:45 +000011#include "stdint.h"
J-Alves3be0efa2023-10-02 19:11:11 +010012#include "utils_def.h"
J-Alves40618a32020-10-08 17:25:37 +010013#include <debug.h>
Karl Meakin3d879b82023-06-16 10:32:08 +010014#include "ffa_helpers.h"
J-Alves8083db22023-06-27 17:22:58 +010015#include <sync.h>
Max Shvetsov103e0562021-02-04 16:58:31 +000016
17#include <cactus_test_cmds.h>
J-Alves40618a32020-10-08 17:25:37 +010018#include <ffa_endpoints.h>
J-Alves3be0efa2023-10-02 19:11:11 +010019#include <host_realm_rmi.h>
Daniel Boulby82bf3392023-07-28 18:32:27 +010020#include <spm_common.h>
21#include <spm_test_helpers.h>
J-Alves40618a32020-10-08 17:25:37 +010022#include <test_helpers.h>
23#include <tftf_lib.h>
24#include <xlat_tables_defs.h>
25
26#define MAILBOX_SIZE PAGE_SIZE
27
28#define SENDER HYP_ID
29#define RECEIVER SP_ID(1)
30
Karl Meakin0d4f5ff2023-10-13 20:03:16 +010031/*
32 * A number of pages that is large enough that it must take two fragments to
33 * share.
34 */
35#define FRAGMENTED_SHARE_PAGE_COUNT \
36 (sizeof(struct ffa_memory_region) / \
37 sizeof(struct ffa_memory_region_constituent))
38
J-Alves3106b072020-11-18 10:37:21 +000039static const struct ffa_uuid expected_sp_uuids[] = {
40 {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
41 };
42
J-Alves27390fd2020-11-18 10:51:15 +000043/* Memory section to be used for memory share operations */
Karl Meakin0d4f5ff2023-10-13 20:03:16 +010044static __aligned(PAGE_SIZE) uint8_t
45 share_page[PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT];
J-Alves3be0efa2023-10-02 19:11:11 +010046static __aligned(PAGE_SIZE) uint8_t donate_page[PAGE_SIZE];
J-Alves36c9b072023-03-17 15:05:41 +000047static __aligned(PAGE_SIZE) uint8_t consecutive_donate_page[PAGE_SIZE];
J-Alves04fc4f22023-10-11 17:04:52 +010048static __aligned(PAGE_SIZE) uint8_t four_share_pages[PAGE_SIZE * 4];
J-Alves72f00292024-01-08 11:31:45 +000049
J-Alves8083db22023-06-27 17:22:58 +010050static bool gpc_abort_triggered;
J-Alves40618a32020-10-08 17:25:37 +010051
/*
 * Dump and validate the first `wcount` words at `ptr`: returns true only if
 * every word equals `word` (the pattern the borrower SP is expected to have
 * written).
 */
static bool check_written_words(uint32_t *ptr, uint32_t word, uint32_t wcount)
{
	bool all_match = true;

	VERBOSE("TFTF - Memory contents after SP use:\n");

	for (unsigned int i = 0U; all_match && (i < wcount); i++) {
		VERBOSE(" %u: %x\n", i, ptr[i]);

		/* Stop at the first word that differs from the pattern. */
		all_match = (ptr[i] == word);
	}

	return all_match;
}
65
J-Alves9c088902023-03-16 18:21:37 +000066static bool test_memory_send_expect_denied(uint32_t mem_func,
J-Alves7b8f5c02023-03-17 15:05:13 +000067 void *mem_ptr,
68 ffa_id_t borrower)
J-Alves9c088902023-03-16 18:21:37 +000069{
70 struct ffa_value ret;
71 struct mailbox_buffers mb;
72 struct ffa_memory_region_constituent constituents[] = {
73 {(void *)mem_ptr, 1, 0}
74 };
75 ffa_memory_handle_t handle;
76
77 const uint32_t constituents_count = sizeof(constituents) /
78 sizeof(struct ffa_memory_region_constituent);
Karl Meakin1331a8c2023-09-14 16:25:15 +010079
80 struct ffa_memory_access receiver =
81 ffa_memory_access_init_permissions_from_mem_func(borrower,
82 mem_func);
83
J-Alves9c088902023-03-16 18:21:37 +000084 GET_TFTF_MAILBOX(mb);
85
86 handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
Karl Meakin1331a8c2023-09-14 16:25:15 +010087 MAILBOX_SIZE, SENDER, &receiver, 1,
J-Alves9c088902023-03-16 18:21:37 +000088 constituents, constituents_count,
89 mem_func, &ret);
90
91 if (handle != FFA_MEMORY_HANDLE_INVALID) {
Karl Meakin0d4f5ff2023-10-13 20:03:16 +010092 ERROR("Received a valid FF-A memory handle, and that isn't "
93 "expected.\n");
J-Alves9c088902023-03-16 18:21:37 +000094 return false;
95 }
96
97 if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
98 return false;
99 }
100
101 return true;
102}
103
J-Alves8083db22023-06-27 17:22:58 +0100104static bool data_abort_handler(void)
105{
106 uint64_t esr_elx = IS_IN_EL2() ? read_esr_el2() : read_esr_el1();
107
108 VERBOSE("%s esr_elx %llx\n", __func__, esr_elx);
109
110 if (EC_BITS(esr_elx) == EC_DABORT_CUR_EL) {
111 /* Synchronous data abort triggered by Granule protection */
112 if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_GPF_DABORT) {
J-Alves3be0efa2023-10-02 19:11:11 +0100113 VERBOSE("%s GPF Data Abort caught to address: %llx\n",
114 __func__, (uint64_t)read_far_el2());
J-Alves8083db22023-06-27 17:22:58 +0100115 gpc_abort_triggered = true;
116 return true;
117 }
118 }
119
120 return false;
121}
122
J-Alves3be0efa2023-10-02 19:11:11 +0100123static bool get_gpc_abort_triggered(void)
124{
125 bool ret = gpc_abort_triggered;
126
127 gpc_abort_triggered = false;
128
129 return ret;
130}
131
J-Alves9c088902023-03-16 18:21:37 +0000132/**
133 * Test invocation to FF-A memory sharing interfaces that should return in an
134 * error.
135 */
136test_result_t test_share_forbidden_ranges(void)
137{
138 const uintptr_t forbidden_address[] = {
139 /* Cactus SP memory. */
140 (uintptr_t)0x7200000,
141 /* SPMC Memory. */
142 (uintptr_t)0x6000000,
143 /* NS memory defined in cactus tertiary. */
144 (uintptr_t)0x0000880080001000,
145 };
146
Daniel Boulbyb34fe102024-01-17 15:10:52 +0000147 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
J-Alves9c088902023-03-16 18:21:37 +0000148
149 for (unsigned i = 0; i < 3; i++) {
150 if (!test_memory_send_expect_denied(
J-Alves8984e722024-05-07 22:21:54 +0100151 FFA_MEM_SHARE_SMC64, (void *)forbidden_address[i],
J-Alves7b8f5c02023-03-17 15:05:13 +0000152 RECEIVER)) {
J-Alves9c088902023-03-16 18:21:37 +0000153 return TEST_RESULT_FAIL;
154 }
155 }
156
157 return TEST_RESULT_SUCCESS;
158}
159
/**
 * Tests that it is possible to share memory with SWd from NWd.
 * After calling the respective memory send API, it will expect a reply from
 * cactus SP, at which point it will reclaim access to the memory region and
 * check the memory region has been used by receiver SP.
 *
 * Accessing memory before a memory reclaim operation should only be possible
 * in the context of a memory share operation.
 * According to the FF-A spec, the owner is temporarily relinquishing
 * access to the memory region on a memory lend operation, and on a
 * memory donate operation the access is relinquished permanently.
 * SPMC is positioned in S-EL2, and doesn't control stage-1 mapping for
 * EL2. Therefore, it is impossible to enforce the expected access
 * policy for a donate and lend operations within the SPMC.
 * Current SPMC implementation is under the assumption of trust that
 * Hypervisor (sitting in EL2) would relinquish access from EL1/EL0
 * FF-A endpoint at relevant moment.
 *
 * On RME-enabled platforms, for lend/donate of normal memory, the test also
 * verifies that touching the memory after the send triggers a GPC data abort
 * (caught by data_abort_handler()).
 */
static test_result_t test_memory_send_sp(uint32_t mem_func, ffa_id_t borrower,
	struct ffa_memory_region_constituent *constituents,
	size_t constituents_count, bool is_normal_memory)
{
	struct ffa_value ret;
	ffa_memory_handle_t handle;
	uint32_t *ptr;
	struct mailbox_buffers mb;
	unsigned int rme_supported = get_armv9_2_feat_rme_support();
	/*
	 * A GPC fault is only expected for lend/donate (not share) of normal
	 * memory, and only when RME is supported by the platform.
	 */
	const bool check_gpc_fault =
		mem_func != FFA_MEM_SHARE_SMC64 &&
		rme_supported != 0U && is_normal_memory;

	/*
	 * For normal memory arbitrarily write 5 words after using memory.
	 * For device just write 1 so we only write in the data register of
	 * the device.
	 */
	const uint32_t nr_words_to_write = is_normal_memory ? 5 : 1;

	struct ffa_memory_access receiver =
		ffa_memory_access_init_permissions_from_mem_func(borrower,
								 mem_func);

	/***********************************************************************
	 * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
	 **********************************************************************/
	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);

	GET_TFTF_MAILBOX(mb);

	/*
	 * If the RME is enabled for the platform under test, check that the
	 * GPCs are working as expected, as such setup the exception handler.
	 */
	if (check_gpc_fault) {
		register_custom_sync_exception_handler(data_abort_handler);
	}

	/* Seed every constituent with a known pattern before sending. */
	for (size_t i = 0; i < constituents_count; i++) {
		VERBOSE("Sharing Address: %p\n", constituents[i].address);
		ptr = (uint32_t *)constituents[i].address;
		for (size_t j = 0; j < nr_words_to_write; j++) {
			ptr[j] = mem_func + 0xFFA;
		}
	}

	handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
					MAILBOX_SIZE, SENDER, &receiver, 1,
					constituents, constituents_count,
					mem_func, &ret);

	if (handle == FFA_MEMORY_HANDLE_INVALID) {
		return TEST_RESULT_FAIL;
	}

	VERBOSE("TFTF - Handle: %llx\n", handle);

	ptr = (uint32_t *)constituents[0].address;

	/* Ask the borrower SP to retrieve and use the region. */
	ret = cactus_mem_send_cmd(SENDER, borrower, mem_func, handle, 0,
				  nr_words_to_write, false, is_normal_memory);

	if (!is_ffa_direct_response(ret) ||
	    cactus_get_response(ret) != CACTUS_SUCCESS) {
		ffa_mem_reclaim(handle, 0);
		ERROR("Failed memory send operation!\n");
		return TEST_RESULT_FAIL;
	}

	/*
	 * If there is RME support, look to trigger an exception as soon as the
	 * security state is updated, due to GPC fault.
	 */
	if (check_gpc_fault) {
		*ptr = 0xBEEF;
	}

	if (mem_func != FFA_MEM_DONATE_SMC64) {

		/* Reclaim memory entirely before checking its state. */
		if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
			tftf_testcase_printf("Couldn't reclaim memory\n");
			return TEST_RESULT_FAIL;
		}

		for (uint32_t i = 0; i < constituents_count; i++) {
			ptr = constituents[i].address;

			/*
			 * Check that borrower used the memory as expected
			 * for FFA_MEM_SHARE test.
			 */
			if (mem_func == FFA_MEM_SHARE_SMC64 &&
			    !check_written_words(ptr,
						 mem_func + 0xFFAU,
						 nr_words_to_write)) {
				ERROR("Fail because of state of memory.\n");
				return TEST_RESULT_FAIL;
			}
		}
	}

	/* The GPC abort must have fired for lend/donate under RME. */
	if (check_gpc_fault) {
		unregister_custom_sync_exception_handler();
		if (!get_gpc_abort_triggered()) {
			ERROR("No exception due to GPC for lend/donate with RME.\n");
			return TEST_RESULT_FAIL;
		}
	}

	return TEST_RESULT_SUCCESS;
}
290
291test_result_t test_mem_share_sp(void)
292{
J-Alves7b8f5c02023-03-17 15:05:13 +0000293 struct ffa_memory_region_constituent constituents[] = {
294 {(void *)share_page, 1, 0}
295 };
296
297 const uint32_t constituents_count = sizeof(constituents) /
298 sizeof(struct ffa_memory_region_constituent);
299
J-Alves8984e722024-05-07 22:21:54 +0100300 return test_memory_send_sp(FFA_MEM_SHARE_SMC64, RECEIVER, constituents,
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100301 constituents_count, true);
J-Alves40618a32020-10-08 17:25:37 +0100302}
303
304test_result_t test_mem_lend_sp(void)
305{
J-Alves7b8f5c02023-03-17 15:05:13 +0000306 struct ffa_memory_region_constituent constituents[] = {
J-Alves72f00292024-01-08 11:31:45 +0000307 {(void *)four_share_pages, 4, 0},
J-Alves7b8f5c02023-03-17 15:05:13 +0000308 {(void *)share_page, 1, 0}
309 };
310
311 const uint32_t constituents_count = sizeof(constituents) /
312 sizeof(struct ffa_memory_region_constituent);
313
J-Alves8984e722024-05-07 22:21:54 +0100314 return test_memory_send_sp(FFA_MEM_LEND_SMC64, RECEIVER, constituents,
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100315 constituents_count, true);
J-Alves40618a32020-10-08 17:25:37 +0100316}
317
318test_result_t test_mem_donate_sp(void)
319{
J-Alves7b8f5c02023-03-17 15:05:13 +0000320 struct ffa_memory_region_constituent constituents[] = {
J-Alves3be0efa2023-10-02 19:11:11 +0100321 {(void *)donate_page, 1, 0}
J-Alves7b8f5c02023-03-17 15:05:13 +0000322 };
323 const uint32_t constituents_count = sizeof(constituents) /
324 sizeof(struct ffa_memory_region_constituent);
J-Alves8984e722024-05-07 22:21:54 +0100325 return test_memory_send_sp(FFA_MEM_DONATE_SMC64, RECEIVER, constituents,
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100326 constituents_count, true);
J-Alves40618a32020-10-08 17:25:37 +0100327}
J-Alves27390fd2020-11-18 10:51:15 +0000328
J-Alves36c9b072023-03-17 15:05:41 +0000329test_result_t test_consecutive_donate(void)
330{
331 struct ffa_memory_region_constituent constituents[] = {
332 {(void *)consecutive_donate_page, 1, 0}
333 };
334 const uint32_t constituents_count = sizeof(constituents) /
335 sizeof(struct ffa_memory_region_constituent);
336
Daniel Boulbyb34fe102024-01-17 15:10:52 +0000337 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
J-Alves36c9b072023-03-17 15:05:41 +0000338
J-Alves8984e722024-05-07 22:21:54 +0100339 test_result_t ret = test_memory_send_sp(FFA_MEM_DONATE_SMC64, SP_ID(1),
J-Alves36c9b072023-03-17 15:05:41 +0000340 constituents,
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100341 constituents_count, true);
J-Alves36c9b072023-03-17 15:05:41 +0000342
343 if (ret != TEST_RESULT_SUCCESS) {
344 ERROR("Failed at first attempting of sharing.\n");
345 return TEST_RESULT_FAIL;
346 }
347
J-Alves8984e722024-05-07 22:21:54 +0100348 if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC64,
J-Alves36c9b072023-03-17 15:05:41 +0000349 consecutive_donate_page,
350 SP_ID(1))) {
351 ERROR("Memory was successfully donated again from the NWd, to "
352 "the same borrower.\n");
353 return TEST_RESULT_FAIL;
354 }
355
J-Alves8984e722024-05-07 22:21:54 +0100356 if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC64,
J-Alves36c9b072023-03-17 15:05:41 +0000357 consecutive_donate_page,
358 SP_ID(2))) {
359 ERROR("Memory was successfully donated again from the NWd, to "
360 "another borrower.\n");
361 return TEST_RESULT_FAIL;
362 }
363
364 return TEST_RESULT_SUCCESS;
365}
366
J-Alves27390fd2020-11-18 10:51:15 +0000367/*
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100368 * Lend device memory to the Secure Partition.
369 */
370test_result_t test_ffa_mem_lend_device_memory_sp(void)
371{
Daniel Boulby7a3c1dc2024-08-14 10:34:03 +0100372#if PLAT_fvp || PLAT_tc
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100373 struct ffa_memory_region_constituent constituents[] = {
Daniel Boulby7a3c1dc2024-08-14 10:34:03 +0100374 {(void *)PLAT_ARM_UART_BASE, 1, 0},
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100375 };
376
377 const uint32_t constituents_count = sizeof(constituents) /
378 sizeof(struct ffa_memory_region_constituent);
379
380 return test_memory_send_sp(FFA_MEM_LEND_SMC64, RECEIVER, constituents,
381 constituents_count, false);
Daniel Boulby7a3c1dc2024-08-14 10:34:03 +0100382#else
383 return TEST_RESULT_SKIPPED;
384#endif
Daniel Boulby3d8cd682024-07-23 14:28:15 +0100385
386}
387
388
389/*
J-Alves27390fd2020-11-18 10:51:15 +0000390 * Test requests a memory send operation between cactus SPs.
391 * Cactus SP should reply to TFTF on whether the test succeeded or not.
392 */
393static test_result_t test_req_mem_send_sp_to_sp(uint32_t mem_func,
Daniel Boulbye79d2072021-03-03 11:34:53 +0000394 ffa_id_t sender_sp,
Federico Recanati6328fb02022-01-14 15:48:16 +0100395 ffa_id_t receiver_sp,
396 bool non_secure)
J-Alves27390fd2020-11-18 10:51:15 +0000397{
Daniel Boulbyce386b12022-03-29 18:36:36 +0100398 struct ffa_value ret;
J-Alves27390fd2020-11-18 10:51:15 +0000399
400 /***********************************************************************
401 * Check if SPMC's ffa_version and presence of expected FF-A endpoints.
402 **********************************************************************/
Daniel Boulbyb34fe102024-01-17 15:10:52 +0000403 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
J-Alves27390fd2020-11-18 10:51:15 +0000404
J-Alves53392012020-11-18 14:51:57 +0000405 ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
Federico Recanati6328fb02022-01-14 15:48:16 +0100406 receiver_sp, non_secure);
J-Alves27390fd2020-11-18 10:51:15 +0000407
J-Alves06373c52021-02-11 15:17:42 +0000408 if (!is_ffa_direct_response(ret)) {
J-Alves27390fd2020-11-18 10:51:15 +0000409 return TEST_RESULT_FAIL;
410 }
411
J-Alves53392012020-11-18 14:51:57 +0000412 if (cactus_get_response(ret) == CACTUS_ERROR) {
Maksims Svecovs61740652021-12-14 12:01:45 +0000413 ERROR("Failed sharing memory between SPs. Error code: %d\n",
414 cactus_error_code(ret));
J-Alves27390fd2020-11-18 10:51:15 +0000415 return TEST_RESULT_FAIL;
416 }
417
418 return TEST_RESULT_SUCCESS;
419}
420
Maksims Svecovs61740652021-12-14 12:01:45 +0000421/*
422 * Test requests a memory send operation from SP to VM.
423 * The tests expects cactus to reply CACTUS_ERROR, providing FF-A error code of
424 * the last memory send FF-A call that cactus performed.
425 */
426static test_result_t test_req_mem_send_sp_to_vm(uint32_t mem_func,
427 ffa_id_t sender_sp,
428 ffa_id_t receiver_vm)
429{
Daniel Boulbyce386b12022-03-29 18:36:36 +0100430 struct ffa_value ret;
Maksims Svecovs61740652021-12-14 12:01:45 +0000431
432 /**********************************************************************
433 * Check if SPMC's ffa_version and presence of expected FF-A endpoints.
434 *********************************************************************/
Daniel Boulbyb34fe102024-01-17 15:10:52 +0000435 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
Maksims Svecovs61740652021-12-14 12:01:45 +0000436
437 ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
Federico Recanati6328fb02022-01-14 15:48:16 +0100438 receiver_vm, false);
Maksims Svecovs61740652021-12-14 12:01:45 +0000439
440 if (!is_ffa_direct_response(ret)) {
441 return TEST_RESULT_FAIL;
442 }
443
444 if (cactus_get_response(ret) == CACTUS_ERROR &&
445 cactus_error_code(ret) == FFA_ERROR_DENIED) {
446 return TEST_RESULT_SUCCESS;
447 }
448
449 tftf_testcase_printf("Did not get the expected error, "
450 "mem send returned with %d\n",
451 cactus_get_response(ret));
452 return TEST_RESULT_FAIL;
453}
454
J-Alves27390fd2020-11-18 10:51:15 +0000455test_result_t test_req_mem_share_sp_to_sp(void)
456{
J-Alves8984e722024-05-07 22:21:54 +0100457 return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC64, SP_ID(3),
Federico Recanati6328fb02022-01-14 15:48:16 +0100458 SP_ID(2), false);
459}
460
461test_result_t test_req_ns_mem_share_sp_to_sp(void)
462{
Olivier Deprez728cc562022-06-09 11:37:46 +0200463 /*
464 * Skip the test when RME is enabled (for test setup reasons).
465 * For RME tests, the model specifies 48b physical address size
466 * at the PE, but misses allocating RAM and increasing the PA at
467 * the interconnect level.
468 */
469 if (get_armv9_2_feat_rme_support() != 0U) {
470 return TEST_RESULT_SKIPPED;
471 }
472
473 /* This test requires 48b physical address size capability. */
Federico Recanati6328fb02022-01-14 15:48:16 +0100474 SKIP_TEST_IF_PA_SIZE_LESS_THAN(48);
Olivier Deprez728cc562022-06-09 11:37:46 +0200475
J-Alves8984e722024-05-07 22:21:54 +0100476 return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC64, SP_ID(3),
Federico Recanati6328fb02022-01-14 15:48:16 +0100477 SP_ID(2), true);
J-Alves27390fd2020-11-18 10:51:15 +0000478}
479
480test_result_t test_req_mem_lend_sp_to_sp(void)
481{
J-Alves8984e722024-05-07 22:21:54 +0100482 return test_req_mem_send_sp_to_sp(FFA_MEM_LEND_SMC64, SP_ID(3),
Federico Recanati6328fb02022-01-14 15:48:16 +0100483 SP_ID(2), false);
J-Alves27390fd2020-11-18 10:51:15 +0000484}
485
486test_result_t test_req_mem_donate_sp_to_sp(void)
487{
J-Alves8984e722024-05-07 22:21:54 +0100488 return test_req_mem_send_sp_to_sp(FFA_MEM_DONATE_SMC64, SP_ID(1),
Federico Recanati6328fb02022-01-14 15:48:16 +0100489 SP_ID(3), false);
J-Alves27390fd2020-11-18 10:51:15 +0000490}
Maksims Svecovs61740652021-12-14 12:01:45 +0000491
492test_result_t test_req_mem_share_sp_to_vm(void)
493{
J-Alves8984e722024-05-07 22:21:54 +0100494 return test_req_mem_send_sp_to_vm(FFA_MEM_SHARE_SMC64, SP_ID(1),
Maksims Svecovs61740652021-12-14 12:01:45 +0000495 HYP_ID);
496}
497
498test_result_t test_req_mem_lend_sp_to_vm(void)
499{
J-Alves8984e722024-05-07 22:21:54 +0100500 return test_req_mem_send_sp_to_vm(FFA_MEM_LEND_SMC64, SP_ID(2),
Maksims Svecovs61740652021-12-14 12:01:45 +0000501 HYP_ID);
502}
J-Alves807ce142021-12-14 15:24:11 +0000503
/*
 * Lend memory to the receiver SP with the "clear" flag set: the SPMC must
 * zero the region before the borrower maps it, and again on relinquish.
 * The SP is asked (via cactus_mem_send_cmd) to verify the region is zeroed
 * and then use it; finally the region is reclaimed.
 */
test_result_t test_mem_share_to_sp_clear_memory(void)
{
	struct ffa_memory_region_constituent constituents[] = {
		{(void *)four_share_pages, 4, 0},
		{(void *)share_page, 1, 0}
	};

	const uint32_t constituents_count = sizeof(constituents) /
		sizeof(struct ffa_memory_region_constituent);
	struct mailbox_buffers mb;
	uint32_t remaining_constituent_count;
	uint32_t total_length;
	uint32_t fragment_length;
	ffa_memory_handle_t handle;
	struct ffa_value ret;
	/* Arbitrarily write 10 words after using shared memory. */
	const uint32_t nr_words_to_write = 10U;

	struct ffa_memory_access receiver =
		ffa_memory_access_init_permissions_from_mem_func(
			RECEIVER, FFA_MEM_LEND_SMC64);

	CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);

	GET_TFTF_MAILBOX(mb);

	/*
	 * Build the transaction descriptor in the TX buffer with the
	 * FFA_MEMORY_REGION_FLAG_CLEAR flag set.
	 */
	remaining_constituent_count = ffa_memory_region_init(
		(struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
		&receiver, 1, constituents, constituents_count, 0,
		FFA_MEMORY_REGION_FLAG_CLEAR,
		FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
		&total_length, &fragment_length);

	/* Everything must fit in a single fragment here. */
	if (remaining_constituent_count != 0) {
		ERROR("Transaction descriptor initialization failed!\n");
		return TEST_RESULT_FAIL;
	}

	handle = memory_send(mb.send, FFA_MEM_LEND_SMC64, constituents,
			     constituents_count, remaining_constituent_count,
			     fragment_length, total_length, &ret);

	if (handle == FFA_MEMORY_HANDLE_INVALID) {
		ERROR("Memory Share failed!\n");
		return TEST_RESULT_FAIL;
	}

	VERBOSE("Memory has been shared!\n");

	/* Ask the SP to retrieve; it checks the clear flag semantics. */
	ret = cactus_mem_send_cmd(SENDER, RECEIVER, FFA_MEM_LEND_SMC64, handle,
				  FFA_MEMORY_REGION_FLAG_CLEAR,
				  nr_words_to_write, false, true);

	if (!is_ffa_direct_response(ret)) {
		return TEST_RESULT_FAIL;
	}

	if (cactus_get_response(ret) != CACTUS_SUCCESS) {
		ERROR("Failed memory send operation!\n");
		return TEST_RESULT_FAIL;
	}

	ret = ffa_mem_reclaim(handle, 0);

	if (is_ffa_call_error(ret)) {
		ERROR("Memory reclaim failed!\n");
		return TEST_RESULT_FAIL;
	}

	return TEST_RESULT_SUCCESS;
}
Karl Meakin3d879b82023-06-16 10:32:08 +0100575
/**
 * Print `region` if LOG_LEVEL >= LOG_LEVEL_VERBOSE
 *
 * Dumps every header field of an FF-A memory transaction descriptor; used
 * when debugging the hypervisor retrieve request tests.
 */
static void print_memory_region(struct ffa_memory_region *region)
{
	VERBOSE("region.sender = %d\n", region->sender);
	VERBOSE("region.attributes.shareability = %d\n",
		region->attributes.shareability);
	VERBOSE("region.attributes.cacheability = %d\n",
		region->attributes.cacheability);
	VERBOSE("region.attributes.type = %d\n", region->attributes.type);
	VERBOSE("region.attributes.security = %d\n",
		region->attributes.security);
	VERBOSE("region.flags = %d\n", region->flags);
	VERBOSE("region.handle = %lld\n", region->handle);
	VERBOSE("region.tag = %lld\n", region->tag);
	VERBOSE("region.memory_access_desc_size = %d\n",
		region->memory_access_desc_size);
	VERBOSE("region.receiver_count = %d\n", region->receiver_count);
	VERBOSE("region.receivers_offset = %d\n", region->receivers_offset);
}
597
/**
 * Used by hypervisor retrieve request test: validate descriptors provided by
 * SPMC.
 *
 * Compares every header field of `region1` (the retrieved descriptor)
 * against `region2` (the expected one), logging and returning false on the
 * first mismatch. Also checks that the reserved words of `region1` are zero.
 */
static bool verify_retrieve_response(const struct ffa_memory_region *region1,
				     const struct ffa_memory_region *region2)
{
	if (region1->sender != region2->sender) {
		ERROR("region1.sender=%d, expected %d\n", region1->sender,
		      region2->sender);
		return false;
	}
	if (region1->attributes.shareability != region2->attributes.shareability) {
		ERROR("region1.attributes.shareability=%d, expected %d\n",
		      region1->attributes.shareability,
		      region2->attributes.shareability);
		return false;
	}
	if (region1->attributes.cacheability != region2->attributes.cacheability) {
		ERROR("region1.attributes.cacheability=%d, expected %d\n",
		      region1->attributes.cacheability,
		      region2->attributes.cacheability);
		return false;
	}
	if (region1->attributes.type != region2->attributes.type) {
		ERROR("region1.attributes.type=%d, expected %d\n",
		      region1->attributes.type, region2->attributes.type);
		return false;
	}
	if (region1->attributes.security != region2->attributes.security) {
		ERROR("region1.attributes.security=%d, expected %d\n",
		      region1->attributes.security, region2->attributes.security);
		return false;
	}
	if (region1->flags != region2->flags) {
		ERROR("region1->flags=%d, expected %d\n", region1->flags,
		      region2->flags);
		return false;
	}
	if (region1->handle != region2->handle) {
		ERROR("region1.handle=%lld, expected %lld\n", region1->handle,
		      region2->handle);
		return false;
	}
	if (region1->tag != region2->tag) {
		ERROR("region1.tag=%lld, expected %lld\n", region1->tag, region2->tag);
		return false;
	}
	if (region1->memory_access_desc_size != region2->memory_access_desc_size) {
		ERROR("region1.memory_access_desc_size=%d, expected %d\n",
		      region1->memory_access_desc_size,
		      region2->memory_access_desc_size);
		return false;
	}
	if (region1->receiver_count != region2->receiver_count) {
		ERROR("region1.receiver_count=%d, expected %d\n",
		      region1->receiver_count, region2->receiver_count);
		return false;
	}
	if (region1->receivers_offset != region2->receivers_offset) {
		ERROR("region1.receivers_offset=%d, expected %d\n",
		      region1->receivers_offset, region2->receivers_offset);
		return false;
	}
	/* Reserved words must be zero in a well-formed descriptor. */
	for (uint32_t i = 0; i < 3; i++) {
		if (region1->reserved[i] != 0) {
			ERROR("region.reserved[%d]=%d, expected 0\n", i,
			      region1->reserved[i]);
			return false;
		}
	}
	return true;
}
671
672/**
673 * Used by hypervisor retrieve request test: validate descriptors provided by
674 * SPMC.
675 */
676static bool
677verify_constituent(struct ffa_memory_region_constituent *constituent,
678 void *address, uint32_t page_count)
679{
680 if (constituent->address != address) {
681 ERROR("constituent.address=%p, expected %p\n",
682 constituent->address, address);
683 return false;
684 }
685 if (constituent->page_count != page_count) {
686 ERROR("constituent.page_count=%d, expected %d\n",
687 constituent->page_count, page_count);
688 return false;
689 }
690 if (constituent->reserved != 0) {
691 ERROR("constituent.reserved=%d, expected 0\n",
692 constituent->reserved);
693 return false;
694 }
695 return true;
696}
697
698/**
699 * Used by hypervisor retrieve request test: validate descriptors provided by
700 * SPMC.
701 */
702static bool verify_composite(struct ffa_composite_memory_region *composite,
703 struct ffa_memory_region_constituent *constituent,
704 uint32_t page_count, uint32_t constituent_count)
705{
706 if (composite->page_count != page_count) {
707 ERROR("composite.page_count=%d, expected %d\n",
708 composite->page_count, page_count);
709 return false;
710 }
711 if (composite->constituent_count != constituent_count) {
712 ERROR("composite.constituent_count=%d, expected %d\n",
713 composite->constituent_count, constituent_count);
714 return false;
715 }
716 if (composite->reserved_0 != 0) {
717 ERROR("composite.reserved_0=%llu, expected 0\n",
718 composite->reserved_0);
719 return false;
720 }
721 for (uint32_t j = 0; j < composite->constituent_count; j++) {
722 if (!verify_constituent(constituent, share_page, 1)) {
723 return false;
724 }
725 }
726 return true;
727}
728
J-Alvesb744c9b2024-01-18 17:03:47 +0000729static bool verify_receivers_impdef(struct ffa_memory_access_impdef impdef1,
730 struct ffa_memory_access_impdef impdef2)
731{
732 if (impdef1.val[0] != impdef2.val[0] ||
733 impdef1.val[1] != impdef2.val[1]) {
734 ERROR("ipmdef1.val[0]=%llu expected=%llu"
735 " ipmdef1.val[1]=%llu expected=%llu\n",
736 impdef1.val[0], impdef2.val[0],
737 impdef1.val[1], impdef2.val[1]);
738 return false;
739 }
740
741 return true;
742}
743
744static bool verify_permissions(
745 ffa_memory_access_permissions_t permissions1,
746 ffa_memory_access_permissions_t permissions2)
747{
748 uint8_t access1;
749 uint8_t access2;
750
751 access1 = permissions1.data_access;
752 access2 = permissions2.data_access;
753
754 if (access1 != access2) {
755 ERROR("permissions1.data_access=%u expected=%u\n",
756 access1, access2);
757 return false;
758 }
759
760 access1 = permissions1.instruction_access;
761 access2 = permissions2.instruction_access;
762
763 if (access1 != access2) {
764 ERROR("permissions1.instruction_access=%u expected=%u\n",
765 access1, access2);
766 return false;
767 }
768
769 return true;
770}
771
772/**
773 * Used by hypervisor retrieve request test: validate descriptors provided by
774 * SPMC.
775 */
776static bool verify_receivers(struct ffa_memory_access *receivers1,
777 struct ffa_memory_access *receivers2,
778 uint32_t receivers_count)
779{
780 for (uint32_t i = 0; i < receivers_count; i++) {
781 if (receivers1[i].receiver_permissions.receiver !=
782 receivers2[i].receiver_permissions.receiver) {
783 ERROR("receivers1[%u].receiver_permissions.receiver=%x"
784 " expected=%x\n", i,
785 receivers1[i].receiver_permissions.receiver,
786 receivers2[i].receiver_permissions.receiver);
787 return false;
788 }
789
790 if (receivers1[i].receiver_permissions.flags !=
791 receivers2[i].receiver_permissions.flags) {
792 ERROR("receivers1[%u].receiver_permissions.flags=%u"
793 " expected=%u\n", i,
794 receivers1[i].receiver_permissions.flags,
795 receivers2[i].receiver_permissions.flags);
796 return false;
797 }
798
799 if (!verify_permissions(
800 receivers1[i].receiver_permissions.permissions,
801 receivers2[i].receiver_permissions.permissions)) {
802 return false;
803 }
804
805 if (receivers1[i].composite_memory_region_offset !=
806 receivers2[i].composite_memory_region_offset) {
807 ERROR("receivers1[%u].composite_memory_region_offset=%u"
808 " expected %u\n",
809 i, receivers1[i].composite_memory_region_offset,
810 receivers2[i].composite_memory_region_offset);
811 return false;
812 }
813
814 if (!verify_receivers_impdef(receivers1[i].impdef,
815 receivers1[i].impdef)) {
816 return false;
817 }
818 }
819
820 return true;
821}
822
Karl Meakin3d879b82023-06-16 10:32:08 +0100823/**
824 * Helper for performing a hypervisor retrieve request test.
825 */
Karl Meakin028712b2023-12-06 15:50:33 +0000826static test_result_t hypervisor_retrieve_request_test_helper(
827 uint32_t mem_func, bool multiple_receivers, bool fragmented)
Karl Meakin3d879b82023-06-16 10:32:08 +0100828{
Karl Meakin0d4f5ff2023-10-13 20:03:16 +0100829 static struct ffa_memory_region_constituent
830 sent_constituents[FRAGMENTED_SHARE_PAGE_COUNT];
Karl Meakin3d879b82023-06-16 10:32:08 +0100831 __aligned(PAGE_SIZE) static uint8_t page[PAGE_SIZE * 2] = {0};
832 struct ffa_memory_region *hypervisor_retrieve_response =
833 (struct ffa_memory_region *)page;
834 struct ffa_memory_region expected_response;
835 struct mailbox_buffers mb;
836 ffa_memory_handle_t handle;
837 struct ffa_value ret;
J-Alvesb744c9b2024-01-18 17:03:47 +0000838 struct ffa_composite_memory_region *composite;
839 struct ffa_memory_access *retrvd_receivers;
Karl Meakin3d879b82023-06-16 10:32:08 +0100840 uint32_t expected_flags = 0;
841
J-Alvesb2ca2dc2025-10-08 12:26:06 +0100842 /*
843 * Do not expect the security state to be reported given that it has no
844 * meaning in the normal world.
845 * Given the hypervisor retrieve request should only be used for memory
846 * shared/lent/donated from the NWd, hence all memory is non_secure.
847 */
Karl Meakin3d879b82023-06-16 10:32:08 +0100848 ffa_memory_attributes_t expected_attrs = {
849 .cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
850 .shareability = FFA_MEMORY_INNER_SHAREABLE,
J-Alvesb2ca2dc2025-10-08 12:26:06 +0100851 .security = FFA_MEMORY_SECURITY_UNSPECIFIED,
J-Alves8984e722024-05-07 22:21:54 +0100852 .type = (!multiple_receivers && mem_func != FFA_MEM_SHARE_SMC64)
Karl Meakin3d879b82023-06-16 10:32:08 +0100853 ? FFA_MEMORY_NOT_SPECIFIED_MEM
854 : FFA_MEMORY_NORMAL_MEM,
855 };
856
Karl Meakin1f488922023-12-04 21:28:25 +0000857 struct ffa_memory_access receivers[2] = {
858 ffa_memory_access_init_permissions_from_mem_func(SP_ID(1),
859 mem_func),
860 ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
861 mem_func),
862 };
863
864 /*
865 * Only pass 1 receiver to `memory_init_and_send` if we are not testing
866 * the multiple-receivers functionality of the hypervisor retrieve
867 * request.
868 */
869 uint32_t receiver_count =
870 multiple_receivers ? ARRAY_SIZE(receivers) : 1;
Karl Meakin1331a8c2023-09-14 16:25:15 +0100871
Karl Meakin028712b2023-12-06 15:50:33 +0000872 uint32_t sent_constituents_count =
873 fragmented ? ARRAY_SIZE(sent_constituents) : 1;
874
J-Alvesb744c9b2024-01-18 17:03:47 +0000875 /* Prepare the composite offset for the comparison. */
876 for (uint32_t i = 0; i < receiver_count; i++) {
877 receivers[i].composite_memory_region_offset =
878 sizeof(struct ffa_memory_region) +
879 receiver_count *
880 sizeof(struct ffa_memory_access);
881 }
882
Karl Meakin0d4f5ff2023-10-13 20:03:16 +0100883 /* Add a page per constituent, so that we exhaust the size of a single
884 * fragment (for testing). In a real world scenario, the whole region
885 * could be described in a single constituent.
886 */
887 for (uint32_t i = 0; i < sent_constituents_count; i++) {
888 sent_constituents[i].address = share_page + i * PAGE_SIZE;
889 sent_constituents[i].page_count = 1;
890 sent_constituents[i].reserved = 0;
891 }
892
Karl Meakin3d879b82023-06-16 10:32:08 +0100893 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
894 GET_TFTF_MAILBOX(mb);
895
896 switch (mem_func) {
J-Alves8984e722024-05-07 22:21:54 +0100897 case FFA_MEM_SHARE_SMC64:
Karl Meakin3d879b82023-06-16 10:32:08 +0100898 expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
899 break;
J-Alves8984e722024-05-07 22:21:54 +0100900 case FFA_MEM_LEND_SMC64:
Karl Meakin3d879b82023-06-16 10:32:08 +0100901 expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
902 break;
J-Alves8984e722024-05-07 22:21:54 +0100903 case FFA_MEM_DONATE_SMC64:
Karl Meakin3d879b82023-06-16 10:32:08 +0100904 expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
905 break;
906 default:
907 ERROR("Invalid mem_func: %d\n", mem_func);
908 panic();
909 }
910
J-Alvesb744c9b2024-01-18 17:03:47 +0000911 handle = memory_init_and_send(mb.send, MAILBOX_SIZE, SENDER, receivers,
912 receiver_count, sent_constituents,
Karl Meakin3d879b82023-06-16 10:32:08 +0100913 sent_constituents_count, mem_func, &ret);
914 if (handle == FFA_MEMORY_HANDLE_INVALID) {
915 ERROR("Memory share failed: %d\n", ffa_error_code(ret));
916 return TEST_RESULT_FAIL;
917 }
918
919 /*
920 * Send Hypervisor Retrieve request according to section 17.4.3 of FFA
921 * v1.2-REL0 specification.
922 */
923 if (!hypervisor_retrieve_request(&mb, handle, page, sizeof(page))) {
924 return TEST_RESULT_FAIL;
925 }
926
927 print_memory_region(hypervisor_retrieve_response);
928
929 /*
930 * Verify the received `FFA_MEM_RETRIEVE_RESP` aligns with
931 * transaction description sent above.
932 */
J-Alvesb744c9b2024-01-18 17:03:47 +0000933 expected_response = (struct ffa_memory_region) {
Karl Meakin3d879b82023-06-16 10:32:08 +0100934 .sender = SENDER,
935 .attributes = expected_attrs,
936 .flags = expected_flags,
937 .handle = handle,
938 .tag = 0,
939 .memory_access_desc_size = sizeof(struct ffa_memory_access),
J-Alvesb744c9b2024-01-18 17:03:47 +0000940 .receiver_count = receiver_count,
Karl Meakin3d879b82023-06-16 10:32:08 +0100941 .receivers_offset =
942 offsetof(struct ffa_memory_region, receivers),
943 };
J-Alvesb744c9b2024-01-18 17:03:47 +0000944
945 if (!verify_retrieve_response(hypervisor_retrieve_response,
946 &expected_response)) {
Karl Meakin3d879b82023-06-16 10:32:08 +0100947 return TEST_RESULT_FAIL;
948 }
949
J-Alvesb744c9b2024-01-18 17:03:47 +0000950 retrvd_receivers =
951 ffa_memory_region_get_receiver(hypervisor_retrieve_response, 0);
Karl Meakin3d879b82023-06-16 10:32:08 +0100952
J-Alvesb744c9b2024-01-18 17:03:47 +0000953 if (!verify_receivers(retrvd_receivers,
954 receivers, receiver_count)) {
955 return TEST_RESULT_FAIL;
956 }
957
958 composite = ffa_memory_region_get_composite(
959 hypervisor_retrieve_response, 0);
960
961 if (!verify_composite(composite, composite->constituents,
962 sent_constituents_count, sent_constituents_count)) {
963 return TEST_RESULT_FAIL;
Karl Meakin3d879b82023-06-16 10:32:08 +0100964 }
965
966 /*
967 * Reclaim for the SPMC to deallocate any data related to the handle.
968 */
969 ret = ffa_mem_reclaim(handle, 0);
970 if (is_ffa_call_error(ret)) {
971 ERROR("Memory reclaim failed: %d\n", ffa_error_code(ret));
972 return TEST_RESULT_FAIL;
973 }
974
Karl Meakin3d879b82023-06-16 10:32:08 +0100975 return TEST_RESULT_SUCCESS;
976}
977
/** Hypervisor retrieve request after FFA_MEM_SHARE: one receiver, one fragment. */
test_result_t test_hypervisor_share_retrieve(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC64, false, false);
}
982
/** Hypervisor retrieve request after FFA_MEM_LEND: one receiver, one fragment. */
test_result_t test_hypervisor_lend_retrieve(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC64, false, false);
}
987
/** Hypervisor retrieve request after FFA_MEM_DONATE: one receiver, one fragment. */
test_result_t test_hypervisor_donate_retrieve(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_DONATE_SMC64, false, false);
}
992
/** Hypervisor retrieve request after FFA_MEM_SHARE to two SPs, one fragment. */
test_result_t test_hypervisor_share_retrieve_multiple_receivers(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC64, true, false);
}
997
/** Hypervisor retrieve request after FFA_MEM_LEND to two SPs, one fragment. */
test_result_t test_hypervisor_lend_retrieve_multiple_receivers(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC64, true, false);
}
1002
/** Hypervisor retrieve request after a fragmented FFA_MEM_SHARE, one receiver. */
test_result_t test_hypervisor_share_retrieve_fragmented(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC64, false, true);
}
1007
/** Hypervisor retrieve request after a fragmented FFA_MEM_LEND, one receiver. */
test_result_t test_hypervisor_lend_retrieve_fragmented(void)
{
	return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC64, false, true);
}
J-Alves3be0efa2023-10-02 19:11:11 +01001012
1013/**
1014 * Test helper that performs memory sharing operation, and alters the PAS
1015 * of the memory, to validate that SPM intersects the operation in case the PAS
1016 * is not coherent with its use. Relevant for the functioning of FFA_MEM_LEND
1017 * and FFA_MEM_DONATE from NWd to an SP.
1018 *
1019 * In cases the memory is not in NS state, the SPMC should intersect memory
1020 * management call with an appropriate FFA_ERROR.
1021 */
1022static test_result_t test_ffa_mem_send_realm_expect_fail(
1023 uint32_t mem_func, ffa_id_t borrower,
1024 struct ffa_memory_region_constituent *constituents,
1025 size_t constituents_count, uint64_t delegate_addr)
1026{
1027 struct ffa_value ret;
1028 uint32_t remaining_constituent_count;
1029 uint32_t total_length;
1030 uint32_t fragment_length;
1031 struct mailbox_buffers mb;
1032 u_register_t ret_rmm;
1033 test_result_t result = TEST_RESULT_FAIL;
1034 struct ffa_memory_access receiver =
1035 ffa_memory_access_init_permissions_from_mem_func(borrower,
1036 mem_func);
1037
1038 if (get_armv9_2_feat_rme_support() == 0U) {
1039 return TEST_RESULT_SKIPPED;
1040 }
1041
1042 /***********************************************************************
1043 * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
1044 **********************************************************************/
1045 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1046
1047 GET_TFTF_MAILBOX(mb);
1048
1049 register_custom_sync_exception_handler(data_abort_handler);
1050
1051 /*
1052 * Delegate page to a realm. This should make memory sharing operation
1053 * fail.
1054 */
1055 ret_rmm = host_rmi_granule_delegate((u_register_t)delegate_addr);
1056
1057 if (ret_rmm != 0UL) {
1058 INFO("Delegate operation returns 0x%lx for address %llx\n",
1059 ret_rmm, delegate_addr);
1060 return TEST_RESULT_FAIL;
1061 }
1062
1063 remaining_constituent_count = ffa_memory_region_init(
1064 (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
1065 &receiver, 1, constituents, constituents_count, 0,
1066 FFA_MEMORY_REGION_FLAG_CLEAR,
1067 FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
1068 &total_length, &fragment_length);
1069
1070 if (remaining_constituent_count != 0) {
1071 goto out;
1072 }
1073
1074 switch (mem_func) {
J-Alves8984e722024-05-07 22:21:54 +01001075 case FFA_MEM_LEND_SMC64:
J-Alves3be0efa2023-10-02 19:11:11 +01001076 ret = ffa_mem_lend(total_length, fragment_length);
1077 break;
J-Alves8984e722024-05-07 22:21:54 +01001078 case FFA_MEM_DONATE_SMC64:
J-Alves3be0efa2023-10-02 19:11:11 +01001079 ret = ffa_mem_donate(total_length, fragment_length);
1080 break;
1081 default:
1082 ERROR("Not expected for func name: %x\n", mem_func);
1083 return TEST_RESULT_FAIL;
1084 }
1085
1086 if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
1087 goto out;
1088 }
1089
1090 /* Undelegate to reestablish the same security state for PAS. */
1091 ret_rmm = host_rmi_granule_undelegate((u_register_t)delegate_addr);
1092
1093 for (uint32_t i = 0; i < constituents_count; i++) {
1094 uint32_t *ptr = (uint32_t *)constituents[i].address;
1095
1096 *ptr = 0xFFA;
1097 }
1098
1099 if (get_gpc_abort_triggered()) {
1100 ERROR("Exception due to GPC for lend/donate with RME. Not"
1101 " expected for this case.\n");
1102 result = TEST_RESULT_FAIL;
1103 } else {
1104 result = TEST_RESULT_SUCCESS;
1105 }
1106out:
1107 unregister_custom_sync_exception_handler();
1108
1109 if (ret_rmm != 0UL) {
1110 INFO("Undelegate operation returns 0x%lx for address %llx\n",
1111 ret_rmm, (uint64_t)delegate_addr);
1112 return TEST_RESULT_FAIL;
1113 }
1114
1115 return result;
1116}
1117
1118/**
1119 * Memory to be shared between partitions is described in a composite, with
1120 * various constituents. In an RME system, the memory must be in NS PAS in
1121 * operations from NWd to an SP. In case the PAS is not following this
1122 * expectation memory lend/donate should fail, and all constituents must
1123 * remain in the NS PAS.
1124 *
1125 * This test validates that if one page in the middle of one of the constituents
1126 * is not in the NS PAS the operation fails.
1127 */
1128test_result_t test_ffa_mem_send_sp_realm_memory(void)
1129{
1130 test_result_t ret;
J-Alves8984e722024-05-07 22:21:54 +01001131 uint32_t mem_func[] = {FFA_MEM_LEND_SMC64, FFA_MEM_DONATE_SMC64};
J-Alves3be0efa2023-10-02 19:11:11 +01001132 struct ffa_memory_region_constituent constituents[] = {
1133 {(void *)four_share_pages, 4, 0},
1134 {(void *)share_page, 1, 0}
1135 };
1136
1137 const uint32_t constituents_count = sizeof(constituents) /
1138 sizeof(struct ffa_memory_region_constituent);
1139
J-Alvesb44b4752024-05-13 10:15:51 +01001140 /***********************************************************************
1141 * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
1142 **********************************************************************/
1143 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1144
J-Alves3be0efa2023-10-02 19:11:11 +01001145 for (unsigned j = 0; j < ARRAY_SIZE(mem_func); j++) {
1146 for (unsigned int i = 0; i < 4; i++) {
1147 /* Address to be delegated to Realm PAS. */
1148 uint64_t realm_addr =
1149 (uint64_t)&four_share_pages[i * PAGE_SIZE];
1150
1151 INFO("%s memory with realm addr: %llx\n",
J-Alves8984e722024-05-07 22:21:54 +01001152 mem_func[j] == FFA_MEM_LEND_SMC64
J-Alves3be0efa2023-10-02 19:11:11 +01001153 ? "Lend"
1154 : "Donate",
1155 realm_addr);
1156
1157 ret = test_ffa_mem_send_realm_expect_fail(
1158 mem_func[j], SP_ID(1), constituents,
1159 constituents_count, realm_addr);
1160
1161 if (ret != TEST_RESULT_SUCCESS) {
1162 break;
1163 }
1164 }
1165 }
1166
1167 return ret;
1168}
1169
1170/**
1171 * Memory to be shared between partitions is described in a composite, with
1172 * various constituents. In an RME system, the memory must be in NS PAS in
1173 * operations from NWd to an SP. In case the PAS is not following this
1174 * expectation memory lend/donate should fail, and all constituents must
1175 * remain in the NS PAS.
1176 *
1177 * This test validates the case in which the memory lend/donate fail in
1178 * case one of the constituents in the composite is not in the NS PAS.
1179 */
1180test_result_t test_ffa_mem_lend_sp_realm_memory_separate_constituent(void)
1181{
1182 test_result_t ret;
1183 struct ffa_memory_region_constituent constituents[] = {
1184 {(void *)four_share_pages, 4, 0},
1185 {(void *)share_page, 1, 0}
1186 };
1187 const uint32_t constituents_count = sizeof(constituents) /
1188 sizeof(struct ffa_memory_region_constituent);
1189 /* Address to be delegated to Realm PAS. */
1190 uint64_t realm_addr = (uint64_t)&share_page[0];
1191
1192 INFO("Sharing memory with realm addr: %llx\n", realm_addr);
1193
1194 ret = test_ffa_mem_send_realm_expect_fail(
J-Alves8984e722024-05-07 22:21:54 +01001195 FFA_MEM_LEND_SMC64, SP_ID(1), constituents,
J-Alves3be0efa2023-10-02 19:11:11 +01001196 constituents_count, realm_addr);
1197
1198 return ret;
1199}
J-Alvesd8e2fcd2024-03-28 15:53:51 +00001200
1201/**
1202 * Map the NS RXTX buffers to the SPM, change RX buffer PAS to realm,
1203 * invoke the FFA_MEM_SHARE interface, such that SPM does NS access
1204 * to realm region and triggers GPF.
1205 */
1206test_result_t test_ffa_mem_share_tx_realm_expect_fail(void)
1207{
1208 struct ffa_value ret;
1209 uint32_t total_length;
1210 uint32_t fragment_length;
1211 struct mailbox_buffers mb;
1212 u_register_t ret_rmm;
1213 struct ffa_memory_access receiver =
1214 ffa_memory_access_init_permissions_from_mem_func(SP_ID(1),
1215 FFA_MEM_SHARE_SMC64);
1216 size_t remaining_constituent_count;
1217 struct ffa_memory_region_constituent constituents[] = {
1218 {(void *)share_page, 1, 0}
1219 };
1220
1221 if (get_armv9_2_feat_rme_support() == 0U) {
1222 return TEST_RESULT_SKIPPED;
1223 }
1224
1225 /***********************************************************************
1226 * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
1227 **********************************************************************/
1228 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1229
1230 GET_TFTF_MAILBOX(mb);
1231
1232 remaining_constituent_count = ffa_memory_region_init(
1233 (struct ffa_memory_region *)mb.send, PAGE_SIZE, HYP_ID,
1234 &receiver, 1, constituents, 1, 0, 0,
1235 FFA_MEMORY_NOT_SPECIFIED_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
1236 FFA_MEMORY_INNER_SHAREABLE,
1237 &total_length, &fragment_length);
1238
1239 if (remaining_constituent_count != 0) {
1240 return TEST_RESULT_FAIL;
1241 }
1242
1243 /*
1244 * Delegate TX buffer to realm.
1245 */
1246 ret_rmm = host_rmi_granule_delegate((u_register_t)mb.send);
1247
1248 if (ret_rmm != 0UL) {
1249 INFO("Delegate operation returns %#lx for address %p\n",
1250 ret_rmm, mb.send);
1251 return TEST_RESULT_FAIL;
1252 }
1253
1254 ret = ffa_mem_share(total_length, fragment_length);
1255
1256 /* Access to Realm region from SPMC should return FFA_ERROR_ABORTED. */
1257 if (!is_expected_ffa_error(ret, FFA_ERROR_ABORTED)) {
1258 return TEST_RESULT_FAIL;
1259 }
1260
1261 /* Undelegate to reestablish the same security state for PAS. */
1262 ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.send);
1263
1264 if (ret_rmm != 0UL) {
1265 INFO("Undelegate operation returns 0x%lx for address %p\n",
1266 ret_rmm, mb.send);
1267 return TEST_RESULT_FAIL;
1268 }
1269
J-Alves09d34fc2024-07-17 14:35:26 +01001270 remaining_constituent_count = ffa_memory_region_init(
1271 (struct ffa_memory_region *)mb.send, PAGE_SIZE, HYP_ID,
1272 &receiver, 1, constituents, 1, 0, 0,
1273 FFA_MEMORY_NOT_SPECIFIED_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
1274 FFA_MEMORY_INNER_SHAREABLE,
1275 &total_length, &fragment_length);
1276
J-Alvesd8e2fcd2024-03-28 15:53:51 +00001277 /* Retry but expect test to pass. */
1278 ret = ffa_mem_share(total_length, fragment_length);
1279
1280 if (is_ffa_call_error(ret)) {
1281 return TEST_RESULT_FAIL;
1282 }
1283
1284 /* Reclaim to clean-up. */
1285 ret = ffa_mem_reclaim(ffa_mem_success_handle(ret), 0);
1286
1287 if (is_ffa_call_error(ret)) {
1288 return TEST_RESULT_FAIL;
1289 }
1290
1291 return TEST_RESULT_SUCCESS;
1292}
J-Alves3aa08bc2024-04-24 22:20:23 +01001293
1294/**
1295 * Base helper to prepare for tests that need to retrieve memory from the SPMC
1296 * from a VM endpoint.
1297 */
1298static ffa_memory_handle_t base_memory_send_for_nwd_retrieve(struct mailbox_buffers *mb,
1299 struct ffa_memory_access receivers[],
1300 size_t receivers_count)
1301{
1302 ffa_memory_handle_t handle;
1303 struct ffa_memory_region_constituent constituents[] = {
1304 {(void *)four_share_pages, 4, 0},
1305 {(void *)share_page, 1, 0}
1306 };
1307 const uint32_t constituents_count = ARRAY_SIZE(constituents);
1308 struct ffa_value ret;
1309
1310 /* Prepare the composite offset for the comparison. */
1311 for (uint32_t i = 0; i < receivers_count; i++) {
1312 receivers[i].composite_memory_region_offset =
1313 sizeof(struct ffa_memory_region) +
1314 receivers_count *
1315 sizeof(struct ffa_memory_access);
1316 }
1317
1318 handle = memory_init_and_send(mb->send, MAILBOX_SIZE, SENDER, receivers,
1319 receivers_count, constituents,
1320 constituents_count, FFA_MEM_SHARE_SMC64, &ret);
1321 return handle;
1322}
1323
1324/**
1325 * Test FF-A memory retrieve request from a VM into the SPMC.
1326 * TFTF invokes all the FF-A calls expected from an hypervisor into the
1327 * SPMC, even those that would be initiated by a VM, and then forwarded
1328 * to the SPMC by the Hypervisor.
1329 */
1330test_result_t test_ffa_memory_retrieve_request_from_vm(void)
1331{
1332 struct mailbox_buffers mb;
1333 struct ffa_memory_region *m;
1334 struct ffa_memory_access receivers[2] = {
1335 ffa_memory_access_init_permissions_from_mem_func(VM_ID(1),
1336 FFA_MEM_SHARE_SMC64),
1337 ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
1338 FFA_MEM_SHARE_SMC64),
1339 };
1340 ffa_memory_handle_t handle;
1341
1342 GET_TFTF_MAILBOX(mb);
1343
1344 if (get_armv9_2_feat_rme_support() == 0U) {
1345 return TEST_RESULT_SKIPPED;
1346 }
1347
1348 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1349
1350 handle = base_memory_send_for_nwd_retrieve(&mb, receivers, ARRAY_SIZE(receivers));
1351
1352 if (handle == FFA_MEMORY_HANDLE_INVALID) {
1353 return TEST_RESULT_FAIL;
1354 }
1355
Daniel Boulby3d8cd682024-07-23 14:28:15 +01001356 if (!memory_retrieve(&mb, &m, handle, 0, receivers, ARRAY_SIZE(receivers),
1357 0, true)) {
J-Alves3aa08bc2024-04-24 22:20:23 +01001358 ERROR("Failed to retrieve the memory.\n");
1359 return TEST_RESULT_FAIL;
1360 }
1361
1362 ffa_rx_release();
1363
1364 if (!memory_relinquish(mb.send, handle, VM_ID(1))) {
1365 ERROR("%s: Failed to relinquish.\n", __func__);
1366 return TEST_RESULT_FAIL;
1367 }
1368
1369 if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
1370 ERROR("%s: Failed to reclaim memory.\n", __func__);
1371 return TEST_RESULT_FAIL;
1372 }
1373
1374 return TEST_RESULT_SUCCESS;
1375}
J-Alves14c4a322024-04-25 13:57:09 +01001376
J-Alves06f430a2024-04-26 19:09:33 +01001377test_result_t base_ffa_memory_retrieve_request_fail_buffer_realm(bool delegate_rx,
1378 bool is_hypervisor_retrieve_req)
J-Alves14c4a322024-04-25 13:57:09 +01001379{
1380 struct mailbox_buffers mb;
1381 struct ffa_memory_access receivers[2] = {
1382 ffa_memory_access_init_permissions_from_mem_func(VM_ID(1),
1383 FFA_MEM_SHARE_SMC64),
1384 ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
1385 FFA_MEM_SHARE_SMC64),
1386 };
1387 ffa_memory_handle_t handle;
1388 u_register_t ret_rmm;
1389 struct ffa_value ret;
1390 size_t descriptor_size;
J-Alves8d6843a2024-04-25 14:17:52 +01001391 void *to_delegate;
J-Alves14c4a322024-04-25 13:57:09 +01001392
1393 GET_TFTF_MAILBOX(mb);
1394
J-Alves8d6843a2024-04-25 14:17:52 +01001395 to_delegate = delegate_rx ? mb.recv : mb.send;
1396
J-Alves14c4a322024-04-25 13:57:09 +01001397 if (get_armv9_2_feat_rme_support() == 0U) {
1398 return TEST_RESULT_SKIPPED;
1399 }
1400
1401 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1402
1403 handle = base_memory_send_for_nwd_retrieve(&mb, receivers, ARRAY_SIZE(receivers));
1404
1405 if (handle == FFA_MEMORY_HANDLE_INVALID) {
1406 return TEST_RESULT_FAIL;
1407 }
1408
J-Alves06f430a2024-04-26 19:09:33 +01001409 if (is_hypervisor_retrieve_req) {
1410 /* Prepare the hypervisor retrieve request. */
1411 ffa_hypervisor_retrieve_request_init(mb.send, handle);
1412 descriptor_size = sizeof(struct ffa_memory_region);
1413 } else {
1414 /* Prepare the descriptor before delegating the buffer. */
1415 descriptor_size = ffa_memory_retrieve_request_init(
1416 mb.send, handle, SENDER, receivers, ARRAY_SIZE(receivers),
1417 0, 0, FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
1418 FFA_MEMORY_INNER_SHAREABLE);
1419 }
J-Alves14c4a322024-04-25 13:57:09 +01001420
J-Alves8d6843a2024-04-25 14:17:52 +01001421 /* Delegate buffer to realm. */
1422 ret_rmm = host_rmi_granule_delegate((u_register_t)to_delegate);
J-Alves14c4a322024-04-25 13:57:09 +01001423
1424 if (ret_rmm != 0UL) {
1425 ERROR("Delegate operation returns %#lx for address %p\n",
1426 ret_rmm, mb.send);
1427 return TEST_RESULT_FAIL;
1428 }
1429
1430 ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
1431
1432 if (!is_expected_ffa_error(ret, FFA_ERROR_ABORTED)) {
1433 return TEST_RESULT_FAIL;
1434 }
1435
1436 /* Undelegate to reestablish the same security state for PAS. */
J-Alves8d6843a2024-04-25 14:17:52 +01001437 ret_rmm = host_rmi_granule_undelegate((u_register_t)to_delegate);
J-Alves14c4a322024-04-25 13:57:09 +01001438
1439 if (ret_rmm != 0UL) {
1440 ERROR("Undelegate operation returns %#lx for address %p\n",
1441 ret_rmm, mb.send);
1442 return TEST_RESULT_FAIL;
1443 }
1444
J-Alves09d34fc2024-07-17 14:35:26 +01001445 if (is_hypervisor_retrieve_req) {
1446 /* Prepare the hypervisor retrieve request. */
1447 ffa_hypervisor_retrieve_request_init(mb.send, handle);
1448 descriptor_size = sizeof(struct ffa_memory_region);
1449 } else {
1450 /* Prepare the descriptor before delegating the buffer. */
1451 descriptor_size = ffa_memory_retrieve_request_init(
1452 mb.send, handle, SENDER, receivers, ARRAY_SIZE(receivers),
1453 0, 0, FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
1454 FFA_MEMORY_INNER_SHAREABLE);
1455 }
1456
J-Alves14c4a322024-04-25 13:57:09 +01001457 /* Retry the memory retrieve request, but this time expect success. */
1458 ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
1459
1460 if (is_ffa_call_error(ret)) {
1461 return TEST_RESULT_FAIL;
1462 }
1463
1464 ffa_rx_release();
1465
J-Alves06f430a2024-04-26 19:09:33 +01001466 if (!is_hypervisor_retrieve_req &&
1467 !memory_relinquish(mb.send, handle, VM_ID(1))) {
J-Alves14c4a322024-04-25 13:57:09 +01001468 ERROR("%s: Failed to relinquish.\n", __func__);
1469 return TEST_RESULT_FAIL;
1470 }
1471
1472 if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
1473 ERROR("%s: Failed to reclaim memory.\n", __func__);
1474 return TEST_RESULT_FAIL;
1475 }
1476
1477 return TEST_RESULT_SUCCESS;
1478}
J-Alves8d6843a2024-04-25 14:17:52 +01001479
1480/**
1481 * Test that a retrieve request from the hypervisor would fail if the TX buffer
1482 * was in realm state. This is recreating the situation in which the Hyp doesn't
1483 * track the state of the operation, and it is forwarding the retrieve request
1484 * to the SPMC.
1485 */
1486test_result_t test_ffa_memory_retrieve_request_fail_tx_realm(void)
1487{
J-Alves06f430a2024-04-26 19:09:33 +01001488 return base_ffa_memory_retrieve_request_fail_buffer_realm(false, false);
J-Alves8d6843a2024-04-25 14:17:52 +01001489}
1490
1491/**
1492 * Test that a retrieve request from the hypervisor would fail if the RX buffer
1493 * was in realm state. This is recreating the situation in which the Hyp doesn't
1494 * track the state of the operation, and it is forwarding the retrieve request
1495 * to the SPMC. The operation shall fail at the point at which the SPMC is
1496 * providing retrieve response. The SPMC should have reverted the change to any
1497 * of its share state tracking structures, such that the final reclaim would be
1498 * possible.
1499 */
1500test_result_t test_ffa_memory_retrieve_request_fail_rx_realm(void)
1501{
J-Alves06f430a2024-04-26 19:09:33 +01001502 return base_ffa_memory_retrieve_request_fail_buffer_realm(true, false);
J-Alves8d6843a2024-04-25 14:17:52 +01001503}
J-Alves4e6fa5b2024-04-26 16:24:07 +01001504
1505/**
1506 * Test that a memory relinquish call fails smoothly if the TX buffer of the
1507 * Hypervisor is on realm PAS.
1508 */
1509test_result_t test_ffa_memory_relinquish_fail_tx_realm(void)
1510{
1511 struct mailbox_buffers mb;
1512 struct ffa_memory_region *m;
1513 const ffa_id_t vm_id = VM_ID(1);
1514 struct ffa_memory_access receivers[2] = {
1515 ffa_memory_access_init_permissions_from_mem_func(vm_id,
1516 FFA_MEM_SHARE_SMC64),
1517 ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
1518 FFA_MEM_SHARE_SMC64),
1519 };
1520 struct ffa_value ret;
1521 ffa_memory_handle_t handle;
1522 u_register_t ret_rmm;
1523
1524 GET_TFTF_MAILBOX(mb);
1525
1526 if (get_armv9_2_feat_rme_support() == 0U) {
1527 return TEST_RESULT_SKIPPED;
1528 }
1529
1530 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1531
1532 handle = base_memory_send_for_nwd_retrieve(&mb, receivers, ARRAY_SIZE(receivers));
1533
1534 if (handle == FFA_MEMORY_HANDLE_INVALID) {
1535 return TEST_RESULT_FAIL;
1536 }
1537
Daniel Boulby3d8cd682024-07-23 14:28:15 +01001538 if (!memory_retrieve(&mb, &m, handle, 0, receivers, ARRAY_SIZE(receivers),
1539 0, true)) {
J-Alves4e6fa5b2024-04-26 16:24:07 +01001540 ERROR("Failed to retrieve the memory.\n");
1541 return TEST_RESULT_FAIL;
1542 }
1543
1544 /* Prepare relinquish descriptor before calling ffa_mem_relinquish. */
1545 ffa_mem_relinquish_init(mb.send, handle, 0, vm_id);
1546
1547 /*
1548 * Delegate page to a realm. This should make memory sharing operation
1549 * fail.
1550 */
1551 ret_rmm = host_rmi_granule_delegate((u_register_t)mb.send);
1552 if (ret_rmm != 0UL) {
1553 ERROR("Delegate operation returns 0x%lx for address %llx\n",
1554 ret_rmm, (uint64_t)mb.send);
1555 return TEST_RESULT_FAIL;
1556 }
1557
1558 /* Access to Realm region from SPMC should return FFA_ERROR_ABORTED. */
1559 ret = ffa_mem_relinquish();
1560 if (!is_expected_ffa_error(ret, FFA_ERROR_ABORTED)) {
1561 return TEST_RESULT_FAIL;
1562 }
1563
1564 /* Undelegate to reestablish the same security state for PAS. */
1565 ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.send);
1566 if (ret_rmm != 0UL) {
1567 ERROR("Undelegate operation returns 0x%lx for address %llx\n",
1568 ret_rmm, (uint64_t)mb.send);
1569 return TEST_RESULT_FAIL;
1570 }
1571
J-Alves09d34fc2024-07-17 14:35:26 +01001572 /* Prepare the descriptor. */
1573 ffa_mem_relinquish_init(mb.send, handle, 0, vm_id);
1574
J-Alves4e6fa5b2024-04-26 16:24:07 +01001575 /* After undelegate the relinquish is expected to succeed. */
1576 ret = ffa_mem_relinquish();
1577
1578 if (is_ffa_call_error(ret)) {
1579 ERROR("Expected relinquish to succeed\n");
1580 return TEST_RESULT_FAIL;
1581 }
1582
1583 ret = ffa_mem_reclaim(handle, 0);
1584 if (is_ffa_call_error(ret)) {
1585 ERROR("Memory reclaim failed!\n");
1586 return TEST_RESULT_FAIL;
1587 }
1588
J-Alvesc362de32024-06-20 12:50:14 +01001589 ffa_rx_release();
1590
J-Alves4e6fa5b2024-04-26 16:24:07 +01001591 return TEST_RESULT_SUCCESS;
1592}
J-Alves06f430a2024-04-26 19:09:33 +01001593
1594/**
1595 * Test that a hypervisor retrieve request would fail if the TX buffer
1596 * was in realm PAS.
1597 * The hypervisor retrieve request normally happens during an FFA_MEM_RECLAIM.
1598 * This validates that the SPMC is able to recover from a GPF from accessing the
1599 * TX buffer when reading the hypervisor retrieve request message.
1600 */
1601test_result_t test_ffa_hypervisor_retrieve_request_fail_tx_realm(void)
1602{
1603 return base_ffa_memory_retrieve_request_fail_buffer_realm(false, true);
1604}
1605
1606/**
1607 * Test that a hypervisor retrieve request would fail if the RX buffer
1608 * was in realm PAS.
1609 * The hypervisor retrieve request normally happens during an FFA_MEM_RECLAIM.
1610 * This validates the SPMC is able to recover from a GPF from accessing the RX
1611 * buffer when preparing the retrieve response.
1612 */
1613test_result_t test_ffa_hypervisor_retrieve_request_fail_rx_realm(void)
1614{
1615 return base_ffa_memory_retrieve_request_fail_buffer_realm(true, true);
1616}
J-Alvesc362de32024-06-20 12:50:14 +01001617
1618/**
1619 * Do a memory sharing operation over two fragments.
1620 * Before the 2nd fragment the TX buffer is set in the realm PAS.
1621 * The SPMC should fault, recover from it and return ffa_error(FFA_ERROR_ABORTED).
1622 */
1623test_result_t test_ffa_memory_share_fragmented_tx_realm(void)
1624{
1625 struct mailbox_buffers mb;
1626 uint32_t remaining_constituent_count = 0;
1627 uint32_t total_length;
1628 uint32_t fragment_length;
1629 struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
1630 SP_ID(1), FFA_MEM_SHARE_SMC32);
1631 struct ffa_memory_region_constituent constituents[] = {
1632 {(void *)four_share_pages, 4, 0},
1633 {(void *)share_page, 1, 0}
1634 };
1635 struct ffa_value ffa_ret;
1636 u_register_t ret_rmm;
1637 test_result_t ret;
1638 uint64_t handle;
1639
1640 if (get_armv9_2_feat_rme_support() == 0U) {
1641 return TEST_RESULT_SKIPPED;
1642 }
1643
1644 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1645
1646 GET_TFTF_MAILBOX(mb);
1647
1648 register_custom_sync_exception_handler(data_abort_handler);
1649
1650 /* Only send one constituent to start with. */
1651 remaining_constituent_count = ffa_memory_region_init(
1652 (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
1653 &receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
1654 0, FFA_MEMORY_NOT_SPECIFIED_MEM,
1655 FFA_MEMORY_CACHE_WRITE_BACK,
1656 FFA_MEMORY_INNER_SHAREABLE,
1657 &total_length, &fragment_length);
1658
1659 /* It should have copied them all. */
1660 if (remaining_constituent_count > 0) {
1661 ERROR("Transaction descriptor initialization failed!\n");
1662 ret = TEST_RESULT_FAIL;
1663 goto exit;
1664 }
1665
1666 /*
1667 * Take the size of a constituent from the fragment to force the
1668 * operation to be fragmented.
1669 */
1670 fragment_length -= sizeof(struct ffa_memory_region_constituent);
1671
1672 ffa_ret = ffa_mem_share(total_length, fragment_length);
1673
1674 if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
1675 ERROR("Expected %s after the memory share.\n",
1676 ffa_func_name(FFA_MEM_FRAG_RX));
1677 ret = TEST_RESULT_FAIL;
1678 goto exit;
1679 }
1680
1681 handle = ffa_frag_handle(ffa_ret);
1682
1683 if (handle == FFA_MEMORY_HANDLE_INVALID) {
1684 ERROR("SPMC returned an invalid handle for the operation.\n");
1685 ret = TEST_RESULT_FAIL;
1686 goto exit;
1687 }
1688
1689 /* Prepare the next fragment for the operation. */
1690 remaining_constituent_count = ffa_memory_fragment_init(
1691 mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_length);
1692
1693 /*
1694 * Delegate send/tx buffer to a realm. This should make memory sharing operation
1695 * fail.
1696 */
1697 ret_rmm = host_rmi_granule_delegate((u_register_t)mb.send);
1698
1699 if (ret_rmm != 0UL) {
1700 INFO("Delegate operation returns 0x%lx for address %p\n",
1701 ret_rmm, mb.send);
1702 ret = TEST_RESULT_FAIL;
1703 goto exit;
1704 }
1705
1706 ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
1707
1708 if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
1709 ret = TEST_RESULT_FAIL;
1710 goto exit;
1711 }
1712
1713 /* Undelegate to reestablish the same security state for PAS. */
1714 ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.send);
1715 if (ret_rmm != 0UL) {
1716 ERROR("Undelegate operation returns 0x%lx for address %llx\n",
1717 ret_rmm, (uint64_t)mb.send);
1718 ret = TEST_RESULT_FAIL;
1719 goto exit;
1720 }
1721
Olivier Deprez6ab3fe92024-12-05 11:46:28 +01001722 /* Undelegate operation scrubbed the TX buffer, re-init the fragment. */
1723 remaining_constituent_count = ffa_memory_fragment_init(
1724 mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_length);
1725
1726 /* This time the test is expected to pass. */
J-Alvesc362de32024-06-20 12:50:14 +01001727 ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
1728
1729 if (is_ffa_call_error(ffa_ret)) {
1730 ret = TEST_RESULT_FAIL;
1731 goto exit;
1732 }
1733
1734 /* Reclaim memory to be able to reuse it. */
1735 ffa_ret = ffa_mem_reclaim(handle, 0);
1736
1737 if (is_ffa_call_error(ffa_ret)) {
1738 ERROR("Failed to reclaim memory to be used in next test\n");
1739 ret = TEST_RESULT_FAIL;
1740 goto exit;
1741 }
1742
1743 ret = TEST_RESULT_SUCCESS;
1744
1745exit:
1746 unregister_custom_sync_exception_handler();
1747
1748 return ret;
1749}
J-Alvesbd2fd4e2024-10-15 11:31:54 +01001750
1751/**
1752 * Do a memory sharing operation over two fragments.
1753 * Before the 2nd fragment the RX buffer is set in the realm PAS.
1754 * The SPMC should fault, recover from it and return
1755 * ffa_error(FFA_ERROR_ABORTED).
1756 *
1757 * Test Sequence:
1758 * - Share memory with SP(1), using a force fragmented approach.
1759 * - Initiate an hypervisor retrieve request, and retrieve only
1760 * the first fragment.
1761 * - Change the physical address space of NWd RX buffer.
1762 * - Invoke the FFA_MEM_FRAG_RX interface, which should abort because
1763 * of previous step.
1764 * - Reestablish the PAS of the NWd RX buffer.
1765 * - Contiueing with hypervisor retrieve request, and obtain the 2nd
1766 * fragment.
1767 * - Reclaim memory for clean-up of SPMC state.
1768 */
1769test_result_t test_ffa_memory_share_fragmented_rx_realm(void)
1770{
1771 struct mailbox_buffers mb;
1772 uint32_t remaining_constituent_count = 0;
1773 uint32_t total_size;
1774 uint32_t fragment_size;
1775 uint32_t fragment_offset;
1776 struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
1777 SP_ID(1), FFA_MEM_SHARE_SMC32);
1778 struct ffa_memory_region_constituent constituents[] = {
1779 {(void *)four_share_pages, 4, 0},
1780 {(void *)share_page, 1, 0}
1781 };
1782 struct ffa_value ffa_ret;
1783 u_register_t ret_rmm;
1784 test_result_t ret;
1785 uint64_t handle;
1786
1787 if (get_armv9_2_feat_rme_support() == 0U) {
1788 return TEST_RESULT_SKIPPED;
1789 }
1790
1791 CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
1792
1793 GET_TFTF_MAILBOX(mb);
1794
1795 register_custom_sync_exception_handler(data_abort_handler);
1796
1797 /* Only send one constituent to start with. */
1798 remaining_constituent_count = ffa_memory_region_init(
1799 (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
1800 &receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
1801 0, FFA_MEMORY_NOT_SPECIFIED_MEM,
1802 FFA_MEMORY_CACHE_WRITE_BACK,
1803 FFA_MEMORY_INNER_SHAREABLE,
1804 &total_size, &fragment_size);
1805
1806 /* It should have copied them all. */
1807 if (remaining_constituent_count > 0) {
1808 ERROR("Transaction descriptor initialization failed!\n");
1809 ret = TEST_RESULT_FAIL;
1810 goto exit;
1811 }
1812
1813 /*
1814 * Take the size of a constituent from the fragment to force the
1815 * operation to be fragmented.
1816 */
1817 fragment_size -= sizeof(struct ffa_memory_region_constituent);
1818
1819 ffa_ret = ffa_mem_share(total_size, fragment_size);
1820
1821 if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
1822 ERROR("Expected %s after the memory share.\n",
1823 ffa_func_name(FFA_MEM_FRAG_RX));
1824 ret = TEST_RESULT_FAIL;
1825 goto exit;
1826 }
1827
1828 handle = ffa_frag_handle(ffa_ret);
1829
1830 if (handle == FFA_MEMORY_HANDLE_INVALID) {
1831 ERROR("SPMC returned an invalid handle for the operation.\n");
1832 ret = TEST_RESULT_FAIL;
1833 goto exit;
1834 }
1835
1836 /* Prepare the next fragment for the operation. */
1837 remaining_constituent_count = ffa_memory_fragment_init(
1838 mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_size);
1839
1840 ffa_ret = ffa_mem_frag_tx(handle, fragment_size);
1841
1842 if (is_ffa_call_error(ffa_ret)) {
1843 ret = TEST_RESULT_FAIL;
1844 goto exit;
1845 }
1846
1847 /*
1848 * Request the hypervisor retrieve request.
1849 * Response should be fragmented.
1850 */
1851 ffa_hypervisor_retrieve_request_init(mb.send, handle);
1852 ffa_ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
1853 sizeof(struct ffa_memory_region));
1854
1855 if (ffa_func_id(ffa_ret) != FFA_MEM_RETRIEVE_RESP) {
1856 ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
1857 __func__, ffa_error_code(ffa_ret));
1858 ret = TEST_RESULT_FAIL;
1859 goto exit;
1860 }
1861
1862 total_size = ffa_mem_retrieve_res_total_size(ffa_ret);
1863 fragment_size = ffa_mem_retrieve_res_frag_size(ffa_ret);
1864 fragment_offset = fragment_size;
1865
1866 ret_rmm = host_rmi_granule_delegate((u_register_t)mb.recv);
1867
1868 if (ret_rmm != 0UL) {
1869 INFO("Delegate operation returns 0x%lx for address %p\n",
1870 ret_rmm, mb.send);
1871 ret = TEST_RESULT_FAIL;
1872 goto exit;
1873 }
1874
1875 ffa_ret = ffa_rx_release();
1876 if (is_ffa_call_error(ffa_ret)) {
1877 ERROR("ffa_rx_release() failed.\n");
1878 ret = TEST_RESULT_FAIL;
1879 goto exit;
1880 }
1881
1882 /* Call FFA_MEM_FRAG_RX but expect it to abort. */
1883 ffa_ret = ffa_mem_frag_rx(handle, fragment_offset);
1884
1885 if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
1886 ERROR("Expected FFA_MEM_FRAG_RX to have failed with"
1887 "FFA_ERROR_ABORTED.\n");
1888 ret = TEST_RESULT_FAIL;
1889 goto exit;
1890 }
1891
1892 /* Undelegate to reestablish the same security state for PAS. */
1893 ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.recv);
1894 if (ret_rmm != 0UL) {
1895 ERROR("Undelegate operation returns 0x%lx for address %llx\n",
1896 ret_rmm, (uint64_t)mb.send);
1897 ret = TEST_RESULT_FAIL;
1898 goto exit;
1899 }
1900
1901 /* Continue the hypervisor retrieve request. */
1902 if (!hypervisor_retrieve_request_continue(
1903 &mb, handle, NULL, 0, total_size, fragment_offset, false)) {
1904 ERROR("Failed to continue hypervisor retrieve request after"
1905 " restablishing PAS.\n");
1906 ret = TEST_RESULT_FAIL;
1907 goto exit;
1908 }
1909
1910 /* Reclaim memory to be able to reuse it. */
1911 ffa_ret = ffa_mem_reclaim(handle, 0);
1912
1913 if (is_ffa_call_error(ffa_ret)) {
1914 ERROR("Failed to reclaim memory to be used in next test\n");
1915 ret = TEST_RESULT_FAIL;
1916 goto exit;
1917 }
1918
1919 ret = TEST_RESULT_SUCCESS;
1920
1921exit:
1922 unregister_custom_sync_exception_handler();
1923
1924 return ret;
1925}