blob: e840b2f2c00dd62b99cd52113f85c8f705a7035b [file] [log] [blame]
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001/*
2 * Copyright 2018 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01007 */
8
9#include "hf/ffa.h"
10
J-Alves8d6a35e2022-01-24 14:26:55 +000011#include "hf/check.h"
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010012#include "hf/mm.h"
13#include "hf/static_assert.h"
14
15#include "vmapi/hf/call.h"
16
17#include "test/hftest.h"
18#include "test/vmapi/ffa.h"
19
/* Page-aligned, page-sized buffers used as this partition's TX/RX mailbox. */
static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

/* Addresses of the mailbox pages, in the form FFA_RXTX_MAP expects. */
static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
27
28struct mailbox_buffers set_up_mailbox(void)
29{
30 ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
31 FFA_SUCCESS_32);
32 return (struct mailbox_buffers){
33 .send = send_page,
34 .recv = recv_page,
35 };
36}
37
38/*
39 * Helper function to send memory to a VM then send a message with the retrieve
40 * request it needs to retrieve it.
41 */
42ffa_memory_handle_t send_memory_and_retrieve_request(
43 uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
44 ffa_vm_id_t recipient,
45 struct ffa_memory_region_constituent constituents[],
J-Alves42a6f172022-04-07 11:46:37 +010046 uint32_t constituent_count, ffa_memory_region_flags_t send_flags,
47 ffa_memory_region_flags_t retrieve_flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010048 enum ffa_data_access send_data_access,
49 enum ffa_data_access retrieve_data_access,
50 enum ffa_instruction_access send_instruction_access,
51 enum ffa_instruction_access retrieve_instruction_access)
52{
Andrew Walbranca808b12020-05-15 17:22:28 +010053 uint32_t total_length;
54 uint32_t fragment_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010055 uint32_t msg_size;
56 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +010057 const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
58 ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010059 ffa_memory_handle_t handle;
Andrew Walbranca808b12020-05-15 17:22:28 +010060 uint32_t remaining_constituent_count;
61 uint32_t sent_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010062
Andrew Walbranca808b12020-05-15 17:22:28 +010063 /* Send the first fragment of the memory. */
64 remaining_constituent_count = ffa_memory_region_init(
65 tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
J-Alves42a6f172022-04-07 11:46:37 +010066 constituent_count, 0, send_flags, send_data_access,
J-Alves807794e2022-06-16 13:42:47 +010067 send_instruction_access,
68 share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
69 : FFA_MEMORY_NOT_SPECIFIED_MEM,
Olivier Deprezc3eb3b82020-10-25 07:01:48 +010070 FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
Andrew Walbranca808b12020-05-15 17:22:28 +010071 &total_length, &fragment_length);
72 if (remaining_constituent_count == 0) {
73 EXPECT_EQ(total_length, fragment_length);
74 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010075 switch (share_func) {
76 case FFA_MEM_DONATE_32:
Andrew Walbranca808b12020-05-15 17:22:28 +010077 ret = ffa_mem_donate(total_length, fragment_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010078 break;
79 case FFA_MEM_LEND_32:
Andrew Walbranca808b12020-05-15 17:22:28 +010080 ret = ffa_mem_lend(total_length, fragment_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010081 break;
82 case FFA_MEM_SHARE_32:
Andrew Walbranca808b12020-05-15 17:22:28 +010083 ret = ffa_mem_share(total_length, fragment_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010084 break;
85 default:
86 FAIL("Invalid share_func %#x.\n", share_func);
87 /* Never reached, but needed to keep clang-analyser happy. */
88 return 0;
89 }
Andrew Walbranca808b12020-05-15 17:22:28 +010090 sent_length = fragment_length;
91
92 /* Send the remaining fragments. */
93 while (remaining_constituent_count != 0) {
94 dlog_verbose("%d constituents left to send.\n",
95 remaining_constituent_count);
96 EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
97 if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
98 fragment_handle = ffa_frag_handle(ret);
99 } else {
100 EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
101 }
102 EXPECT_EQ(ret.arg3, sent_length);
103 /* Sender MBZ at virtual instance. */
104 EXPECT_EQ(ffa_frag_sender(ret), 0);
105
106 remaining_constituent_count = ffa_memory_fragment_init(
107 tx_buffer, HF_MAILBOX_SIZE,
108 constituents + constituent_count -
109 remaining_constituent_count,
110 remaining_constituent_count, &fragment_length);
111
112 ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
113 sent_length += fragment_length;
114 }
115
116 EXPECT_EQ(sent_length, total_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100117 EXPECT_EQ(ret.func, FFA_SUCCESS_32);
118 handle = ffa_mem_success_handle(ret);
Andrew Walbran1bbe9402020-04-30 16:47:13 +0100119 EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
120 FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
Andrew Walbranca808b12020-05-15 17:22:28 +0100121 if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
122 EXPECT_EQ(handle, fragment_handle);
123 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100124
125 /*
126 * Send the appropriate retrieve request to the VM so that it can use it
127 * to retrieve the memory.
128 */
129 msg_size = ffa_memory_retrieve_request_init(
J-Alves42a6f172022-04-07 11:46:37 +0100130 tx_buffer, handle, sender, recipient, 0, retrieve_flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100131 retrieve_data_access, retrieve_instruction_access,
132 FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
Olivier Deprezc3eb3b82020-10-25 07:01:48 +0100133 FFA_MEMORY_INNER_SHAREABLE);
Andrew Walbranca808b12020-05-15 17:22:28 +0100134 EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
135 EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
136 FFA_SUCCESS_32);
137
138 return handle;
139}
140
141/*
142 * Helper function to send memory to a VM then send a message with the retrieve
143 * request it needs to retrieve it, forcing the request to be made in at least
144 * two fragments even if it could fit in one.
145 */
146ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
147 uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
148 ffa_vm_id_t recipient,
149 struct ffa_memory_region_constituent constituents[],
150 uint32_t constituent_count, ffa_memory_region_flags_t flags,
151 enum ffa_data_access send_data_access,
152 enum ffa_data_access retrieve_data_access,
153 enum ffa_instruction_access send_instruction_access,
154 enum ffa_instruction_access retrieve_instruction_access)
155{
156 uint32_t total_length;
157 uint32_t fragment_length;
158 uint32_t msg_size;
159 uint32_t remaining_constituent_count;
160 struct ffa_value ret;
161 ffa_memory_handle_t handle;
162
163 /* Send everything except the last constituent in the first fragment. */
164 remaining_constituent_count = ffa_memory_region_init(
165 tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
166 constituent_count, 0, flags, send_data_access,
J-Alves807794e2022-06-16 13:42:47 +0100167 send_instruction_access,
168 share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
169 : FFA_MEMORY_NOT_SPECIFIED_MEM,
Olivier Deprezc3eb3b82020-10-25 07:01:48 +0100170 FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
Andrew Walbranca808b12020-05-15 17:22:28 +0100171 &total_length, &fragment_length);
172 EXPECT_EQ(remaining_constituent_count, 0);
173 EXPECT_EQ(total_length, fragment_length);
174 /* Don't include the last constituent in the first fragment. */
175 fragment_length -= sizeof(struct ffa_memory_region_constituent);
176 switch (share_func) {
177 case FFA_MEM_DONATE_32:
178 ret = ffa_mem_donate(total_length, fragment_length);
179 break;
180 case FFA_MEM_LEND_32:
181 ret = ffa_mem_lend(total_length, fragment_length);
182 break;
183 case FFA_MEM_SHARE_32:
184 ret = ffa_mem_share(total_length, fragment_length);
185 break;
186 default:
187 FAIL("Invalid share_func %#x.\n", share_func);
188 /* Never reached, but needed to keep clang-analyser happy. */
189 return 0;
190 }
191 EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
192 EXPECT_EQ(ret.arg3, fragment_length);
193 /* Sender MBZ at virtual instance. */
194 EXPECT_EQ(ffa_frag_sender(ret), 0);
195
196 handle = ffa_frag_handle(ret);
197
198 /* Send the last constituent in a separate fragment. */
199 remaining_constituent_count = ffa_memory_fragment_init(
200 tx_buffer, HF_MAILBOX_SIZE,
201 &constituents[constituent_count - 1], 1, &fragment_length);
202 EXPECT_EQ(remaining_constituent_count, 0);
203 ret = ffa_mem_frag_tx(handle, fragment_length);
204 EXPECT_EQ(ret.func, FFA_SUCCESS_32);
205 EXPECT_EQ(ffa_mem_success_handle(ret), handle);
206
207 /*
208 * Send the appropriate retrieve request to the VM so that it can use it
209 * to retrieve the memory.
210 */
211 msg_size = ffa_memory_retrieve_request_init(
212 tx_buffer, handle, sender, recipient, 0, 0,
213 retrieve_data_access, retrieve_instruction_access,
214 FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
Olivier Deprezc3eb3b82020-10-25 07:01:48 +0100215 FFA_MEMORY_INNER_SHAREABLE);
Andrew Walbranca808b12020-05-15 17:22:28 +0100216 EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100217 EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
218 FFA_SUCCESS_32);
219
220 return handle;
221}
222
223/*
224 * Use the retrieve request from the receive buffer to retrieve a memory region
Andrew Walbranca808b12020-05-15 17:22:28 +0100225 * which has been sent to us. Copies all the fragments into the provided buffer
226 * if any, and checks that the total length of all fragments is no more than
227 * `memory_region_max_size`. Returns the sender, and the handle via a return
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100228 * parameter.
229 */
Andrew Walbranca808b12020-05-15 17:22:28 +0100230ffa_vm_id_t retrieve_memory_from_message(
231 void *recv_buf, void *send_buf, struct ffa_value msg_ret,
232 ffa_memory_handle_t *handle,
233 struct ffa_memory_region *memory_region_ret,
234 size_t memory_region_max_size)
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100235{
236 uint32_t msg_size;
237 struct ffa_value ret;
238 struct ffa_memory_region *memory_region;
239 ffa_vm_id_t sender;
Andrew Walbranca808b12020-05-15 17:22:28 +0100240 struct ffa_memory_region *retrieve_request;
241 ffa_memory_handle_t handle_;
242 uint32_t fragment_length;
243 uint32_t total_length;
244 uint32_t fragment_offset;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100245
246 EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
247 msg_size = ffa_msg_send_size(msg_ret);
J-Alvesd6f4e142021-03-05 13:33:59 +0000248 sender = ffa_sender(msg_ret);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100249
Andrew Walbranca808b12020-05-15 17:22:28 +0100250 retrieve_request = (struct ffa_memory_region *)recv_buf;
251 handle_ = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100252 if (handle != NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +0100253 *handle = handle_;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100254 }
255 memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
256 ffa_rx_release();
257 ret = ffa_mem_retrieve_req(msg_size, msg_size);
258 EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
Andrew Walbranca808b12020-05-15 17:22:28 +0100259 total_length = ret.arg1;
260 fragment_length = ret.arg2;
261 EXPECT_GE(fragment_length,
262 sizeof(struct ffa_memory_region) +
263 sizeof(struct ffa_memory_access) +
264 sizeof(struct ffa_composite_memory_region));
265 EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
266 EXPECT_LE(fragment_length, total_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100267 memory_region = (struct ffa_memory_region *)recv_buf;
268 EXPECT_EQ(memory_region->receiver_count, 1);
269 EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
270 hf_vm_get_id());
271
Andrew Walbranca808b12020-05-15 17:22:28 +0100272 /* Copy into the return buffer. */
273 if (memory_region_ret != NULL) {
274 memcpy_s(memory_region_ret, memory_region_max_size,
275 memory_region, fragment_length);
276 }
277
278 /*
279 * Release the RX buffer now that we have read everything we need from
280 * it.
281 */
282 memory_region = NULL;
283 EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
284
285 /* Retrieve the remaining fragments. */
286 fragment_offset = fragment_length;
287 while (fragment_offset < total_length) {
288 ret = ffa_mem_frag_rx(handle_, fragment_offset);
289 EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
290 EXPECT_EQ(ffa_frag_handle(ret), handle_);
291 /* Sender MBZ at virtual instance. */
292 EXPECT_EQ(ffa_frag_sender(ret), 0);
293 fragment_length = ret.arg3;
294 EXPECT_GT(fragment_length, 0);
295 ASSERT_LE(fragment_offset + fragment_length,
296 memory_region_max_size);
297 if (memory_region_ret != NULL) {
298 memcpy_s((uint8_t *)memory_region_ret + fragment_offset,
299 memory_region_max_size - fragment_offset,
300 recv_buf, fragment_length);
301 }
302 fragment_offset += fragment_length;
303 EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
304 }
305 EXPECT_EQ(fragment_offset, total_length);
306
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100307 return sender;
308}
309
310/*
311 * Use the retrieve request from the receive buffer to retrieve a memory region
312 * which has been sent to us, expecting it to fail with the given error code.
313 * Returns the sender.
314 */
315ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
316 void *send_buf,
317 struct ffa_value msg_ret,
318 int32_t expected_error)
319{
320 uint32_t msg_size;
321 struct ffa_value ret;
322 ffa_vm_id_t sender;
323
324 EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
325 msg_size = ffa_msg_send_size(msg_ret);
J-Alvesd6f4e142021-03-05 13:33:59 +0000326 sender = ffa_sender(msg_ret);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100327
328 memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
329 ffa_rx_release();
330 ret = ffa_mem_retrieve_req(msg_size, msg_size);
331 EXPECT_FFA_ERROR(ret, expected_error);
332
333 return sender;
334}
J-Alves8d6a35e2022-01-24 14:26:55 +0000335
336ffa_vm_count_t get_ffa_partition_info(struct ffa_uuid *uuid,
337 struct ffa_partition_info *info,
338 size_t info_size)
339{
340 struct ffa_value ret;
341 struct ffa_partition_info *ret_info = set_up_mailbox().recv;
342
343 CHECK(uuid != NULL);
344 CHECK(info != NULL);
345
346 ffa_version(MAKE_FFA_VERSION(1, 1));
347
348 ret = ffa_partition_info_get(uuid, 0);
349
350 if (ffa_func_id(ret) != FFA_SUCCESS_32) {
351 return 0;
352 }
353
354 if (ret.arg2 != 0) {
355 size_t src_size = ret.arg2 * sizeof(struct ffa_partition_info);
356 size_t dest_size =
357 info_size * sizeof(struct ffa_partition_info);
358
359 memcpy_s(info, dest_size, ret_info, src_size);
360 }
361
362 ffa_rx_release();
363
364 return ret.arg2;
365}
J-Alves57caa382022-01-27 13:54:50 +0000366
367/**
368 * Dump the boot information passed to the partition.
369 */
370void dump_boot_info(struct ffa_boot_info_header *boot_info_header)
371{
372 struct ffa_boot_info_desc *boot_info_desc;
373
374 if (boot_info_header == NULL) {
375 HFTEST_LOG("SP doesn't have boot arguments!\n");
376 return;
377 }
378
379 HFTEST_LOG("SP boot info (%x):", (uintptr_t)boot_info_header);
380 HFTEST_LOG(" Signature: %x", boot_info_header->signature);
381 HFTEST_LOG(" Version: %x", boot_info_header->version);
382 HFTEST_LOG(" Blob Size: %u", boot_info_header->info_blob_size);
383 HFTEST_LOG(" Descriptor Size: %u", boot_info_header->desc_size);
384 HFTEST_LOG(" Descriptor Count: %u", boot_info_header->desc_count);
385
386 boot_info_desc = boot_info_header->boot_info;
387
388 if (boot_info_desc == NULL) {
389 dlog_error("Boot data arguments error...");
390 return;
391 }
392
393 for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
394 HFTEST_LOG(" Type: %u", boot_info_desc[i].type);
395 HFTEST_LOG(" Flags:");
396 HFTEST_LOG(" Name Format: %x",
397 ffa_boot_info_name_format(&boot_info_desc[i]));
398 HFTEST_LOG(" Content Format: %x",
399 ffa_boot_info_content_format(&boot_info_desc[i]));
400 HFTEST_LOG(" Size: %u", boot_info_desc[i].size);
401 HFTEST_LOG(" Value: %x", boot_info_desc[i].content);
402 }
403}
404
405/**
406 * Retrieve the boot info descriptor related to the provided type and type ID.
407 */
408struct ffa_boot_info_desc *get_boot_info_desc(
409 struct ffa_boot_info_header *boot_info_header, uint8_t type,
410 uint8_t type_id)
411{
412 struct ffa_boot_info_desc *boot_info_desc;
413
414 assert(boot_info_header != NULL);
415
416 ASSERT_EQ(boot_info_header->signature, 0xFFAU);
417 ASSERT_EQ(boot_info_header->version, 0x10001U);
418 ASSERT_EQ(boot_info_header->desc_size,
419 sizeof(struct ffa_boot_info_desc));
420 ASSERT_EQ((uintptr_t)boot_info_header + boot_info_header->desc_offset,
421 (uintptr_t)boot_info_header->boot_info);
422
423 boot_info_desc = boot_info_header->boot_info;
424
425 for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
426 if (ffa_boot_info_type_id(&boot_info_desc[i]) == type_id &&
427 ffa_boot_info_type(&boot_info_desc[i]) == type) {
428 return &boot_info_desc[i];
429 }
430 }
431
432 return NULL;
433}