/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include "hf/check.h"
#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

struct mailbox_buffers set_up_mailbox(void)
{
	ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	return (struct mailbox_buffers){
		.send = send_page,
		.recv = recv_page,
	};
}

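/*
 * Usage sketch (illustrative only, not part of this library): a test VM
 * typically maps its mailbox once and reuses the returned buffers for all
 * subsequent FF-A calls. `SERVICE_VM1` stands in for a recipient ID defined
 * by the enclosing test harness.
 *
 *	struct mailbox_buffers mb = set_up_mailbox();
 *	const char message[] = "ping";
 *
 *	memcpy_s(mb.send, FFA_MSG_PAYLOAD_MAX, message, sizeof(message));
 *	EXPECT_EQ(ffa_msg_send(hf_vm_get_id(), SERVICE_VM1, sizeof(message),
 *			       0).func,
 *		  FFA_SUCCESS_32);
 */
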
/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve that memory.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	struct ffa_value ret;
	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
	ffa_memory_handle_t handle;
	uint32_t remaining_constituent_count;
	uint32_t sent_length;

	/* Send the first fragment of the memory. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access,
		share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
					       : FFA_MEMORY_NOT_SPECIFIED_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);
	if (remaining_constituent_count == 0) {
		EXPECT_EQ(total_length, fragment_length);
	}
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	sent_length = fragment_length;

	/* Send the remaining fragments. */
	while (remaining_constituent_count != 0) {
		dlog_verbose("%d constituents left to send.\n",
			     remaining_constituent_count);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
			fragment_handle = ffa_frag_handle(ret);
		} else {
			EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
		}
		EXPECT_EQ(ret.arg3, sent_length);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);

		remaining_constituent_count = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE,
			constituents + constituent_count -
				remaining_constituent_count,
			remaining_constituent_count, &fragment_length);

		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
		sent_length += fragment_length;
	}

	EXPECT_EQ(sent_length, total_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
		  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
		EXPECT_EQ(handle, fragment_handle);
	}

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}

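/*
 * Usage sketch (illustrative only): sharing a single page with a service VM
 * and forwarding it the retrieve request. `pages` and `SERVICE_VM1` are
 * assumed to be provided by the enclosing test.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)pages, .page_count = 1},
 *	};
 *	ffa_memory_handle_t handle = send_memory_and_retrieve_request(
 *		FFA_MEM_SHARE_32, mb.send, hf_vm_get_id(), SERVICE_VM1,
 *		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
 */
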
/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve that memory, forcing the request to
 * be made in at least two fragments even if it could fit in one.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	uint32_t remaining_constituent_count;
	struct ffa_value ret;
	ffa_memory_handle_t handle;

	/* Send everything except the last constituent in the first fragment. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access,
		share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
					       : FFA_MEMORY_NOT_SPECIFIED_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	EXPECT_EQ(ret.arg3, fragment_length);
	/* Sender MBZ at virtual instance. */
	EXPECT_EQ(ffa_frag_sender(ret), 0);

	handle = ffa_frag_handle(ret);

	/* Send the last constituent in a separate fragment. */
	remaining_constituent_count = ffa_memory_fragment_init(
		tx_buffer, HF_MAILBOX_SIZE,
		&constituents[constituent_count - 1], 1, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	ret = ffa_mem_frag_tx(handle, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_mem_success_handle(ret), handle);

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us. Copies all the fragments into the provided buffer
 * (if one is given) and checks that the total length of all fragments is no
 * more than `memory_region_max_size`. Returns the sender; the handle is
 * returned via the `handle` out parameter.
 */
ffa_vm_id_t retrieve_memory_from_message(
	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
	ffa_memory_handle_t *handle,
	struct ffa_memory_region *memory_region_ret,
	size_t memory_region_max_size)
{
	uint32_t msg_size;
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;
	ffa_vm_id_t sender;
	struct ffa_memory_region *retrieve_request;
	ffa_memory_handle_t handle_;
	uint32_t fragment_length;
	uint32_t total_length;
	uint32_t fragment_offset;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_sender(msg_ret);

	retrieve_request = (struct ffa_memory_region *)recv_buf;
	handle_ = retrieve_request->handle;
	if (handle != NULL) {
		*handle = handle_;
	}
	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
	total_length = ret.arg1;
	fragment_length = ret.arg2;
	EXPECT_GE(fragment_length,
		  sizeof(struct ffa_memory_region) +
			  sizeof(struct ffa_memory_access) +
			  sizeof(struct ffa_composite_memory_region));
	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
	EXPECT_LE(fragment_length, total_length);
	memory_region = (struct ffa_memory_region *)recv_buf;
	EXPECT_EQ(memory_region->receiver_count, 1);
	EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
		  hf_vm_get_id());

	/* Copy into the return buffer. */
	if (memory_region_ret != NULL) {
		memcpy_s(memory_region_ret, memory_region_max_size,
			 memory_region, fragment_length);
	}

	/*
	 * Release the RX buffer now that we have read everything we need from
	 * it.
	 */
	memory_region = NULL;
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Retrieve the remaining fragments. */
	fragment_offset = fragment_length;
	while (fragment_offset < total_length) {
		ret = ffa_mem_frag_rx(handle_, fragment_offset);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
		EXPECT_EQ(ffa_frag_handle(ret), handle_);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);
		fragment_length = ret.arg3;
		EXPECT_GT(fragment_length, 0);
		ASSERT_LE(fragment_offset + fragment_length,
			  memory_region_max_size);
		if (memory_region_ret != NULL) {
			memcpy_s((uint8_t *)memory_region_ret + fragment_offset,
				 memory_region_max_size - fragment_offset,
				 recv_buf, fragment_length);
		}
		fragment_offset += fragment_length;
		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
	}
	EXPECT_EQ(fragment_offset, total_length);

	return sender;
}

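/*
 * Usage sketch (illustrative only): the receiving side of
 * send_memory_and_retrieve_request() waits for the message carrying the
 * retrieve request, then pulls in the full memory region description.
 * `retrieved` is a hypothetical buffer sized for a multi-fragment response.
 *
 *	static uint8_t retrieved[PAGE_SIZE * 2];
 *	struct ffa_value ret = ffa_msg_wait();
 *	ffa_vm_id_t sender = retrieve_memory_from_message(
 *		mb.recv, mb.send, ret, NULL,
 *		(struct ffa_memory_region *)retrieved, sizeof(retrieved));
 */
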
/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us, expecting it to fail with the given error code.
 * Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
						     void *send_buf,
						     struct ffa_value msg_ret,
						     int32_t expected_error)
{
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_vm_id_t sender;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_sender(msg_ret);

	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_FFA_ERROR(ret, expected_error);

	return sender;
}

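/*
 * Usage sketch (illustrative only): negative tests follow the same pattern as
 * retrieve_memory_from_message() but assert on the expected FF-A error code,
 * e.g. FFA_DENIED.
 *
 *	struct ffa_value ret = ffa_msg_wait();
 *	retrieve_memory_from_message_expect_fail(mb.recv, mb.send, ret,
 *						 FFA_DENIED);
 */
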
/**
 * Get the FF-A partition information for the given UUID. Copies the returned
 * descriptors into `info`, which must have room for `info_size` entries, and
 * returns the number of partitions reported.
 */
ffa_vm_count_t get_ffa_partition_info(struct ffa_uuid *uuid,
				      struct ffa_partition_info *info,
				      size_t info_size)
{
	struct ffa_value ret;
	struct ffa_partition_info *ret_info = set_up_mailbox().recv;

	CHECK(uuid != NULL);
	CHECK(info != NULL);

	ffa_version(MAKE_FFA_VERSION(1, 1));

	ret = ffa_partition_info_get(uuid, 0);

	if (ffa_func_id(ret) != FFA_SUCCESS_32) {
		return 0;
	}

	if (ret.arg2 != 0) {
		size_t src_size = ret.arg2 * sizeof(struct ffa_partition_info);
		size_t dest_size =
			info_size * sizeof(struct ffa_partition_info);

		memcpy_s(info, dest_size, ret_info, src_size);
	}

	ffa_rx_release();

	return ret.arg2;
}

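/*
 * Usage sketch (illustrative only): the nil UUID asks for information about
 * all partitions, while a specific UUID narrows the query to matching ones.
 *
 *	struct ffa_partition_info info[8];
 *	struct ffa_uuid uuid;
 *
 *	ffa_uuid_init(0, 0, 0, 0, &uuid);
 *	ffa_vm_count_t count =
 *		get_ffa_partition_info(&uuid, info, ARRAY_SIZE(info));
 */
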
/**
 * Dump the boot information passed to the partition.
 */
void dump_boot_info(struct ffa_boot_info_header *boot_info_header)
{
	struct ffa_boot_info_desc *boot_info_desc;

	if (boot_info_header == NULL) {
		HFTEST_LOG("SP doesn't have boot arguments!\n");
		return;
	}

	HFTEST_LOG("SP boot info (%x):", (uintptr_t)boot_info_header);
	HFTEST_LOG("  Signature: %x", boot_info_header->signature);
	HFTEST_LOG("  Version: %x", boot_info_header->version);
	HFTEST_LOG("  Blob Size: %u", boot_info_header->info_blob_size);
	HFTEST_LOG("  Descriptor Size: %u", boot_info_header->desc_size);
	HFTEST_LOG("  Descriptor Count: %u", boot_info_header->desc_count);

	boot_info_desc = boot_info_header->boot_info;

	if (boot_info_desc == NULL) {
		dlog_error("Boot info descriptor array is missing.\n");
		return;
	}

	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		HFTEST_LOG("    Type: %u", boot_info_desc[i].type);
		HFTEST_LOG("    Flags:");
		HFTEST_LOG("      Name Format: %x",
			   ffa_boot_info_name_format(&boot_info_desc[i]));
		HFTEST_LOG("      Content Format: %x",
			   ffa_boot_info_content_format(&boot_info_desc[i]));
		HFTEST_LOG("    Size: %u", boot_info_desc[i].size);
		HFTEST_LOG("    Value: %x", boot_info_desc[i].content);
	}
}

/**
 * Retrieve the boot info descriptor matching the provided type and type ID.
 */
struct ffa_boot_info_desc *get_boot_info_desc(
	struct ffa_boot_info_header *boot_info_header, uint8_t type,
	uint8_t type_id)
{
	struct ffa_boot_info_desc *boot_info_desc;

	assert(boot_info_header != NULL);

	ASSERT_EQ(boot_info_header->signature, 0xFFAU);
	ASSERT_EQ(boot_info_header->version, 0x10001U);
	ASSERT_EQ(boot_info_header->desc_size,
		  sizeof(struct ffa_boot_info_desc));
	ASSERT_EQ((uintptr_t)boot_info_header + boot_info_header->desc_offset,
		  (uintptr_t)boot_info_header->boot_info);

	boot_info_desc = boot_info_header->boot_info;

	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		if (ffa_boot_info_type_id(&boot_info_desc[i]) == type_id &&
		    ffa_boot_info_type(&boot_info_desc[i]) == type) {
			return &boot_info_desc[i];
		}
	}

	return NULL;
}
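
/*
 * Usage sketch (illustrative only): an SP entry point can use
 * get_boot_info_desc() to locate its FDT blob. FFA_BOOT_INFO_TYPE_STD and
 * FFA_BOOT_INFO_TYPE_ID_FDT are assumed to follow the FF-A v1.1 boot
 * information encoding used by this header.
 *
 *	struct ffa_boot_info_desc *fdt_info = get_boot_info_desc(
 *		boot_info_header, FFA_BOOT_INFO_TYPE_STD,
 *		FFA_BOOT_INFO_TYPE_ID_FDT);
 *	if (fdt_info != NULL) {
 *		void *fdt = (void *)fdt_info->content;
 *	}
 */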