/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include "hf/check.h"
#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

struct mailbox_buffers set_up_mailbox(void)
{
        ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
                  FFA_SUCCESS_32);
        return (struct mailbox_buffers){
                .send = send_page,
                .recv = recv_page,
        };
}
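
/*
 * Illustrative sketch (not part of the original helpers): a minimal hftest
 * case using set_up_mailbox(). The suite and test names are hypothetical.
 */
TEST(ffa_helpers_example, mailbox_setup)
{
        struct mailbox_buffers mb = set_up_mailbox();

        /* The returned pointers alias the static TX/RX pages above. */
        EXPECT_EQ(mb.send, (void *)send_page);
        EXPECT_EQ(mb.recv, (void *)recv_page);
}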

/*
 * Helper function to send memory to a VM then send a message with the retrieve
 * request it needs to retrieve it.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
        uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
        ffa_vm_id_t recipient,
        struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, ffa_memory_region_flags_t flags,
        enum ffa_data_access send_data_access,
        enum ffa_data_access retrieve_data_access,
        enum ffa_instruction_access send_instruction_access,
        enum ffa_instruction_access retrieve_instruction_access)
{
        uint32_t total_length;
        uint32_t fragment_length;
        uint32_t msg_size;
        struct ffa_value ret;
        const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
        ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
        ffa_memory_handle_t handle;
        uint32_t remaining_constituent_count;
        uint32_t sent_length;

        /* Send the first fragment of the memory. */
        remaining_constituent_count = ffa_memory_region_init(
                tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
                constituent_count, 0, flags, send_data_access,
                send_instruction_access, FFA_MEMORY_NORMAL_MEM,
                FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
                &total_length, &fragment_length);
        if (remaining_constituent_count == 0) {
                EXPECT_EQ(total_length, fragment_length);
        }
        switch (share_func) {
        case FFA_MEM_DONATE_32:
                ret = ffa_mem_donate(total_length, fragment_length);
                break;
        case FFA_MEM_LEND_32:
                ret = ffa_mem_lend(total_length, fragment_length);
                break;
        case FFA_MEM_SHARE_32:
                ret = ffa_mem_share(total_length, fragment_length);
                break;
        default:
                FAIL("Invalid share_func %#x.\n", share_func);
                /* Never reached, but needed to keep clang-analyser happy. */
                return 0;
        }
        sent_length = fragment_length;

        /* Send the remaining fragments. */
        while (remaining_constituent_count != 0) {
                dlog_verbose("%d constituents left to send.\n",
                             remaining_constituent_count);
                EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
                if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
                        fragment_handle = ffa_frag_handle(ret);
                } else {
                        EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
                }
                EXPECT_EQ(ret.arg3, sent_length);
                /* Sender MBZ at virtual instance. */
                EXPECT_EQ(ffa_frag_sender(ret), 0);

                remaining_constituent_count = ffa_memory_fragment_init(
                        tx_buffer, HF_MAILBOX_SIZE,
                        constituents + constituent_count -
                                remaining_constituent_count,
                        remaining_constituent_count, &fragment_length);

                ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
                sent_length += fragment_length;
        }

        EXPECT_EQ(sent_length, total_length);
        EXPECT_EQ(ret.func, FFA_SUCCESS_32);
        handle = ffa_mem_success_handle(ret);
        EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
                  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
        if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
                EXPECT_EQ(handle, fragment_handle);
        }

        /*
         * Send the appropriate retrieve request to the VM so that it can use
         * it to retrieve the memory.
         */
        msg_size = ffa_memory_retrieve_request_init(
                tx_buffer, handle, sender, recipient, 0, 0,
                retrieve_data_access, retrieve_instruction_access,
                FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
                FFA_MEMORY_INNER_SHAREABLE);
        EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
        EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
                  FFA_SUCCESS_32);

        return handle;
}
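
/*
 * Illustrative sketch (not part of the original helpers): a primary-VM test
 * sharing one page with a secondary via the helper above. The test name,
 * example_share_page and SERVICE_VM1 are hypothetical; the FF-A constants
 * come from "hf/ffa.h".
 */
static alignas(PAGE_SIZE) uint8_t example_share_page[PAGE_SIZE];

TEST(ffa_helpers_example, share_one_page)
{
        struct mailbox_buffers mb = set_up_mailbox();
        struct ffa_memory_region_constituent constituents[] = {
                {.address = (uint64_t)example_share_page, .page_count = 1},
        };

        /* Shares the page, then messages the retrieve request to the VM. */
        send_memory_and_retrieve_request(
                FFA_MEM_SHARE_32, mb.send, HF_PRIMARY_VM_ID,
                SERVICE_VM1 /* hypothetical recipient */, constituents, 1, 0,
                FFA_DATA_ACCESS_RW, FFA_DATA_ACCESS_RW,
                FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
                FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
}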

/*
 * Helper function to send memory to a VM then send a message with the retrieve
 * request it needs to retrieve it, forcing the request to be made in at least
 * two fragments even if it could fit in one.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
        uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
        ffa_vm_id_t recipient,
        struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, ffa_memory_region_flags_t flags,
        enum ffa_data_access send_data_access,
        enum ffa_data_access retrieve_data_access,
        enum ffa_instruction_access send_instruction_access,
        enum ffa_instruction_access retrieve_instruction_access)
{
        uint32_t total_length;
        uint32_t fragment_length;
        uint32_t msg_size;
        uint32_t remaining_constituent_count;
        struct ffa_value ret;
        ffa_memory_handle_t handle;

        /* Send everything except the last constituent in the first fragment. */
        remaining_constituent_count = ffa_memory_region_init(
                tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
                constituent_count, 0, flags, send_data_access,
                send_instruction_access, FFA_MEMORY_NORMAL_MEM,
                FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
                &total_length, &fragment_length);
        EXPECT_EQ(remaining_constituent_count, 0);
        EXPECT_EQ(total_length, fragment_length);
        /* Don't include the last constituent in the first fragment. */
        fragment_length -= sizeof(struct ffa_memory_region_constituent);
        switch (share_func) {
        case FFA_MEM_DONATE_32:
                ret = ffa_mem_donate(total_length, fragment_length);
                break;
        case FFA_MEM_LEND_32:
                ret = ffa_mem_lend(total_length, fragment_length);
                break;
        case FFA_MEM_SHARE_32:
                ret = ffa_mem_share(total_length, fragment_length);
                break;
        default:
                FAIL("Invalid share_func %#x.\n", share_func);
                /* Never reached, but needed to keep clang-analyser happy. */
                return 0;
        }
        EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
        EXPECT_EQ(ret.arg3, fragment_length);
        /* Sender MBZ at virtual instance. */
        EXPECT_EQ(ffa_frag_sender(ret), 0);

        handle = ffa_frag_handle(ret);

        /* Send the last constituent in a separate fragment. */
        remaining_constituent_count = ffa_memory_fragment_init(
                tx_buffer, HF_MAILBOX_SIZE,
                &constituents[constituent_count - 1], 1, &fragment_length);
        EXPECT_EQ(remaining_constituent_count, 0);
        ret = ffa_mem_frag_tx(handle, fragment_length);
        EXPECT_EQ(ret.func, FFA_SUCCESS_32);
        EXPECT_EQ(ffa_mem_success_handle(ret), handle);

        /*
         * Send the appropriate retrieve request to the VM so that it can use
         * it to retrieve the memory.
         */
        msg_size = ffa_memory_retrieve_request_init(
                tx_buffer, handle, sender, recipient, 0, 0,
                retrieve_data_access, retrieve_instruction_access,
                FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
                FFA_MEMORY_INNER_SHAREABLE);
        EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
        EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
                  FFA_SUCCESS_32);

        return handle;
}
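
/*
 * Illustrative sketch (not part of the original helpers): exercising the
 * forced-fragmentation path by lending two pages, so that the last
 * constituent travels in its own FFA_MEM_FRAG_TX. The test name,
 * example_lend_pages and SERVICE_VM1 are hypothetical.
 */
static alignas(PAGE_SIZE) uint8_t example_lend_pages[2][PAGE_SIZE];

TEST(ffa_helpers_example, lend_memory_force_fragmented)
{
        struct mailbox_buffers mb = set_up_mailbox();
        struct ffa_memory_region_constituent constituents[] = {
                {.address = (uint64_t)example_lend_pages[0], .page_count = 1},
                {.address = (uint64_t)example_lend_pages[1], .page_count = 1},
        };

        send_memory_and_retrieve_request_force_fragmented(
                FFA_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID,
                SERVICE_VM1 /* hypothetical recipient */, constituents, 2, 0,
                FFA_DATA_ACCESS_RW, FFA_DATA_ACCESS_RW,
                FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
                FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
}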

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us. Copies all the fragments into the provided
 * buffer, if any, and checks that the total length of all fragments is no
 * more than `memory_region_max_size`. Returns the sender, and returns the
 * handle via an output parameter.
 */
ffa_vm_id_t retrieve_memory_from_message(
        void *recv_buf, void *send_buf, struct ffa_value msg_ret,
        ffa_memory_handle_t *handle,
        struct ffa_memory_region *memory_region_ret,
        size_t memory_region_max_size)
{
        uint32_t msg_size;
        struct ffa_value ret;
        struct ffa_memory_region *memory_region;
        ffa_vm_id_t sender;
        struct ffa_memory_region *retrieve_request;
        ffa_memory_handle_t handle_;
        uint32_t fragment_length;
        uint32_t total_length;
        uint32_t fragment_offset;

        EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
        msg_size = ffa_msg_send_size(msg_ret);
        sender = ffa_sender(msg_ret);

        retrieve_request = (struct ffa_memory_region *)recv_buf;
        handle_ = retrieve_request->handle;
        if (handle != NULL) {
                *handle = handle_;
        }
        memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
        ffa_rx_release();
        ret = ffa_mem_retrieve_req(msg_size, msg_size);
        EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
        total_length = ret.arg1;
        fragment_length = ret.arg2;
        EXPECT_GE(fragment_length,
                  sizeof(struct ffa_memory_region) +
                          sizeof(struct ffa_memory_access) +
                          sizeof(struct ffa_composite_memory_region));
        EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
        EXPECT_LE(fragment_length, total_length);
        memory_region = (struct ffa_memory_region *)recv_buf;
        EXPECT_EQ(memory_region->receiver_count, 1);
        EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
                  hf_vm_get_id());

        /* Copy the first fragment into the return buffer. */
        if (memory_region_ret != NULL) {
                memcpy_s(memory_region_ret, memory_region_max_size,
                         memory_region, fragment_length);
        }

        /*
         * Release the RX buffer now that we have read everything we need from
         * it.
         */
        memory_region = NULL;
        EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

        /* Retrieve the remaining fragments. */
        fragment_offset = fragment_length;
        while (fragment_offset < total_length) {
                ret = ffa_mem_frag_rx(handle_, fragment_offset);
                EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
                EXPECT_EQ(ffa_frag_handle(ret), handle_);
                /* Sender MBZ at virtual instance. */
                EXPECT_EQ(ffa_frag_sender(ret), 0);
                fragment_length = ret.arg3;
                EXPECT_GT(fragment_length, 0);
                ASSERT_LE(fragment_offset + fragment_length,
                          memory_region_max_size);
                if (memory_region_ret != NULL) {
                        memcpy_s((uint8_t *)memory_region_ret +
                                         fragment_offset,
                                 memory_region_max_size - fragment_offset,
                                 recv_buf, fragment_length);
                }
                fragment_offset += fragment_length;
                EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
        }
        EXPECT_EQ(fragment_offset, total_length);

        return sender;
}
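
/*
 * Illustrative sketch (not part of the original helpers): the receiving end
 * of the flow above, as it could look inside an hftest secondary-VM service.
 * TEST_SERVICE, SERVICE_RECV_BUFFER() and SERVICE_SEND_BUFFER() are assumed
 * from the hftest service environment; the buffer size is arbitrary.
 */
static uint8_t retrieved_region[PAGE_SIZE * 2]; /* Hypothetical scratch. */

TEST_SERVICE(example_memory_retrieve)
{
        void *recv_buf = SERVICE_RECV_BUFFER(); /* Assumed service macro. */
        void *send_buf = SERVICE_SEND_BUFFER(); /* Assumed service macro. */
        ffa_memory_handle_t handle;

        /* Block until the sender's retrieve request message arrives. */
        retrieve_memory_from_message(
                recv_buf, send_buf, ffa_msg_wait(), &handle,
                (struct ffa_memory_region *)retrieved_region,
                sizeof(retrieved_region));

        /*
         * The constituents described in retrieved_region are now mapped and
         * accessible; a real service would read or write them here.
         */
        ffa_yield();
}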

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us, expecting it to fail with the given error code.
 * Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
                                                     void *send_buf,
                                                     struct ffa_value msg_ret,
                                                     int32_t expected_error)
{
        uint32_t msg_size;
        struct ffa_value ret;
        ffa_vm_id_t sender;

        EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
        msg_size = ffa_msg_send_size(msg_ret);
        sender = ffa_sender(msg_ret);

        memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
        ffa_rx_release();
        ret = ffa_mem_retrieve_req(msg_size, msg_size);
        EXPECT_FFA_ERROR(ret, expected_error);

        return sender;
}

ffa_vm_count_t get_ffa_partition_info(struct ffa_uuid *uuid,
                                      struct ffa_partition_info *info,
                                      size_t info_size)
{
        struct ffa_value ret;
        struct ffa_partition_info *ret_info = set_up_mailbox().recv;

        CHECK(uuid != NULL);
        CHECK(info != NULL);

        ffa_version(MAKE_FFA_VERSION(1, 1));

        ret = ffa_partition_info_get(uuid, 0);

        if (ffa_func_id(ret) != FFA_SUCCESS_32) {
                return 0;
        }

        if (ret.arg2 != 0) {
                size_t src_size = ret.arg2 * sizeof(struct ffa_partition_info);
                size_t dest_size =
                        info_size * sizeof(struct ffa_partition_info);

                memcpy_s(info, dest_size, ret_info, src_size);
        }

        ffa_rx_release();

        return ret.arg2;
}
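
/*
 * Illustrative sketch (not part of the original helpers): listing partitions
 * with the helper above. ffa_uuid_init() is assumed from "hf/ffa.h"; the
 * capacity of 8 entries and the test name are arbitrary.
 */
TEST(ffa_helpers_example, list_partitions)
{
        struct ffa_partition_info info[8]; /* Hypothetical capacity. */
        struct ffa_uuid uuid;
        ffa_vm_count_t count;

        /* The nil UUID requests information about every partition. */
        ffa_uuid_init(0, 0, 0, 0, &uuid);
        count = get_ffa_partition_info(&uuid, info, 8);
        EXPECT_GE(count, 1);
}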

/**
 * Dump the boot information passed to the partition.
 */
void dump_boot_info(struct ffa_boot_info_header *boot_info_header)
{
        struct ffa_boot_info_desc *boot_info_desc;

        if (boot_info_header == NULL) {
                HFTEST_LOG("SP doesn't have boot arguments!\n");
                return;
        }

        HFTEST_LOG("SP boot info (%x):", (uintptr_t)boot_info_header);
        HFTEST_LOG("  Signature: %x", boot_info_header->signature);
        HFTEST_LOG("  Version: %x", boot_info_header->version);
        HFTEST_LOG("  Blob Size: %u", boot_info_header->info_blob_size);
        HFTEST_LOG("  Descriptor Size: %u", boot_info_header->desc_size);
        HFTEST_LOG("  Descriptor Count: %u", boot_info_header->desc_count);

        boot_info_desc = boot_info_header->boot_info;

        if (boot_info_desc == NULL) {
                dlog_error("Boot data arguments error...");
                return;
        }

        for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
                HFTEST_LOG("  Type: %u", boot_info_desc[i].type);
                HFTEST_LOG("  Flags:");
                HFTEST_LOG("    Name Format: %x",
                           ffa_boot_info_name_format(&boot_info_desc[i]));
                HFTEST_LOG("    Content Format: %x",
                           ffa_boot_info_content_format(&boot_info_desc[i]));
                HFTEST_LOG("  Size: %u", boot_info_desc[i].size);
                HFTEST_LOG("  Value: %x", boot_info_desc[i].content);
        }
}

/**
 * Retrieve the boot info descriptor related to the provided type and type ID.
 */
struct ffa_boot_info_desc *get_boot_info_desc(
        struct ffa_boot_info_header *boot_info_header, uint8_t type,
        uint8_t type_id)
{
        struct ffa_boot_info_desc *boot_info_desc;

        assert(boot_info_header != NULL);

        ASSERT_EQ(boot_info_header->signature, 0xFFAU);
        ASSERT_EQ(boot_info_header->version, 0x10001U);
        ASSERT_EQ(boot_info_header->desc_size,
                  sizeof(struct ffa_boot_info_desc));
        ASSERT_EQ((uintptr_t)boot_info_header + boot_info_header->desc_offset,
                  (uintptr_t)boot_info_header->boot_info);

        boot_info_desc = boot_info_header->boot_info;

        for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
                if (ffa_boot_info_type_id(&boot_info_desc[i]) == type_id &&
                    ffa_boot_info_type(&boot_info_desc[i]) == type) {
                        return &boot_info_desc[i];
                }
        }

        return NULL;
}
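
/*
 * Illustrative sketch (not part of the original helpers): locating the FDT
 * descriptor an SP receives at boot. FFA_BOOT_INFO_TYPE_STD and
 * FFA_BOOT_INFO_TYPE_ID_FDT are assumed to be the standard-type/FDT encodings
 * defined alongside the FF-A v1.1 boot information structures.
 */
static void *example_find_fdt(struct ffa_boot_info_header *header)
{
        struct ffa_boot_info_desc *fdt_info;

        dump_boot_info(header);

        /* Assumed constants: standard type, FDT type ID. */
        fdt_info = get_boot_info_desc(header, FFA_BOOT_INFO_TYPE_STD,
                                      FFA_BOOT_INFO_TYPE_ID_FDT);
        if (fdt_info == NULL) {
                return NULL;
        }

        /* For the FDT descriptor, the content field carries its address. */
        return (void *)(uintptr_t)fdt_info->content;
}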