/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;
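
/*
 * Maps the statically allocated send and receive pages as the current VM's
 * FF-A TX/RX buffers and returns pointers to them for tests to use.
 */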
struct mailbox_buffers set_up_mailbox(void)
{
	ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	return (struct mailbox_buffers){
		.send = send_page,
		.recv = recv_page,
	};
}

/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve that memory.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	struct ffa_value ret;
	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
	ffa_memory_handle_t handle;
	uint32_t remaining_constituent_count;
	uint32_t sent_length;

	/* Send the first fragment of the memory. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&total_length, &fragment_length);
	if (remaining_constituent_count == 0) {
		EXPECT_EQ(total_length, fragment_length);
	}
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	sent_length = fragment_length;

	/* Send the remaining fragments. */
	while (remaining_constituent_count != 0) {
		dlog_verbose("%d constituents left to send.\n",
			     remaining_constituent_count);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
			fragment_handle = ffa_frag_handle(ret);
		} else {
			EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
		}
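		/*
		 * FFA_MEM_FRAG_RX reports in w3 the offset (i.e. the number of
		 * bytes of the memory region descriptor) received so far,
		 * which must match the length we have sent.
		 */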
		EXPECT_EQ(ret.arg3, sent_length);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);

		remaining_constituent_count = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE,
			constituents + constituent_count -
				remaining_constituent_count,
			remaining_constituent_count, &fragment_length);

		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
		sent_length += fragment_length;
	}

	EXPECT_EQ(sent_length, total_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
		  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
		EXPECT_EQ(handle, fragment_handle);
	}

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_OUTER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}
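
/*
 * Example usage (illustrative only): lend a single page from the primary VM
 * to a secondary VM. `pages`, `SERVICE_VM1` and `mb` are placeholders for a
 * test's own page buffer, service VM ID and mailbox from set_up_mailbox();
 * the access values depend on the scenario being tested.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)pages, .page_count = 1},
 *	};
 *	ffa_memory_handle_t handle = send_memory_and_retrieve_request(
 *		FFA_MEM_LEND_32, mb.send, hf_vm_get_id(), SERVICE_VM1,
 *		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
 */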

/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve that memory, forcing the memory to be
 * sent in at least two fragments even if it could fit in one.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	uint32_t remaining_constituent_count;
	struct ffa_value ret;
	ffa_memory_handle_t handle;

	/* Send everything except the last constituent in the first fragment. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	EXPECT_EQ(ret.arg3, fragment_length);
	/* Sender MBZ at virtual instance. */
	EXPECT_EQ(ffa_frag_sender(ret), 0);

	handle = ffa_frag_handle(ret);

	/* Send the last constituent in a separate fragment. */
	remaining_constituent_count = ffa_memory_fragment_init(
		tx_buffer, HF_MAILBOX_SIZE,
		&constituents[constituent_count - 1], 1, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	ret = ffa_mem_frag_tx(handle, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_mem_success_handle(ret), handle);

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_OUTER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}
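
/*
 * Example usage (illustrative only): same arguments as
 * send_memory_and_retrieve_request(), but the send is split so that the
 * transaction exercises FFA_MEM_FRAG_TX even when one fragment would have
 * been enough. `constituents`, `SERVICE_VM1` and `mb` are placeholders.
 *
 *	handle = send_memory_and_retrieve_request_force_fragmented(
 *		FFA_MEM_SHARE_32, mb.send, hf_vm_get_id(), SERVICE_VM1,
 *		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
 */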

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us. Copies all the fragments into the provided buffer
 * (if one is given) and checks that the total length of all fragments is no
 * more than `memory_region_max_size`. Returns the sender, and returns the
 * handle via the `handle` output parameter.
 */
ffa_vm_id_t retrieve_memory_from_message(
	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
	ffa_memory_handle_t *handle,
	struct ffa_memory_region *memory_region_ret,
	size_t memory_region_max_size)
{
	uint32_t msg_size;
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;
	ffa_vm_id_t sender;
	struct ffa_memory_region *retrieve_request;
	ffa_memory_handle_t handle_;
	uint32_t fragment_length;
	uint32_t total_length;
	uint32_t fragment_offset;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_msg_send_sender(msg_ret);

	retrieve_request = (struct ffa_memory_region *)recv_buf;
	handle_ = retrieve_request->handle;
	if (handle != NULL) {
		*handle = handle_;
	}
	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
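	/*
	 * For FFA_MEM_RETRIEVE_RESP, w1 carries the total length of the
	 * retrieved memory region descriptor and w2 the length of the
	 * fragment currently in the RX buffer.
	 */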
	total_length = ret.arg1;
	fragment_length = ret.arg2;
	EXPECT_GE(fragment_length,
		  sizeof(struct ffa_memory_region) +
			  sizeof(struct ffa_memory_access) +
			  sizeof(struct ffa_composite_memory_region));
	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
	EXPECT_LE(fragment_length, total_length);
	memory_region = (struct ffa_memory_region *)recv_buf;
	EXPECT_EQ(memory_region->receiver_count, 1);
	EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
		  hf_vm_get_id());

	/* Copy into the return buffer. */
	if (memory_region_ret != NULL) {
		memcpy_s(memory_region_ret, memory_region_max_size,
			 memory_region, fragment_length);
	}

	/*
	 * Release the RX buffer now that we have read everything we need from
	 * it.
	 */
	memory_region = NULL;
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Retrieve the remaining fragments. */
	fragment_offset = fragment_length;
	while (fragment_offset < total_length) {
		ret = ffa_mem_frag_rx(handle_, fragment_offset);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
		EXPECT_EQ(ffa_frag_handle(ret), handle_);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);
		fragment_length = ret.arg3;
		EXPECT_GT(fragment_length, 0);
		ASSERT_LE(fragment_offset + fragment_length,
			  memory_region_max_size);
		if (memory_region_ret != NULL) {
			memcpy_s((uint8_t *)memory_region_ret + fragment_offset,
				 memory_region_max_size - fragment_offset,
				 recv_buf, fragment_length);
		}
		fragment_offset += fragment_length;
		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
	}
	EXPECT_EQ(fragment_offset, total_length);

	return sender;
}
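
/*
 * Example usage (illustrative only): a test service that waits for the
 * retrieve request from the sender and retrieves the memory into a static
 * buffer. `recv_buf`/`send_buf` stand for the service's own RX/TX buffers and
 * `retrieved` for its storage; a real service may obtain the message via a
 * different wait/receive helper than ffa_msg_wait().
 *
 *	static uint8_t retrieved[PAGE_SIZE * 2];
 *	ffa_memory_handle_t handle;
 *	ffa_vm_id_t sender = retrieve_memory_from_message(
 *		recv_buf, send_buf, ffa_msg_wait(), &handle,
 *		(struct ffa_memory_region *)retrieved, sizeof(retrieved));
 */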

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us, expecting it to fail with the given error code.
 * Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
						     void *send_buf,
						     struct ffa_value msg_ret,
						     int32_t expected_error)
{
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_vm_id_t sender;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_msg_send_sender(msg_ret);

	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_FFA_ERROR(ret, expected_error);

	return sender;
}
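
/*
 * Example usage (illustrative only): a service that expects its retrieve
 * attempt to be rejected, e.g. because the sender has already reclaimed the
 * memory. FFA_DENIED is just one plausible error code; the expected value
 * depends on the scenario under test.
 *
 *	retrieve_memory_from_message_expect_fail(recv_buf, send_buf,
 *						 ffa_msg_wait(), FFA_DENIED);
 */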