/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/ffa.h"

#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

struct mailbox_buffers set_up_mailbox(void)
{
	ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	return (struct mailbox_buffers){
		.send = send_page,
		.recv = recv_page,
	};
}

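/*
 * Example (illustrative sketch, not part of the original file): how a test
 * typically starts. set_up_mailbox() should run exactly once per VM image,
 * since FFA_RXTX_MAP fails once the buffers are already mapped; the returned
 * buffers are then reused for every later FF-A call.
 */
void example_mailbox_setup(void)
{
	struct mailbox_buffers mb = set_up_mailbox();
	uint8_t *tx = (uint8_t *)mb.send;

	/* The TX buffer may now be filled before an FF-A send. */
	tx[0] = 42;
}
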
/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve the memory. Returns the handle of
 * the sent memory region.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	struct ffa_value ret;
	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
	ffa_memory_handle_t handle;
	uint32_t remaining_constituent_count;
	uint32_t sent_length;

	/* Send the first fragment of the memory. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&total_length, &fragment_length);
	if (remaining_constituent_count == 0) {
		EXPECT_EQ(total_length, fragment_length);
	}
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	sent_length = fragment_length;

	/* Send the remaining fragments. */
	while (remaining_constituent_count != 0) {
		dlog_verbose("%d constituents left to send.\n",
			     remaining_constituent_count);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
			fragment_handle = ffa_frag_handle(ret);
		} else {
			EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
		}
		EXPECT_EQ(ret.arg3, sent_length);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);

		remaining_constituent_count = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE,
			constituents + constituent_count -
				remaining_constituent_count,
			remaining_constituent_count, &fragment_length);

		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
		sent_length += fragment_length;
	}

	EXPECT_EQ(sent_length, total_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
		  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
		EXPECT_EQ(handle, fragment_handle);
	}

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_OUTER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}

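/*
 * Example (illustrative sketch, not one of the original helpers): a
 * sender-side call sharing a single page with a secondary VM. The recipient
 * ID and example_page are hypothetical stand-ins for whatever the calling
 * test owns, and ARRAY_SIZE is assumed to be the usual sizeof-based macro
 * from the hf headers.
 */
static alignas(PAGE_SIZE) uint8_t example_page[PAGE_SIZE];

void example_share_single_page(void *tx_buffer)
{
	const ffa_vm_id_t recipient = 1; /* Hypothetical secondary VM ID. */
	struct ffa_memory_region_constituent constituents[] = {
		{.address = (uint64_t)example_page, .page_count = 1},
	};

	send_memory_and_retrieve_request(
		FFA_MEM_SHARE_32, tx_buffer, hf_vm_get_id(), recipient,
		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
}
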
/*
 * Helper function to send memory to a VM, then send it a message with the
 * retrieve request it needs to retrieve the memory, forcing the request to
 * be made in at least two fragments even if it could fit in one.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	uint32_t remaining_constituent_count;
	struct ffa_value ret;
	ffa_memory_handle_t handle;

	/* Send everything except the last constituent in the first fragment. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_OUTER_SHAREABLE,
		&total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	EXPECT_EQ(ret.arg3, fragment_length);
	/* Sender MBZ at virtual instance. */
	EXPECT_EQ(ffa_frag_sender(ret), 0);

	handle = ffa_frag_handle(ret);

	/* Send the last constituent in a separate fragment. */
	remaining_constituent_count = ffa_memory_fragment_init(
		tx_buffer, HF_MAILBOX_SIZE,
		&constituents[constituent_count - 1], 1, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	ret = ffa_mem_frag_tx(handle, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_mem_success_handle(ret), handle);

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_OUTER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}

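/*
 * Example (illustrative sketch): the forced-fragmentation variant takes the
 * same arguments as send_memory_and_retrieve_request(), so a test that wants
 * to exercise the FFA_MEM_FRAG_TX path only changes the function it calls;
 * the helper always holds the final constituent back for a second fragment.
 * example_page and the recipient ID are the same hypothetical stand-ins as
 * above.
 */
void example_share_force_fragmented(void *tx_buffer)
{
	const ffa_vm_id_t recipient = 1; /* Hypothetical secondary VM ID. */
	struct ffa_memory_region_constituent constituents[] = {
		{.address = (uint64_t)example_page, .page_count = 1},
	};

	send_memory_and_retrieve_request_force_fragmented(
		FFA_MEM_SHARE_32, tx_buffer, hf_vm_get_id(), recipient,
		constituents, ARRAY_SIZE(constituents), 0, FFA_DATA_ACCESS_RW,
		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
}
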
/*
 * Use the retrieve request from the receive buffer to retrieve a memory
 * region which has been sent to us. Copies all the fragments into the
 * provided buffer, if any, and checks that the total length of all fragments
 * is no more than `memory_region_max_size`. Returns the sender; the handle
 * is returned via the `handle` out parameter.
 */
ffa_vm_id_t retrieve_memory_from_message(
	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
	ffa_memory_handle_t *handle,
	struct ffa_memory_region *memory_region_ret,
	size_t memory_region_max_size)
{
	uint32_t msg_size;
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;
	ffa_vm_id_t sender;
	struct ffa_memory_region *retrieve_request;
	ffa_memory_handle_t handle_;
	uint32_t fragment_length;
	uint32_t total_length;
	uint32_t fragment_offset;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_msg_send_sender(msg_ret);

	retrieve_request = (struct ffa_memory_region *)recv_buf;
	handle_ = retrieve_request->handle;
	if (handle != NULL) {
		*handle = handle_;
	}
	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
	total_length = ret.arg1;
	fragment_length = ret.arg2;
	EXPECT_GE(fragment_length,
		  sizeof(struct ffa_memory_region) +
			  sizeof(struct ffa_memory_access) +
			  sizeof(struct ffa_composite_memory_region));
	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
	EXPECT_LE(fragment_length, total_length);
	memory_region = (struct ffa_memory_region *)recv_buf;
	EXPECT_EQ(memory_region->receiver_count, 1);
	EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
		  hf_vm_get_id());

	/* Copy into the return buffer. */
	if (memory_region_ret != NULL) {
		memcpy_s(memory_region_ret, memory_region_max_size,
			 memory_region, fragment_length);
	}

	/*
	 * Release the RX buffer now that we have read everything we need from
	 * it.
	 */
	memory_region = NULL;
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Retrieve the remaining fragments. */
	fragment_offset = fragment_length;
	while (fragment_offset < total_length) {
		ret = ffa_mem_frag_rx(handle_, fragment_offset);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
		EXPECT_EQ(ffa_frag_handle(ret), handle_);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);
		fragment_length = ret.arg3;
		EXPECT_GT(fragment_length, 0);
		ASSERT_LE(fragment_offset + fragment_length,
			  memory_region_max_size);
		if (memory_region_ret != NULL) {
			memcpy_s((uint8_t *)memory_region_ret + fragment_offset,
				 memory_region_max_size - fragment_offset,
				 recv_buf, fragment_length);
		}
		fragment_offset += fragment_length;
		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
	}
	EXPECT_EQ(fragment_offset, total_length);

	return sender;
}

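/*
 * Example (illustrative sketch): the receiving service's side of the
 * exchange. It waits for the retrieve-request message sent by one of the
 * helpers above, retrieves the memory region into a static buffer, and then
 * accesses the first constituent. The buffer size and function name are
 * hypothetical; real services size the buffer for the regions they expect.
 */
static uint8_t example_retrieve_buffer[HF_MAILBOX_SIZE * 2];

void example_retrieve_memory(void *recv_buf, void *send_buf)
{
	struct ffa_memory_region *memory_region =
		(struct ffa_memory_region *)example_retrieve_buffer;
	struct ffa_composite_memory_region *composite;
	ffa_memory_handle_t handle;
	uint8_t *page;

	retrieve_memory_from_message(recv_buf, send_buf, ffa_msg_wait(),
				     &handle, memory_region,
				     sizeof(example_retrieve_buffer));

	/* The constituent addresses are now mapped and can be accessed. */
	composite = ffa_memory_region_get_composite(memory_region, 0);
	page = (uint8_t *)composite->constituents[0].address;
	page[0]++;
}
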
/*
 * Use the retrieve request from the receive buffer to retrieve a memory
 * region which has been sent to us, expecting it to fail with the given
 * error code. Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
						     void *send_buf,
						     struct ffa_value msg_ret,
						     int32_t expected_error)
{
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_vm_id_t sender;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_msg_send_sender(msg_ret);

	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_FFA_ERROR(ret, expected_error);

	return sender;
}
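
/*
 * Example (illustrative sketch): the expect-fail variant fits scenarios
 * where the retriever must be refused, e.g. a retrieve request whose
 * permissions exceed what the sender offered. FFA_DENIED is one plausible
 * error code; the code a real test expects depends on the scenario it sets
 * up.
 */
void example_retrieve_expect_denied(void *recv_buf, void *send_buf)
{
	retrieve_memory_from_message_expect_fail(recv_buf, send_buf,
						 ffa_msg_wait(), FFA_DENIED);
}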