/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

static void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}

/**
 * Initialises the receiver permissions in a memory transaction descriptor.
 */
void ffa_memory_access_init_permissions(
	struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
	enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	ffa_memory_receiver_flags_t flags)
{
	ffa_memory_access_permissions_t permissions = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	receiver->receiver_permissions.receiver = receiver_id;
	receiver->receiver_permissions.permissions = permissions;
	receiver->receiver_permissions.flags = flags;

	receiver->reserved_0 = 0ULL;
}

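/*
 * Example (an illustrative sketch, not part of the build): filling in one
 * receiver entry with read/write data access and unspecified instruction
 * access. The receiver ID is hypothetical.
 *
 *	struct ffa_memory_access access;
 *
 *	ffa_memory_access_init_permissions(
 *		&access, 0x8002, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);
 */
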
/**
 * Initialises the header of the given `ffa_memory_region`, not
 * including the composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, uint32_t receiver_count)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->memory_access_desc_size =
		sizeof(struct ffa_memory_access);
	memory_region->receiver_count = receiver_count;
	memory_region->receivers_offset =
		offsetof(struct ffa_memory_region, receivers);
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
}

/**
 * Copies as many as possible of the given constituents to the given
 * `ffa_memory_region` and sets each receiver's composite memory region
 * offset.
 *
 * Returns the number of constituents which wouldn't fit, and (via return
 * parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all
 * constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structures should point to the same offset value.
	 */
	for (i = 0U; i < memory_region->receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			sizeof(struct ffa_memory_region) +
			memory_region->receiver_count *
				sizeof(struct ffa_memory_access);
	}

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents which wouldn't fit, and (via return
 * parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all
 * constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_memory_access receiver_access;

	ffa_memory_access_init_permissions(&receiver_access, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_region_init(
		memory_region, memory_region_max_size, sender,
		&receiver_access, 1, constituents, constituent_count, tag,
		flags, type, cacheability, shareability, total_length,
		fragment_length);
}

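/*
 * Example (an illustrative sketch, not part of the build): a lender
 * preparing an `FFA_MEM_SHARE` message for one borrower in its TX buffer.
 * `tx_buffer`, the VM IDs and the constituent values are hypothetical.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = 0x7100000, .page_count = 2},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	uint32_t remaining = ffa_memory_region_init_single_receiver(
 *		tx_buffer, HF_MAILBOX_SIZE, 0x8001, 0x8002, constituents, 1,
 *		0, 0, FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *		FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length);
 *
 * A zero return means every constituent fitted in the first fragment;
 * otherwise the remainder must be sent with `ffa_memory_fragment_init` (see
 * below).
 */
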
uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_memory_type type,
	enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}

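/*
 * Example (an illustrative sketch, not part of the build): sharing the same
 * constituents with two borrowers, one read/write and one read-only. The VM
 * IDs and `tx_buffer` are hypothetical; `constituents`, `total_length` and
 * `fragment_length` are as in the previous example.
 *
 *	struct ffa_memory_access receivers[2];
 *
 *	ffa_memory_access_init_permissions(
 *		&receivers[0], 0x8002, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);
 *	ffa_memory_access_init_permissions(
 *		&receivers[1], 0x8003, FFA_DATA_ACCESS_RO,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);
 *	ffa_memory_region_init(tx_buffer, HF_MAILBOX_SIZE, 0x8001, receivers,
 *			       2, constituents, 1, 0, 0,
 *			       FFA_MEMORY_NORMAL_MEM,
 *			       FFA_MEMORY_CACHE_WRITE_BACK,
 *			       FFA_MEMORY_INNER_SHAREABLE, &total_length,
 *			       &fragment_length);
 */
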
/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init_single_receiver(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	struct ffa_memory_access receiver_permissions;

	ffa_memory_access_init_permissions(&receiver_permissions, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_retrieve_request_init(
		memory_region, handle, sender, &receiver_permissions, 1, tag,
		flags, type, cacheability, shareability);
}

uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_attributes_t attributes = 0;
	uint32_t i;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif
	/* Zero the composite offset for all receivers. */
	for (i = 0U; i < receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset = 0U;
	}

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count *
		       sizeof(struct ffa_memory_access);
}

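/*
 * Example (an illustrative sketch, not part of the build): a borrower
 * requesting to retrieve memory shared with it, using the handle returned by
 * the relayer when the transaction was set up. The VM IDs are hypothetical.
 *
 *	uint32_t length = ffa_memory_retrieve_request_init_single_receiver(
 *		tx_buffer, handle, 0x8001, 0x8002, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE);
 *
 * `length` is then passed as both the total and the fragment length of the
 * `FFA_MEM_RETRIEVE_REQ` call.
 */
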
/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->flags = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
	return sizeof(struct ffa_memory_region);
}

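/*
 * Example (an illustrative sketch, not part of the build): the hypervisor
 * asking the TEE for the full description of a transaction on behalf of a
 * lender VM. Only the handle and the sender (hypothetical here) are filled
 * in; the TEE already holds the rest of the transaction details.
 *
 *	uint32_t length = ffa_memory_lender_retrieve_request_init(
 *		tx_buffer, handle, 0x8001);
 */
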
/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit
 * in the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, 1);
	/*
	 * Initialised here as memory retrieve responses currently expect a
	 * single borrower to be specified.
	 */
	ffa_memory_access_init_permissions(&response->receivers[0], receiver, 0,
					   0, flags);
	response->receivers[0].receiver_permissions.permissions = permissions;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}

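/*
 * Example (an illustrative sketch, not part of the build): the relayer
 * writing a retrieve response into a receiver's RX buffer. All parameter
 * values are assumed to come from the recorded transaction.
 *
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	bool ok = ffa_retrieved_memory_region_init(
 *		rx_buffer, HF_MAILBOX_SIZE, sender, attributes, 0, handle,
 *		receiver, permissions, page_count, constituent_count,
 *		constituents, fragment_constituent_count, &total_length,
 *		&fragment_length);
 *
 * A false return means `fragment_constituent_count` constituents don't fit
 * in the response buffer, so the caller must put fewer in the first fragment
 * and send the rest in continuation fragments.
 */
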
/**
 * Initialises a continuation fragment of a memory sharing message with as
 * many as possible of the given constituents.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameter) the size in bytes of the fragment written.
 */
uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length =
			count_to_copy *
			sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}

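/*
 * Example (an illustrative sketch, not part of the build): sending the
 * constituents which didn't fit in the first fragment of a share.
 * `ffa_mem_share` and `ffa_mem_frag_tx` are assumed wrappers around the
 * corresponding ABIs, and `ffa_frag_handle` is assumed to extract the handle
 * from the `FFA_MEM_FRAG_RX` response; the other names follow the sharing
 * example above.
 *
 *	uint32_t remaining = ffa_memory_region_init_single_receiver(...);
 *	struct ffa_value ret = ffa_mem_share(total_length, fragment_length);
 *
 *	while (remaining > 0) {
 *		uint32_t sent = constituent_count - remaining;
 *
 *		remaining = ffa_memory_fragment_init(
 *			tx_buffer, HF_MAILBOX_SIZE, &constituents[sent],
 *			remaining, &fragment_length);
 *		ret = ffa_mem_frag_tx(ffa_frag_handle(ret), fragment_length);
 *	}
 */
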
/**
 * Initialises the given composite memory region with a single constituent
 * covering `page_count` pages starting at `address`.
 */
static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used when
 * forwarding `FFA_RXTX_MAP`.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
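
/*
 * Example (an illustrative sketch, not part of the build): a hypervisor
 * forwarding a VM's buffer pair to the SPMC. `buf`, the VM ID and the
 * physical addresses are hypothetical.
 *
 *	struct ffa_endpoint_rx_tx_descriptor *desc =
 *		(struct ffa_endpoint_rx_tx_descriptor *)buf;
 *
 *	ffa_endpoint_rx_tx_descriptor_init(desc, 0x8001, rx_pa, tx_pa);
 *
 * The resulting layout is the endpoint descriptor, then the RX buffer's
 * composite descriptor at `rx_offset`, then the TX buffer's at `tx_offset`.
 */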