/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/ffa_v1_0.h"
#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}

/**
 * Initialises receiver permissions in a memory transaction descriptor.
 */
void ffa_memory_access_init_permissions(
	struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
	enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	ffa_memory_receiver_flags_t flags)
{
	ffa_memory_access_permissions_t permissions = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	receiver->receiver_permissions.receiver = receiver_id;
	receiver->receiver_permissions.permissions = permissions;
	receiver->receiver_permissions.flags = flags;

	receiver->reserved_0 = 0ULL;
}
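
/*
 * Example usage (an illustrative sketch only; the receiver ID and access
 * values are arbitrary, assuming the enum names from "hf/ffa.h"):
 *
 *	struct ffa_memory_access access;
 *
 *	ffa_memory_access_init_permissions(&access, 0x8001,
 *					   FFA_DATA_ACCESS_RW,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 */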

/**
 * Initialises the header of the given `ffa_memory_region`, not
 * including the composite memory region offset.
 */
void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
				   ffa_vm_id_t sender,
				   ffa_memory_attributes_t attributes,
				   ffa_memory_region_flags_t flags,
				   ffa_memory_handle_t handle, uint32_t tag,
				   uint32_t receiver_count)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->memory_access_desc_size =
		sizeof(struct ffa_memory_access);
	memory_region->receiver_count = receiver_count;
	memory_region->receivers_offset =
		offsetof(struct ffa_memory_region, receivers);
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
}

/**
 * Copies as many of the given constituents as will fit into the memory
 * region and sets each receiver's composite memory region offset.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structures should point to the same offset value.
	 */
	for (i = 0U; i < memory_region->receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			sizeof(struct ffa_memory_region) +
			memory_region->receiver_count *
				sizeof(struct ffa_memory_access);
	}

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_memory_access receiver_access;

	ffa_memory_access_init_permissions(&receiver_access, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_region_init(
		memory_region, memory_region_max_size, sender,
		&receiver_access, 1, constituents, constituent_count, tag,
		flags, type, cacheability, shareability, total_length,
		fragment_length);
}
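
/*
 * Example usage (an illustrative sketch; `tx_buffer`, the IDs and the page
 * address are hypothetical, and the enum values assume the definitions in
 * "hf/ffa.h"):
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = page_address, .page_count = 1},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	uint32_t remaining = ffa_memory_region_init_single_receiver(
 *		tx_buffer, HF_MAILBOX_SIZE, sender_id, receiver_id,
 *		constituents, 1, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
 *		&total_length, &fragment_length);
 *
 * A non-zero `remaining` means not all constituents fit in the first
 * fragment; the rest would be sent with `ffa_memory_fragment_init` and
 * `FFA_MEM_FRAG_TX`.
 */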

uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_memory_type type,
	enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}
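
/*
 * Example with two receivers (an illustrative sketch; the endpoint IDs are
 * hypothetical):
 *
 *	struct ffa_memory_access receivers[2];
 *
 *	ffa_memory_access_init_permissions(&receivers[0], 0x8001,
 *					   FFA_DATA_ACCESS_RO,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 *	ffa_memory_access_init_permissions(&receivers[1], 0x8002,
 *					   FFA_DATA_ACCESS_RO,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 *
 * `receivers` and a receiver_count of 2 would then be passed to
 * `ffa_memory_region_init` in place of the single receiver above.
 */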

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init_single_receiver(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	struct ffa_memory_access receiver_permissions;

	ffa_memory_access_init_permissions(&receiver_permissions, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_retrieve_request_init(
		memory_region, handle, sender, &receiver_permissions, 1, tag,
		flags, type, cacheability, shareability);
}
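
/*
 * Example usage (an illustrative sketch; `tx_buffer` and the IDs are
 * hypothetical; a receiver would typically build the request in its TX
 * mailbox and pass the returned size to the FFA_MEM_RETRIEVE_REQ ABI):
 *
 *	uint32_t msg_size = ffa_memory_retrieve_request_init_single_receiver(
 *		tx_buffer, handle, sender_id, own_id, 0, 0,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *		FFA_MEMORY_INNER_SHAREABLE);
 */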

uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_attributes_t attributes = 0;
	uint32_t i;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif
	/* Zero the composite offset for all receivers. */
	for (i = 0U; i < receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset = 0U;
	}

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count * sizeof(struct ffa_memory_access);
}

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->flags = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
	return sizeof(struct ffa_memory_region);
}

uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length = count_to_copy *
				   sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
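
/*
 * Example fragmentation loop (an illustrative sketch; `tx_buffer`,
 * `constituents` and `constituent_count` are hypothetical, and `remaining`
 * starts as the value returned by one of the `ffa_memory_region_init*`
 * functions):
 *
 *	while (remaining > 0) {
 *		uint32_t offset = constituent_count - remaining;
 *		uint32_t fragment_length;
 *
 *		remaining = ffa_memory_fragment_init(
 *			(struct ffa_memory_region_constituent *)tx_buffer,
 *			HF_MAILBOX_SIZE, &constituents[offset], remaining,
 *			&fragment_length);
 *		(send the fragment via FFA_MEM_FRAG_TX, passing
 *		 `fragment_length`)
 *	}
 */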

static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for
 * `FFA_RXTX_MAP` forwarding.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
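
/*
 * Resulting layout (derived from the offsets computed above):
 *
 *	desc + 0               : struct ffa_endpoint_rx_tx_descriptor
 *	desc + desc->rx_offset : RX buffer composite, one constituent of
 *	                         HF_MAILBOX_SIZE / FFA_PAGE_SIZE pages
 *	desc + desc->tx_offset : TX buffer composite, one constituent of
 *	                         HF_MAILBOX_SIZE / FFA_PAGE_SIZE pages
 */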