/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

static void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}
/**
 * Initialises the receiver permissions in a memory transaction descriptor.
 */
void ffa_memory_access_init_permissions(
	struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
	enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	ffa_memory_receiver_flags_t flags)
{
	ffa_memory_access_permissions_t permissions = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	receiver->receiver_permissions.receiver = receiver_id;
	receiver->receiver_permissions.permissions = permissions;
	receiver->receiver_permissions.flags = flags;

	receiver->reserved_0 = 0ULL;
}
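
/*
 * Example usage (an illustrative sketch, not part of this file's API
 * surface): granting a hypothetical borrower 0x8001 read-write data access
 * with no instruction access, assuming `region` points at a suitably sized
 * transaction buffer.
 *
 *	ffa_memory_access_init_permissions(&region->receivers[0], 0x8001,
 *					   FFA_DATA_ACCESS_RW,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 */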

/**
 * Initialises the header of the given `ffa_memory_region`, not
 * including the composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, uint32_t receiver_count)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->reserved_0 = 0;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->reserved_1 = 0;
	memory_region->receiver_count = receiver_count;
}

/**
 * Copies as many as possible of the given constituents to the memory region's
 * composite descriptor and sets each receiver's composite memory region
 * offset.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structures should point to the same offset value.
	 */
	for (i = 0U; i < memory_region->receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			sizeof(struct ffa_memory_region) +
			memory_region->receiver_count *
				sizeof(struct ffa_memory_access);
	}

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, 1);
	ffa_memory_access_init_permissions(&memory_region->receivers[0],
					   receiver, data_access,
					   instruction_access, 0);

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}
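
/*
 * Example usage (a hedged sketch; `tx_buffer`, the endpoint IDs and the page
 * pointers are assumptions made for illustration): sharing two constituents
 * with a single borrower, then checking whether the message needs to be
 * fragmented.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)page_a, .page_count = 1},
 *		{.address = (uint64_t)page_b, .page_count = 2},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	uint32_t remaining = ffa_memory_region_init_single_receiver(
 *		tx_buffer, HF_MAILBOX_SIZE, sender_id, receiver_id,
 *		constituents, 2, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
 *		&total_length, &fragment_length);
 *
 * If `remaining` is non-zero, the remaining constituents must be sent in
 * follow-up fragments built with `ffa_memory_fragment_init` below.
 */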

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, 1);
	ffa_memory_access_init_permissions(&memory_region->receivers[0],
					   receiver, data_access,
					   instruction_access, 0);

	/*
	 * Offset 0 in this case means that the hypervisor should allocate the
	 * address ranges. This is the only configuration supported by Hafnium,
	 * as it enforces 1:1 mappings in the stage 2 page tables.
	 */
	memory_region->receivers[0].composite_memory_region_offset = 0;
	memory_region->receivers[0].reserved_0 = 0;

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count * sizeof(struct ffa_memory_access);
}
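
/*
 * Example usage (a hedged sketch; `handle`, `sender_id` and `own_id` are
 * assumed to be known from the preceding share): a borrower building the
 * retrieve request in its TX buffer, mirroring the attributes of the
 * original transaction.
 *
 *	uint32_t length = ffa_memory_retrieve_request_init(
 *		tx_buffer, handle, sender_id, own_id, 0, 0,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *		FFA_MEMORY_INNER_SHAREABLE);
 *
 * `length` would then be passed as both the total and the fragment length of
 * the `FFA_MEM_RETRIEVE_REQ` call, since the request fits in one fragment.
 */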

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->reserved_0 = 0;
	memory_region->flags = 0;
	memory_region->reserved_1 = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;

	return sizeof(struct ffa_memory_region);
}

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, 1);
	/*
	 * Initialised here because memory retrieve responses currently expect
	 * a single borrower to be specified.
	 */
	ffa_memory_access_init_permissions(&response->receivers[0], receiver, 0,
					   0, 0);

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}

uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length = count_to_copy *
				   sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
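
/*
 * Example continuation loop (an illustrative sketch; `remaining` is the value
 * returned by one of the init functions above, `tx_buffer` is assumed to be
 * cast to `struct ffa_memory_region_constituent *`, and the actual
 * `FFA_MEM_FRAG_TX` transmission is elided):
 *
 *	uint32_t sent = constituent_count - remaining;
 *
 *	while (remaining > 0) {
 *		uint32_t fragment_length;
 *		uint32_t left = ffa_memory_fragment_init(
 *			tx_buffer, HF_MAILBOX_SIZE, &constituents[sent],
 *			remaining, &fragment_length);
 *
 *		sent += remaining - left;
 *		remaining = left;
 *		// transmit `fragment_length` bytes via FFA_MEM_FRAG_TX here
 *	}
 */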

static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for an
 * `FFA_RXTX_MAP` forwarding.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
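
/*
 * For reference, the layout produced in the caller's buffer is,
 * schematically:
 *
 *	[ffa_endpoint_rx_tx_descriptor]
 *	[RX: ffa_composite_memory_region + 1 constituent]  <- desc->rx_offset
 *	[TX: ffa_composite_memory_region + 1 constituent]  <- desc->tx_offset
 *
 * Example usage (a hedged sketch; `vm_id`, `rx_page` and `tx_page` are
 * hypothetical):
 *
 *	struct ffa_endpoint_rx_tx_descriptor *desc = tx_buffer;
 *
 *	ffa_endpoint_rx_tx_descriptor_init(desc, vm_id, (uint64_t)rx_page,
 *					   (uint64_t)tx_page);
 */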