/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

static void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}

/**
 * Initialises the header of the given `ffa_memory_region`, not including the
 * composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
	ffa_memory_access_permissions_t permissions)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->reserved_0 = 0;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->reserved_1 = 0;
	memory_region->receiver_count = 1;
	memory_region->receivers[0].receiver_permissions.receiver = receiver;
	memory_region->receivers[0].receiver_permissions.permissions =
		permissions;
	memory_region->receivers[0].receiver_permissions.flags = 0;
	memory_region->receivers[0].reserved_0 = 0;
}

/**
 * Copies as many as possible of the given constituents to the memory region's
 * composite descriptor and sets each receiver's composite memory region
 * offset.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structures should point to the same offset value.
	 */
	for (i = 0U; i < memory_region->receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			sizeof(struct ffa_memory_region) +
			memory_region->receiver_count *
				sizeof(struct ffa_memory_access);
	}

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}
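
/*
 * Illustrative sketch (not part of the original file): if the constituents do
 * not all fit within `memory_region_max_size`, the helper above still records
 * the full constituent and page counts in the composite descriptor but copies
 * only the first fragment's worth of entries. A caller might use the return
 * value roughly as follows (names are hypothetical):
 *
 *	uint32_t remaining = ffa_memory_region_init_constituents(
 *		region, buffer_size, constituents, constituent_count,
 *		&total_length, &fragment_length);
 *	if (remaining > 0) {
 *		// The rest must be sent in later fragments, e.g. built with
 *		// ffa_memory_fragment_init() further down in this file.
 *	}
 */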

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver, permissions);

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}
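
/*
 * Usage sketch (illustrative only, not part of the original file): a sender
 * preparing an FFA_MEM_SHARE message in its TX buffer might call the function
 * above roughly as follows. Buffer names, macros and enum values are
 * assumptions based on `hf/ffa.h` and `hf/std.h`, not taken from this file.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)page, .page_count = 1},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	uint32_t remaining = ffa_memory_region_init_single_receiver(
 *		tx_buffer, HF_MAILBOX_SIZE, own_id, receiver_id, constituents,
 *		ARRAY_SIZE(constituents), 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
 *		&total_length, &fragment_length);
 *
 * `total_length` and `fragment_length` are then passed to the FFA_MEM_SHARE
 * (or LEND/DONATE) ABI call; a non-zero `remaining` means further fragments
 * are required.
 */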

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver, permissions);
	/*
	 * Offset 0 in this case means that the hypervisor should allocate the
	 * address ranges. This is the only configuration supported by Hafnium,
	 * as it enforces 1:1 mappings in the stage 2 page tables.
	 */
	memory_region->receivers[0].composite_memory_region_offset = 0;
	memory_region->receivers[0].reserved_0 = 0;

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count *
		       sizeof(struct ffa_memory_access);
}
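
/*
 * Usage sketch (illustrative only): the receiver of a shared region can build
 * the retrieve request in its TX buffer using the handle it received from the
 * sender. Names and enum values are hypothetical assumptions based on
 * `hf/ffa.h`.
 *
 *	uint32_t msg_size = ffa_memory_retrieve_request_init(
 *		tx_buffer, handle, sender_id, own_id, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE);
 *
 * `msg_size` is then used as both the total and fragment length of the
 * FFA_MEM_RETRIEVE_REQ call, since this request carries no constituents.
 */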

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->reserved_0 = 0;
	memory_region->flags = 0;
	memory_region->reserved_1 = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;

	return sizeof(struct ffa_memory_region);
}
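
/*
 * Usage sketch (illustrative only): when the hypervisor retrieves memory back
 * from the TEE it only needs the handle and the original sender, so the
 * request is just the bare header built above. Names are hypothetical.
 *
 *	uint32_t msg_size = ffa_memory_lender_retrieve_request_init(
 *		tx_buffer, handle, sender_id);
 *
 * `msg_size` equals `sizeof(struct ffa_memory_region)` and is used as both
 * the total and fragment length of the FFA_MEM_RETRIEVE_REQ sent to the TEE.
 */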

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, receiver, permissions);
	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}
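
/*
 * Usage sketch (illustrative only): when answering an FFA_MEM_RETRIEVE_REQ,
 * the relayer fills its mailbox with the retrieved region description. Names
 * are hypothetical.
 *
 *	if (!ffa_retrieved_memory_region_init(
 *		    rx_buffer, HF_MAILBOX_SIZE, sender_id, attributes, flags,
 *		    handle, receiver_id, permissions, page_count,
 *		    constituent_count, constituents,
 *		    fragment_constituent_count, &total_length,
 *		    &fragment_length)) {
 *		// `fragment_constituent_count` entries do not fit in the first
 *		// fragment; retry with fewer and send the remainder via
 *		// FFA_MEM_FRAG_TX.
 *	}
 */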

uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length =
			count_to_copy *
			sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
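
/*
 * Usage sketch (illustrative only): constituents left over from the first
 * fragment can be packed into follow-up FFA_MEM_FRAG_TX messages. With
 * `remaining` initialised from ffa_memory_region_init_single_receiver() above,
 * a hypothetical loop over the remainder could look like (names assumed, not
 * from this file):
 *
 *	while (remaining > 0) {
 *		uint32_t fragment_length;
 *		uint32_t sent = constituent_count - remaining;
 *
 *		remaining = ffa_memory_fragment_init(
 *			(struct ffa_memory_region_constituent *)tx_buffer,
 *			HF_MAILBOX_SIZE, &constituents[sent], remaining,
 *			&fragment_length);
 *		// Send `fragment_length` bytes with FFA_MEM_FRAG_TX and the
 *		// transaction handle before building the next fragment.
 *	}
 */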

static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for an
 * `FFA_RXTX_MAP` forwarding.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
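
/*
 * Illustrative note (not part of the original file): after the call above,
 * the descriptor in memory is laid out as
 *
 *	[ffa_endpoint_rx_tx_descriptor]
 *	[ffa_composite_memory_region + 1 constituent]  <- rx_offset, RX buffer
 *	[ffa_composite_memory_region + 1 constituent]  <- tx_offset, TX buffer
 *
 * A hypothetical caller forwarding its mailbox addresses might use it as
 * (names assumed, not from this file):
 *
 *	struct ffa_endpoint_rx_tx_descriptor *desc = tx_buffer;
 *
 *	ffa_endpoint_rx_tx_descriptor_init(desc, own_id, (uint64_t)recv_page,
 *					   (uint64_t)send_page);
 *	// The descriptor is then passed on in the forwarded FFA_RXTX_MAP
 *	// form defined by the FF-A specification.
 */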