/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

static void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}

/**
 * Initialises the header of the given `ffa_memory_region`, not including the
 * composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
	ffa_memory_access_permissions_t permissions)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->reserved_0 = 0;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->reserved_1 = 0;
	memory_region->receiver_count = 1;
	memory_region->receivers[0].receiver_permissions.receiver = receiver;
	memory_region->receivers[0].receiver_permissions.permissions =
		permissions;
	memory_region->receivers[0].receiver_permissions.flags = 0;
	memory_region->receivers[0].reserved_0 = 0;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t count_to_copy;
	uint32_t i;
	uint32_t constituents_offset;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver, permissions);
	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	memory_region->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		memory_region->receiver_count *
			sizeof(struct ffa_memory_access);

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < constituent_count; ++i) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}
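
/*
 * Illustrative sketch (assumptions noted): a sender could use
 * `ffa_memory_region_init` roughly as below to build the first fragment of an
 * FFA_MEM_SHARE message in its TX buffer. `tx_buffer`, `own_id`,
 * `receiver_id` and `pages` are hypothetical names; the enum values are the
 * access and attribute constants declared in `hf/ffa.h`.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)pages, .page_count = 2},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	uint32_t remaining = ffa_memory_region_init(
 *		(struct ffa_memory_region *)tx_buffer, HF_MAILBOX_SIZE, own_id,
 *		receiver_id, constituents, 1, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
 *		&total_length, &fragment_length);
 *
 * A non-zero `remaining` means the rest of the constituents must follow in
 * further fragments (see `ffa_memory_fragment_init` below).
 */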

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver, permissions);
	/*
	 * Offset 0 in this case means that the hypervisor should allocate the
	 * address ranges. This is the only configuration supported by Hafnium,
	 * as it enforces 1:1 mappings in the stage 2 page tables.
	 */
	memory_region->receivers[0].composite_memory_region_offset = 0;
	memory_region->receivers[0].reserved_0 = 0;

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count *
		       sizeof(struct ffa_memory_access);
}
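
/*
 * Illustrative sketch (assumptions noted): a receiver could fill its TX
 * buffer with a retrieve request and pass the returned length to
 * FFA_MEM_RETRIEVE_REQ. `tx_buffer`, `handle`, `sender_id` and `own_id` are
 * hypothetical names; `ffa_mem_retrieve_req` is assumed to be the usual ABI
 * wrapper taking the total and fragment lengths.
 *
 *	uint32_t length = ffa_memory_retrieve_request_init(
 *		(struct ffa_memory_region *)tx_buffer, handle, sender_id,
 *		own_id, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE);
 *	ffa_mem_retrieve_req(length, length);
 */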

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->reserved_0 = 0;
	memory_region->flags = 0;
	memory_region->reserved_1 = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;

	return sizeof(struct ffa_memory_region);
}
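
/*
 * Illustrative sketch (assumptions noted): only the sender and handle are
 * filled in here, so the hypervisor can ask the TEE for a region's
 * description while the TEE identifies the transaction by its handle.
 * `tee_buffer`, `handle` and `owner_id` are hypothetical names.
 *
 *	uint32_t length = ffa_memory_lender_retrieve_request_init(
 *		(struct ffa_memory_region *)tee_buffer, handle, owner_id);
 */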

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, receiver, permissions);
	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}
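
/*
 * Illustrative sketch (assumptions noted): a relayer answering an
 * FFA_MEM_RETRIEVE_REQ could write the response into the retriever's RX
 * buffer. The lower-case variable names below are hypothetical and stand for
 * state the relayer already tracks for the transaction.
 *
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *	bool fits = ffa_retrieved_memory_region_init(
 *		(struct ffa_memory_region *)rx_buffer, HF_MAILBOX_SIZE,
 *		sender_id, attributes, 0, handle, receiver_id, permissions,
 *		page_count, constituent_count, constituents,
 *		constituent_count, &total_length, &fragment_length);
 *
 * A false return means not all constituents fit in the first fragment; the
 * caller would retry with a smaller fragment_constituent_count and deliver
 * the rest via FFA_MEM_FRAG_RX.
 */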

/**
 * Copies as many as possible of the given constituents into the given
 * fragment buffer, for use as a subsequent fragment of a memory sharing
 * message.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameter) the size in bytes of the fragment that was written.
 */
uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length =
			count_to_copy *
			sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
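
/*
 * Illustrative sketch (assumptions noted): after `ffa_memory_region_init`
 * returned a non-zero remainder, the leftover constituents could be sent one
 * fragment at a time. `tx_buffer`, `constituents`, `count`, `handle` and
 * `remaining` are hypothetical names; `ffa_mem_frag_tx` is assumed to be the
 * usual ABI wrapper for FFA_MEM_FRAG_TX.
 *
 *	while (remaining > 0) {
 *		uint32_t fragment_length;
 *		uint32_t sent = count - remaining;
 *
 *		remaining = ffa_memory_fragment_init(
 *			(struct ffa_memory_region_constituent *)tx_buffer,
 *			HF_MAILBOX_SIZE, &constituents[sent], remaining,
 *			&fragment_length);
 *		ffa_mem_frag_tx(handle, fragment_length);
 *	}
 */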

/**
 * Initialises the given composite memory region to describe a single
 * constituent of `page_count` pages starting at `address`.
 */
static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used when
 * forwarding an `FFA_RXTX_MAP` call.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * The RX buffer's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is a
	 * multiple of 16 bytes (see the static_assert above), so the offset is
	 * 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * The TX buffer's composite descriptor is allocated after the RX
	 * descriptor. `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are verified to be
	 * multiples of 16 bytes by the asserts in `ffa_memory.c`.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
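
/*
 * Illustrative sketch (assumptions noted): when forwarding a VM's
 * FFA_RXTX_MAP, the descriptor could be built in the buffer that is passed on
 * with the forwarded call. `spmc_tx_buffer`, `vm_id`, `rx_pa` and `tx_pa` are
 * hypothetical names.
 *
 *	ffa_endpoint_rx_tx_descriptor_init(
 *		(struct ffa_endpoint_rx_tx_descriptor *)spmc_tx_buffer, vm_id,
 *		rx_pa, tx_pa);
 */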