blob: efa3193f92a482ac44279e0b4acedaa2c33e8a28 [file] [log] [blame]
/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
9#include "hf/ffa.h"
10
11#include <stddef.h>
12
13#include "hf/types.h"
14
15#if defined(__linux__) && defined(__KERNEL__)
16#include <linux/kernel.h>
17#include <linux/string.h>
18
19#else
20#include "hf/std.h"
21#endif
22
23/**
Andrew Walbranca808b12020-05-15 17:22:28 +010024 * Initialises the header of the given `ffa_memory_region`, not including the
25 * composite memory region offset.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010026 */
Andrew Walbranca808b12020-05-15 17:22:28 +010027static void ffa_memory_region_init_header(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010028 struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
29 ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
30 ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
Andrew Walbranca808b12020-05-15 17:22:28 +010031 ffa_memory_access_permissions_t permissions)
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010032{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010033 memory_region->sender = sender;
34 memory_region->attributes = attributes;
35 memory_region->reserved_0 = 0;
36 memory_region->flags = flags;
37 memory_region->handle = handle;
38 memory_region->tag = tag;
39 memory_region->reserved_1 = 0;
40 memory_region->receiver_count = 1;
41 memory_region->receivers[0].receiver_permissions.receiver = receiver;
42 memory_region->receivers[0].receiver_permissions.permissions =
43 permissions;
44 memory_region->receivers[0].receiver_permissions.flags = 0;
Andrew Walbranca808b12020-05-15 17:22:28 +010045 memory_region->receivers[0].reserved_0 = 0;
46}
47
48/**
49 * Initialises the given `ffa_memory_region` and copies as many as possible of
50 * the given constituents to it.
51 *
52 * Returns the number of constituents remaining which wouldn't fit, and (via
53 * return parameters) the size in bytes of the first fragment of data copied to
54 * `memory_region` (attributes, constituents and memory region header size), and
55 * the total size of the memory sharing message including all constituents.
56 */
57uint32_t ffa_memory_region_init(
58 struct ffa_memory_region *memory_region, size_t memory_region_max_size,
59 ffa_vm_id_t sender, ffa_vm_id_t receiver,
60 const struct ffa_memory_region_constituent constituents[],
61 uint32_t constituent_count, uint32_t tag,
62 ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
63 enum ffa_instruction_access instruction_access,
64 enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
65 enum ffa_memory_shareability shareability, uint32_t *total_length,
66 uint32_t *fragment_length)
67{
68 ffa_memory_access_permissions_t permissions = 0;
69 ffa_memory_attributes_t attributes = 0;
70 struct ffa_composite_memory_region *composite_memory_region;
71 uint32_t fragment_max_constituents;
72 uint32_t count_to_copy;
73 uint32_t i;
74 uint32_t constituents_offset;
75
76 /* Set memory region's permissions. */
77 ffa_set_data_access_attr(&permissions, data_access);
78 ffa_set_instruction_access_attr(&permissions, instruction_access);
79
80 /* Set memory region's page attributes. */
81 ffa_set_memory_type_attr(&attributes, type);
82 ffa_set_memory_cacheability_attr(&attributes, cacheability);
83 ffa_set_memory_shareability_attr(&attributes, shareability);
84
85 ffa_memory_region_init_header(memory_region, sender, attributes, flags,
86 0, tag, receiver, permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010087 /*
88 * Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
89 * ffa_memory_access)` must both be multiples of 16 (as verified by the
90 * asserts in `ffa_memory.c`, so it is guaranteed that the offset we
91 * calculate here is aligned to a 64-bit boundary and so 64-bit values
92 * can be copied without alignment faults.
93 */
94 memory_region->receivers[0].composite_memory_region_offset =
95 sizeof(struct ffa_memory_region) +
96 memory_region->receiver_count *
97 sizeof(struct ffa_memory_access);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010098
99 composite_memory_region =
100 ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100101 composite_memory_region->page_count = 0;
102 composite_memory_region->constituent_count = constituent_count;
103 composite_memory_region->reserved_0 = 0;
104
Andrew Walbranca808b12020-05-15 17:22:28 +0100105 constituents_offset =
106 memory_region->receivers[0].composite_memory_region_offset +
107 sizeof(struct ffa_composite_memory_region);
108 fragment_max_constituents =
109 (memory_region_max_size - constituents_offset) /
110 sizeof(struct ffa_memory_region_constituent);
111
112 count_to_copy = constituent_count;
113 if (count_to_copy > fragment_max_constituents) {
114 count_to_copy = fragment_max_constituents;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100115 }
116
Andrew Walbranca808b12020-05-15 17:22:28 +0100117 for (i = 0; i < constituent_count; ++i) {
118 if (i < count_to_copy) {
119 composite_memory_region->constituents[i] =
120 constituents[i];
121 }
122 composite_memory_region->page_count +=
123 constituents[i].page_count;
124 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100125
Andrew Walbranca808b12020-05-15 17:22:28 +0100126 if (total_length != NULL) {
127 *total_length =
128 constituents_offset +
129 composite_memory_region->constituent_count *
130 sizeof(struct ffa_memory_region_constituent);
131 }
132 if (fragment_length != NULL) {
133 *fragment_length =
134 constituents_offset +
135 count_to_copy *
136 sizeof(struct ffa_memory_region_constituent);
137 }
138
139 return composite_memory_region->constituent_count - count_to_copy;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100140}
141
142/**
Andrew Walbranca808b12020-05-15 17:22:28 +0100143 * Initialises the given `ffa_memory_region` to be used for an
144 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
145 *
146 * Returns the size of the message written.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100147 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100148uint32_t ffa_memory_retrieve_request_init(
149 struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
150 ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
151 ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
152 enum ffa_instruction_access instruction_access,
153 enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
154 enum ffa_memory_shareability shareability)
155{
156 ffa_memory_access_permissions_t permissions = 0;
157 ffa_memory_attributes_t attributes = 0;
158
159 /* Set memory region's permissions. */
160 ffa_set_data_access_attr(&permissions, data_access);
161 ffa_set_instruction_access_attr(&permissions, instruction_access);
162
163 /* Set memory region's page attributes. */
164 ffa_set_memory_type_attr(&attributes, type);
165 ffa_set_memory_cacheability_attr(&attributes, cacheability);
166 ffa_set_memory_shareability_attr(&attributes, shareability);
167
Andrew Walbranca808b12020-05-15 17:22:28 +0100168 ffa_memory_region_init_header(memory_region, sender, attributes, flags,
169 handle, tag, receiver, permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100170 /*
171 * Offset 0 in this case means that the hypervisor should allocate the
172 * address ranges. This is the only configuration supported by Hafnium,
173 * as it enforces 1:1 mappings in the stage 2 page tables.
174 */
175 memory_region->receivers[0].composite_memory_region_offset = 0;
176 memory_region->receivers[0].reserved_0 = 0;
177
178 return sizeof(struct ffa_memory_region) +
179 memory_region->receiver_count * sizeof(struct ffa_memory_access);
180}
181
Andrew Walbranca808b12020-05-15 17:22:28 +0100182/**
183 * Initialises the given `ffa_memory_region` to be used for an
184 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
185 *
186 * Returns the size of the message written.
187 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100188uint32_t ffa_memory_lender_retrieve_request_init(
189 struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
190 ffa_vm_id_t sender)
191{
192 memory_region->sender = sender;
193 memory_region->attributes = 0;
194 memory_region->reserved_0 = 0;
195 memory_region->flags = 0;
196 memory_region->reserved_1 = 0;
197 memory_region->handle = handle;
198 memory_region->tag = 0;
199 memory_region->receiver_count = 0;
200
201 return sizeof(struct ffa_memory_region);
202}
203
Andrew Walbranca808b12020-05-15 17:22:28 +0100204/**
205 * Initialises the given `ffa_memory_region` to be used for an
206 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
207 * fragment.
208 *
209 * Returns true on success, or false if the given constituents won't all fit in
210 * the first fragment.
211 */
212bool ffa_retrieved_memory_region_init(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100213 struct ffa_memory_region *response, size_t response_max_size,
214 ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
215 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
216 ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +0100217 uint32_t page_count, uint32_t total_constituent_count,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100218 const struct ffa_memory_region_constituent constituents[],
Andrew Walbranca808b12020-05-15 17:22:28 +0100219 uint32_t fragment_constituent_count, uint32_t *total_length,
220 uint32_t *fragment_length)
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100221{
Andrew Walbranca808b12020-05-15 17:22:28 +0100222 struct ffa_composite_memory_region *composite_memory_region;
223 uint32_t i;
224 uint32_t constituents_offset;
225
226 ffa_memory_region_init_header(response, sender, attributes, flags,
227 handle, 0, receiver, permissions);
228 /*
229 * Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
230 * ffa_memory_access)` must both be multiples of 16 (as verified by the
231 * asserts in `ffa_memory.c`, so it is guaranteed that the offset we
232 * calculate here is aligned to a 64-bit boundary and so 64-bit values
233 * can be copied without alignment faults.
234 */
235 response->receivers[0].composite_memory_region_offset =
236 sizeof(struct ffa_memory_region) +
237 response->receiver_count * sizeof(struct ffa_memory_access);
238
239 composite_memory_region = ffa_memory_region_get_composite(response, 0);
240 composite_memory_region->page_count = page_count;
241 composite_memory_region->constituent_count = total_constituent_count;
242 composite_memory_region->reserved_0 = 0;
243
244 constituents_offset =
245 response->receivers[0].composite_memory_region_offset +
246 sizeof(struct ffa_composite_memory_region);
247 if (constituents_offset +
248 fragment_constituent_count *
249 sizeof(struct ffa_memory_region_constituent) >
250 response_max_size) {
251 return false;
252 }
253
254 for (i = 0; i < fragment_constituent_count; ++i) {
255 composite_memory_region->constituents[i] = constituents[i];
256 }
257
258 if (total_length != NULL) {
259 *total_length =
260 constituents_offset +
261 composite_memory_region->constituent_count *
262 sizeof(struct ffa_memory_region_constituent);
263 }
264 if (fragment_length != NULL) {
265 *fragment_length =
266 constituents_offset +
267 fragment_constituent_count *
268 sizeof(struct ffa_memory_region_constituent);
269 }
270
271 return true;
272}
273
274uint32_t ffa_memory_fragment_init(
275 struct ffa_memory_region_constituent *fragment,
276 size_t fragment_max_size,
277 const struct ffa_memory_region_constituent constituents[],
278 uint32_t constituent_count, uint32_t *fragment_length)
279{
280 uint32_t fragment_max_constituents =
281 fragment_max_size /
282 sizeof(struct ffa_memory_region_constituent);
283 uint32_t count_to_copy = constituent_count;
284 uint32_t i;
285
286 if (count_to_copy > fragment_max_constituents) {
287 count_to_copy = fragment_max_constituents;
288 }
289
290 for (i = 0; i < count_to_copy; ++i) {
291 fragment[i] = constituents[i];
292 }
293
294 if (fragment_length != NULL) {
295 *fragment_length = count_to_copy *
296 sizeof(struct ffa_memory_region_constituent);
297 }
298
299 return constituent_count - count_to_copy;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100300}