/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/std.h"
#endif

/**
 * Initialises the header of the given `ffa_memory_region`, not including the
 * composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
	ffa_memory_access_permissions_t permissions)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->reserved_0 = 0;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->reserved_1 = 0;
	memory_region->receiver_count = 1;
	memory_region->receivers[0].receiver_permissions.receiver = receiver;
	memory_region->receivers[0].receiver_permissions.permissions =
		permissions;
	memory_region->receivers[0].receiver_permissions.flags = 0;
	memory_region->receivers[0].reserved_0 = 0;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t count_to_copy;
	uint32_t i;
	uint32_t constituents_offset;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver, permissions);
	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	memory_region->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		memory_region->receiver_count *
			sizeof(struct ffa_memory_access);

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < constituent_count; ++i) {
		if (i < count_to_copy) {
			composite_memory_region->constituents[i] =
				constituents[i];
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}
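
/*
 * Example usage (illustrative sketch, not part of the upstream file): building
 * a single-constituent memory sharing message in a TX buffer. The buffer
 * `tx_buffer`, its size `HF_MAILBOX_SIZE`, the VM IDs and the page address are
 * assumptions made up for the example; the enum values are assumed to match
 * the declarations in `hf/ffa.h`.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)page, .page_count = 1},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *
 *	ffa_memory_region_init(
 *		(struct ffa_memory_region *)tx_buffer, HF_MAILBOX_SIZE,
 *		own_vm_id, recipient_vm_id, constituents, 1, 0, 0,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *		FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length);
 *
 * If `fragment_length < total_length`, the remaining constituents would be
 * sent with `ffa_memory_fragment_init` below.
 */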

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_access_permissions_t permissions = 0;
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's permissions. */
	ffa_set_data_access_attr(&permissions, data_access);
	ffa_set_instruction_access_attr(&permissions, instruction_access);

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver, permissions);
	/*
	 * Offset 0 in this case means that the hypervisor should allocate the
	 * address ranges. This is the only configuration supported by Hafnium,
	 * as it enforces 1:1 mappings in the stage 2 page tables.
	 */
	memory_region->receivers[0].composite_memory_region_offset = 0;
	memory_region->receivers[0].reserved_0 = 0;

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count *
		       sizeof(struct ffa_memory_access);
}
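
/*
 * Example usage (illustrative sketch, not part of the upstream file): a
 * receiver building the retrieve request for a handle it was told about in an
 * earlier message. `tx_buffer`, `handle` and the VM IDs are assumptions made
 * up for the example; the enum values are assumed to match `hf/ffa.h`.
 *
 *	uint32_t request_length = ffa_memory_retrieve_request_init(
 *		(struct ffa_memory_region *)tx_buffer, handle, sender_vm_id,
 *		own_vm_id, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE);
 *
 * `request_length` would then be passed as both the total and fragment length
 * of the FFA_MEM_RETRIEVE_REQ call, since such a request fits in one fragment.
 */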

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->reserved_0 = 0;
	memory_region->flags = 0;
	memory_region->reserved_1 = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;

	return sizeof(struct ffa_memory_region);
}
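
/*
 * Example usage (illustrative sketch, not part of the upstream file): the
 * hypervisor asking the TEE for the details of a handle, for instance while
 * reclaiming memory. `tee_tx_buffer`, `handle` and `owner_vm_id` are
 * assumptions made up for the example.
 *
 *	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
 *		(struct ffa_memory_region *)tee_tx_buffer, handle,
 *		owner_vm_id);
 */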

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, receiver, permissions);
	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}

/**
 * Initialises the given memory transaction fragment and copies as many as
 * possible of the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via a
 * return parameter) the size in bytes of the fragment written.
 */
uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		fragment[i] = constituents[i];
	}

	if (fragment_length != NULL) {
		*fragment_length =
			count_to_copy *
			sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
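
/*
 * Example usage (illustrative sketch, not part of the upstream file): sending
 * the constituents which did not fit in the first fragment built by
 * `ffa_memory_region_init`. `tx_buffer`, `HF_MAILBOX_SIZE`, `constituents`,
 * `constituent_count` and `remaining` are assumptions made up for the example.
 *
 *	uint32_t fragment_length;
 *	uint32_t not_yet_sent = ffa_memory_fragment_init(
 *		(struct ffa_memory_region_constituent *)tx_buffer,
 *		HF_MAILBOX_SIZE,
 *		&constituents[constituent_count - remaining], remaining,
 *		&fragment_length);
 *
 * Each such fragment would be passed to an FFA_MEM_FRAG_TX call until the
 * return value reaches zero.
 */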