/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/other_world.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a SHARE
 * or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

/**
 * The maximum number of fragments into which a memory sharing message may be
 * broken.
 */
#define MAX_FRAGMENTS 20

static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
	      "struct ffa_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
	      "struct ffa_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
	      "struct ffa_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
	      "struct ffa_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct ffa_memory_share_state {
	ffa_memory_handle_t handle;

	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct ffa_memory_region *memory_region;

	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];

	/** The number of constituents in each fragment. */
	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];

	/**
	 * The number of valid elements in the `fragments` and
	 * `fragment_constituent_counts` arrays.
	 */
	uint32_t fragment_count;

	/**
	 * The FF-A function used for sharing the memory. Must be one of
	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * The sender's original mode before invoking the FF-A function for
	 * sharing the memory. This is used to restore the original
	 * configuration when the sender invokes FFA_MEM_RECLAIM_32.
	 */
	uint32_t sender_orig_mode;

	/**
	 * True if all the fragments of this sharing request have been sent and
	 * Hafnium has updated the sender page table accordingly.
	 */
	bool sending_complete;

	/**
	 * How many fragments of the memory region each recipient has retrieved
	 * so far. The order of this array matches the order of the endpoint
	 * memory access descriptors in the memory region descriptor. Any
	 * entries beyond the receiver_count will always be 0.
	 */
	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct ffa_memory_share_state *share_states;
};

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Buffer for retrieving memory region information from the TEE for when a
 * region is reclaimed by a VM. Access to this buffer must be guarded by the VM
 * lock of the TEE VM.
 */
alignas(PAGE_SIZE) static uint8_t
	tee_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
static bool allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle,
	struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	CHECK(share_states.share_states != NULL);
	CHECK(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				allocated_state->handle =
					i |
					FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			} else {
				allocated_state->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}
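
/*
 * Illustrative note (not upstream documentation): when Hafnium allocates the
 * handle itself, it simply tags the share state index with the allocator bit,
 * so for example index 3 would yield the handle
 * (3 | FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR). get_share_state() below relies
 * on this by masking off FFA_MEMORY_HANDLE_ALLOCATOR_MASK to recover the index
 * without a linear scan.
 */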

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
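
/*
 * Minimal usage sketch (illustrative only, mirroring how these functions are
 * used by the helpers in this file; no new API is assumed):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *
 *	... look up, allocate or free share states ...
 *
 *	share_states_unlock(&share_states);
 *
 * Clearing the pointer in share_states_unlock() means any use after unlocking
 * trips the CHECKs in the helpers that take a `struct share_states_locked`.
 */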

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
static bool get_share_state(struct share_states_locked share_states,
			    ffa_memory_handle_t handle,
			    struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint32_t index;

	CHECK(share_states.share_states != NULL);
	CHECK(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct ffa_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	uint32_t i;

	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
}

/** Checks whether the given share state has been fully sent. */
static bool share_state_sending_complete(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}
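
/*
 * Worked example (values chosen purely for exposition): if the composite
 * memory region declares 300 constituents and two fragments carrying 200 and
 * 100 constituents have been received, the totals match and sending is
 * complete; with only the first fragment received (200 of 300) it is not.
 */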

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
static uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
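
/*
 * Illustrative arithmetic (a sketch, assuming the 16-byte constituent
 * descriptor of FF-A v1.0; the starting offset below is an example value, as
 * the real one depends on the memory region header): if the constituent array
 * begins at offset 48 and one fragment of 10 constituents has been received,
 * the next fragment is expected at offset 48 + 10 * 16 = 208 bytes into the
 * transaction descriptor.
 */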

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x, tag %u, to "
	     "%u recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			dlog("%#x: ", share_states[i].handle);
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}
402
Andrew Walbran475c1452020-02-07 13:22:22 +0000403/* TODO: Add device attributes: GRE, cacheability, shareability. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100404static inline uint32_t ffa_memory_permissions_to_mode(
405 ffa_memory_access_permissions_t permissions)
Andrew Walbran475c1452020-02-07 13:22:22 +0000406{
407 uint32_t mode = 0;
408
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100409 switch (ffa_get_data_access_attr(permissions)) {
410 case FFA_DATA_ACCESS_RO:
Andrew Walbran475c1452020-02-07 13:22:22 +0000411 mode = MM_MODE_R;
412 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100413 case FFA_DATA_ACCESS_RW:
414 case FFA_DATA_ACCESS_NOT_SPECIFIED:
Andrew Walbran475c1452020-02-07 13:22:22 +0000415 mode = MM_MODE_R | MM_MODE_W;
416 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100417 case FFA_DATA_ACCESS_RESERVED:
418 panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
Andrew Walbrana65a1322020-04-06 19:32:32 +0100419 }
420
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100421 switch (ffa_get_instruction_access_attr(permissions)) {
422 case FFA_INSTRUCTION_ACCESS_NX:
Andrew Walbran475c1452020-02-07 13:22:22 +0000423 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100424 case FFA_INSTRUCTION_ACCESS_X:
425 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
Andrew Walbrana65a1322020-04-06 19:32:32 +0100426 mode |= MM_MODE_X;
427 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100428 case FFA_INSTRUCTION_ACCESS_RESERVED:
429 panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESVERVED.");
Andrew Walbran475c1452020-02-07 13:22:22 +0000430 }
431
432 return mode;
433}
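
/*
 * Example mapping (a sketch of the conversion above, for illustration only):
 * FFA_DATA_ACCESS_RO combined with FFA_INSTRUCTION_ACCESS_X converts to
 * (MM_MODE_R | MM_MODE_X), while FFA_DATA_ACCESS_RW with
 * FFA_INSTRUCTION_ACCESS_NX converts to (MM_MODE_R | MM_MODE_W).
 */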

/**
 * Gets the current mode in the stage-2 page table of the given VM of all the
 * pages in the given constituents, if they all have the same mode, or returns
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
					    &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtains the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t required_from_mode =
		ffa_memory_permissions_to_mode(permissions);
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
		dlog_verbose(
			"Sender tried to send memory with permissions which "
			"required mode %#x but only had %#x itself.\n",
			required_from_mode, *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
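
/*
 * Informal summary of the sender-side transitions above (derived from the
 * switch statement rather than from the FF-A specification text): after a
 * successful DONATE the sender's mapping becomes invalid and unowned, after
 * LEND it becomes invalid but remains owned, and after SHARE it stays valid
 * and owned but is marked shared.
 */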

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtains the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return ffa_error(FFA_DENIED);
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	int mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
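
/*
 * Minimal usage sketch (mirrors the prepare/commit pattern used by the update
 * helpers later in this file; the parameter names are illustrative):
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments,
 *					   constituent_counts, fragment_count,
 *					   mode, page_pool, false)) {
 *		return ffa_error(FFA_NO_MEMORY);
 *	}
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments,
 *					    constituent_counts, fragment_count,
 *					    mode, page_pool, true));
 *
 * The commit == true call cannot fail because the commit == false call has
 * already reserved every page table page the mapping needs.
 */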

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings to
	 * the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request;
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, ffa_memory_access_permissions_t permissions,
	struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
	struct vm *from = from_locked.vm;
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, permissions,
					&orig_from_mode, fragments,
					fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}

/**
 * Reclaims the given memory from the TEE. To do this, space is first reserved
 * in the <to> VM's page table, then the reclaim request is sent on to the TEE,
 * then (if that is successful) the memory is mapped back into the <to> VM's
 * page table.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_tee_reclaim_check_update(
	struct vm_locked to_locked, ffa_memory_handle_t handle,
	struct ffa_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
	struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;
	ffa_memory_region_flags_t tee_flags;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	if (!is_aligned(constituents, 8)) {
		dlog_verbose("Constituents not aligned.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
					    &constituents, &constituent_count,
					    1, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(to_locked, &constituents,
					   &constituent_count, 1, to_mode,
					   page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Forward the request to the TEE and see what happens.
	 */
	tee_flags = 0;
	if (clear) {
		tee_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
	}
	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_MEM_RECLAIM_32,
				   .arg1 = (uint32_t)handle,
				   .arg2 = (uint32_t)(handle >> 32),
				   .arg3 = tee_flags});

	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose(
			"Got %#x (%d) from TEE in response to FFA_MEM_RECLAIM, "
			"expected FFA_SUCCESS.\n",
			ret.func, ret.arg2);
		goto out;
	}

	/*
	 * The TEE was happy with it, so complete the reclaim by mapping the
	 * memory into the recipient. This won't allocate because the
	 * transaction was already prepared above, so it doesn't need to use the
	 * `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
					    &constituent_count, 1, to_mode,
					    page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}
1188
Andrew Walbran996d1d12020-05-27 14:08:43 +01001189static struct ffa_value ffa_relinquish_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001190 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001191 struct ffa_memory_region_constituent **fragments,
1192 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1193 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001194{
1195 uint32_t orig_from_mode;
1196 uint32_t from_mode;
1197 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001198 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001199
Andrew Walbranca808b12020-05-15 17:22:28 +01001200 ret = ffa_relinquish_check_transition(
1201 from_locked, &orig_from_mode, fragments,
1202 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001203 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001204 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001205 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001206 }
1207
1208 /*
1209 * Create a local pool so any freed memory can't be used by another
1210 * thread. This is to ensure the original mapping can be restored if the
1211 * clear fails.
1212 */
1213 mpool_init_with_fallback(&local_page_pool, page_pool);
1214
1215 /*
1216 * First reserve all required memory for the new page table entries
1217 * without committing, to make sure the entire operation will succeed
1218 * without exhausting the page pool.
1219 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001220 if (!ffa_region_group_identity_map(
1221 from_locked, fragments, fragment_constituent_counts,
1222 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001223 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001224 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001225 goto out;
1226 }
1227
1228 /*
1229 * Update the mapping for the sender. This won't allocate because the
1230 * transaction was already prepared above, but may free pages in the
1231 * case that a whole block is being unmapped that was previously
1232 * partially mapped.
1233 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001234 CHECK(ffa_region_group_identity_map(
1235 from_locked, fragments, fragment_constituent_counts,
1236 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001237
1238 /* Clear the memory so no VM or device can see the previous contents. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001239 if (clear && !ffa_clear_memory_constituents(
Andrew Walbranca808b12020-05-15 17:22:28 +01001240 fragments, fragment_constituent_counts,
1241 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001242 /*
1243 * On failure, roll back by returning memory to the sender. This
1244 * may allocate pages which were previously freed into
1245 * `local_page_pool` by the call above, but will never allocate
1246 * more pages than that so can never fail.
1247 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001248 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001249 from_locked, fragments, fragment_constituent_counts,
1250 fragment_count, orig_from_mode, &local_page_pool,
1251 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001252
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001253 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001254 goto out;
1255 }
1256
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001257 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001258
1259out:
1260 mpool_fini(&local_page_pool);
1261
1262 /*
1263 * Tidy up the page table by reclaiming failed mappings (if there was an
1264 * error) or merging entries into blocks where possible (on success).
1265 */
1266 mm_vm_defrag(&from_locked.vm->ptable, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001267
1268 return ret;
1269}
1270
1271/**
 * Completes a memory sending operation by checking that it is valid, updating
1273 * the sender page table, and then either marking the share state as having
1274 * completed sending (on success) or freeing it (on failure).
1275 *
1276 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1277 */
1278static struct ffa_value ffa_memory_send_complete(
1279 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001280 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1281 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001282{
1283 struct ffa_memory_region *memory_region = share_state->memory_region;
1284 struct ffa_value ret;
1285
1286 /* Lock must be held. */
1287 CHECK(share_states.share_states != NULL);
1288
1289 /* Check that state is valid in sender page table and update. */
1290 ret = ffa_send_check_update(
1291 from_locked, share_state->fragments,
1292 share_state->fragment_constituent_counts,
1293 share_state->fragment_count, share_state->share_func,
1294 memory_region->receivers[0].receiver_permissions.permissions,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001295 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1296 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001297 if (ret.func != FFA_SUCCESS_32) {
1298 /*
1299 * Free share state, it failed to send so it can't be retrieved.
1300 */
1301 dlog_verbose("Complete failed, freeing share state.\n");
1302 share_state_free(share_states, share_state, page_pool);
1303 return ret;
1304 }
1305
1306 share_state->sending_complete = true;
1307 dlog_verbose("Marked sending complete.\n");
1308
1309 return ffa_mem_success(share_state->handle);
1310}
1311
1312/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001313 * Check that the given `memory_region` represents a valid memory send request
 * of the given `share_func` type, return the permissions via the output
 * parameter, and update them in `memory_region` if necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001316 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001317 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001318 * not.
1319 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001320static struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001321 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1322 uint32_t memory_share_length, uint32_t fragment_length,
1323 uint32_t share_func, ffa_memory_access_permissions_t *permissions)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001324{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001325 struct ffa_composite_memory_region *composite;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001326 uint32_t receivers_length;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001327 uint32_t constituents_offset;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001328 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001329 enum ffa_data_access data_access;
1330 enum ffa_instruction_access instruction_access;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001331
Andrew Walbrana65a1322020-04-06 19:32:32 +01001332 CHECK(permissions != NULL);
1333
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001334 /*
1335 * This should already be checked by the caller, just making the
1336 * assumption clear here.
1337 */
1338 CHECK(memory_region->receiver_count == 1);
1339
Andrew Walbrana65a1322020-04-06 19:32:32 +01001340 /* The sender must match the message sender. */
1341 if (memory_region->sender != from_locked.vm->id) {
1342 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001343 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001344 }
1345
Andrew Walbrana65a1322020-04-06 19:32:32 +01001346 /*
1347 * Ensure that the composite header is within the memory bounds and
1348 * doesn't overlap the first part of the message.
1349 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001350 receivers_length = sizeof(struct ffa_memory_access) *
1351 memory_region->receiver_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001352 constituents_offset =
1353 ffa_composite_constituent_offset(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001354 if (memory_region->receivers[0].composite_memory_region_offset <
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001355 sizeof(struct ffa_memory_region) + receivers_length ||
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001356 constituents_offset > fragment_length) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001357 dlog_verbose(
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001358 "Invalid composite memory region descriptor offset "
1359 "%d.\n",
1360 memory_region->receivers[0]
1361 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001362 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001363 }
1364
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001365 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001366
1367 /*
	 * Ensure the constituents fit within the bounds of the message.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001369 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001370 constituents_length = sizeof(struct ffa_memory_region_constituent) *
1371 composite->constituent_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001372 if (memory_share_length != constituents_offset + constituents_length) {
1373 dlog_verbose("Invalid length %d or composite offset %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001374 memory_share_length,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001375 memory_region->receivers[0]
1376 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001377 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001378 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001379 if (fragment_length < memory_share_length &&
1380 fragment_length < HF_MAILBOX_SIZE) {
1381 dlog_warning(
1382 "Initial fragment length %d smaller than mailbox "
1383 "size.\n",
1384 fragment_length);
1385 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001386
Andrew Walbrana65a1322020-04-06 19:32:32 +01001387 /*
1388 * Clear is not allowed for memory sharing, as the sender still has
1389 * access to the memory.
1390 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001391 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1392 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001393 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001394 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001395 }
1396
1397 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001398 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001399 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001400 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001401 }
1402
1403 /* Check that the permissions are valid. */
1404 *permissions =
1405 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001406 data_access = ffa_get_data_access_attr(*permissions);
1407 instruction_access = ffa_get_instruction_access_attr(*permissions);
1408 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1409 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001410 dlog_verbose("Reserved value for receiver permissions %#x.\n",
1411 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001412 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001413 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001414 if (instruction_access != FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001415 dlog_verbose(
1416 "Invalid instruction access permissions %#x for "
1417 "sending memory.\n",
1418 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001419 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001420 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001421 if (share_func == FFA_MEM_SHARE_32) {
1422 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001423 dlog_verbose(
1424 "Invalid data access permissions %#x for "
1425 "sharing memory.\n",
1426 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001427 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001428 }
1429 /*
Andrew Walbrandd8248f2020-06-22 13:39:30 +01001430 * According to section 5.11.3 of the FF-A 1.0 spec NX is
1431 * required for share operations (but must not be specified by
1432 * the sender) so set it in the copy that we store, ready to be
Andrew Walbrana65a1322020-04-06 19:32:32 +01001433 * returned to the retriever.
1434 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001435 ffa_set_instruction_access_attr(permissions,
1436 FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001437 memory_region->receivers[0].receiver_permissions.permissions =
1438 *permissions;
1439 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001440 if (share_func == FFA_MEM_LEND_32 &&
1441 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001442 dlog_verbose(
1443 "Invalid data access permissions %#x for lending "
1444 "memory.\n",
1445 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001446 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001447 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001448 if (share_func == FFA_MEM_DONATE_32 &&
1449 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001450 dlog_verbose(
1451 "Invalid data access permissions %#x for donating "
1452 "memory.\n",
1453 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001454 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001455 }
1456
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001457 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001458}
1459
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001460/** Forwards a memory send message on to the TEE. */
1461static struct ffa_value memory_send_tee_forward(
1462 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id,
1463 uint32_t share_func, struct ffa_memory_region *memory_region,
1464 uint32_t memory_share_length, uint32_t fragment_length)
1465{
1466 struct ffa_value ret;
1467
1468 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
1469 memory_region, fragment_length);
1470 tee_locked.vm->mailbox.recv_size = fragment_length;
1471 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1472 tee_locked.vm->mailbox.recv_func = share_func;
1473 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001474 ret = arch_other_world_call(
1475 (struct ffa_value){.func = share_func,
1476 .arg1 = memory_share_length,
1477 .arg2 = fragment_length});
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001478 /*
1479 * After the call to the TEE completes it must have finished reading its
1480 * RX buffer, so it is ready for another message.
1481 */
1482 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1483
1484 return ret;
1485}
1486
Andrew Walbrana65a1322020-04-06 19:32:32 +01001487/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001488 * Gets the share state for continuing an operation to donate, lend or share
1489 * memory, and checks that it is a valid request.
1490 *
1491 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1492 * not.
1493 */
1494static struct ffa_value ffa_memory_send_continue_validate(
1495 struct share_states_locked share_states, ffa_memory_handle_t handle,
1496 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1497 struct mpool *page_pool)
1498{
1499 struct ffa_memory_share_state *share_state;
1500 struct ffa_memory_region *memory_region;
1501
1502 CHECK(share_state_ret != NULL);
1503
1504 /*
1505 * Look up the share state by handle and make sure that the VM ID
1506 * matches.
1507 */
1508 if (!get_share_state(share_states, handle, &share_state)) {
1509 dlog_verbose(
1510 "Invalid handle %#x for memory send continuation.\n",
1511 handle);
1512 return ffa_error(FFA_INVALID_PARAMETERS);
1513 }
1514 memory_region = share_state->memory_region;
1515
1516 if (memory_region->sender != from_vm_id) {
1517 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1518 return ffa_error(FFA_INVALID_PARAMETERS);
1519 }
1520
1521 if (share_state->sending_complete) {
1522 dlog_verbose(
1523 "Sending of memory handle %#x is already complete.\n",
1524 handle);
1525 return ffa_error(FFA_INVALID_PARAMETERS);
1526 }
1527
1528 if (share_state->fragment_count == MAX_FRAGMENTS) {
1529 /*
1530 * Log a warning as this is a sign that MAX_FRAGMENTS should
1531 * probably be increased.
1532 */
1533 dlog_warning(
1534 "Too many fragments for memory share with handle %#x; "
1535 "only %d supported.\n",
1536 handle, MAX_FRAGMENTS);
1537 /* Free share state, as it's not possible to complete it. */
1538 share_state_free(share_states, share_state, page_pool);
1539 return ffa_error(FFA_NO_MEMORY);
1540 }
1541
1542 *share_state_ret = share_state;
1543
1544 return (struct ffa_value){.func = FFA_SUCCESS_32};
1545}
1546
1547/**
1548 * Forwards a memory send continuation message on to the TEE.
1549 */
1550static struct ffa_value memory_send_continue_tee_forward(
1551 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, void *fragment,
1552 uint32_t fragment_length, ffa_memory_handle_t handle)
1553{
1554 struct ffa_value ret;
1555
1556 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, fragment,
1557 fragment_length);
1558 tee_locked.vm->mailbox.recv_size = fragment_length;
1559 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1560 tee_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
1561 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001562 ret = arch_other_world_call(
Andrew Walbranca808b12020-05-15 17:22:28 +01001563 (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
1564 .arg1 = (uint32_t)handle,
1565 .arg2 = (uint32_t)(handle >> 32),
1566 .arg3 = fragment_length,
1567 .arg4 = (uint64_t)sender_vm_id << 16});
1568 /*
1569 * After the call to the TEE completes it must have finished reading its
1570 * RX buffer, so it is ready for another message.
1571 */
1572 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1573
1574 return ret;
1575}
1576
1577/**
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001578 * Validates a call to donate, lend or share memory to a non-TEE VM and then
 * updates the stage-2 page tables. Specifically, checks that the message
 * length and number of memory region constituents match, and that the
 * transition is valid for the type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001582 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001583 * Assumes that the caller has already found and locked the sender VM and copied
1584 * the memory region descriptor from the sender's TX buffer to a freshly
1585 * allocated page from Hafnium's internal pool. The caller must have also
1586 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001587 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001588 * This function takes ownership of the `memory_region` passed in and will free
1589 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001590 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001591struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001592 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001593 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001594 uint32_t fragment_length, uint32_t share_func,
1595 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001596{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001597 ffa_memory_access_permissions_t permissions;
1598 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001599 struct share_states_locked share_states;
1600 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001601
1602 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001603 * If there is an error validating the `memory_region` then we need to
1604 * free it because we own it but we won't be storing it in a share state
1605 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001606 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001607 ret = ffa_memory_send_validate(from_locked, memory_region,
1608 memory_share_length, fragment_length,
1609 share_func, &permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001610 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001611 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001612 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001613 }
1614
Andrew Walbrana65a1322020-04-06 19:32:32 +01001615 /* Set flag for share function, ready to be retrieved later. */
1616 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001617 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001618 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001619 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001620 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001621 case FFA_MEM_LEND_32:
1622 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001623 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001624 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001625 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001626 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001627 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001628 }
1629
Andrew Walbranca808b12020-05-15 17:22:28 +01001630 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001631 /*
1632 * Allocate a share state before updating the page table. Otherwise if
1633 * updating the page table succeeded but allocating the share state
1634 * failed then it would leave the memory in a state where nobody could
1635 * get it back.
1636 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001637 if (!allocate_share_state(share_states, share_func, memory_region,
1638 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1639 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001640 dlog_verbose("Failed to allocate share state.\n");
1641 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001642 ret = ffa_error(FFA_NO_MEMORY);
1643 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001644 }
1645
Andrew Walbranca808b12020-05-15 17:22:28 +01001646 if (fragment_length == memory_share_length) {
1647 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001648 ret = ffa_memory_send_complete(
1649 from_locked, share_states, share_state, page_pool,
1650 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001651 } else {
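		/*
		 * More fragments are expected; ask the sender to transmit the
		 * next fragment, starting at the offset of the data received
		 * so far.
		 */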
1652 ret = (struct ffa_value){
1653 .func = FFA_MEM_FRAG_RX_32,
1654 .arg1 = (uint32_t)share_state->handle,
1655 .arg2 = (uint32_t)(share_state->handle >> 32),
1656 .arg3 = fragment_length};
1657 }
1658
1659out:
1660 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001661 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001662 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001663}
1664
1665/**
1666 * Validates a call to donate, lend or share memory to the TEE and then updates
 * the stage-2 page tables. Specifically, checks that the message length and
 * number of memory region constituents match, and that the transition is
 * valid for the type of memory sending operation.
1670 *
1671 * Assumes that the caller has already found and locked the sender VM and the
1672 * TEE VM, and copied the memory region descriptor from the sender's TX buffer
1673 * to a freshly allocated page from Hafnium's internal pool. The caller must
1674 * have also validated that the receiver VM ID is valid.
1675 *
1676 * This function takes ownership of the `memory_region` passed in and will free
1677 * it when necessary; it must not be freed by the caller.
1678 */
1679struct ffa_value ffa_memory_tee_send(
1680 struct vm_locked from_locked, struct vm_locked to_locked,
1681 struct ffa_memory_region *memory_region, uint32_t memory_share_length,
1682 uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool)
1683{
1684 ffa_memory_access_permissions_t permissions;
1685 struct ffa_value ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001686
1687 /*
1688 * If there is an error validating the `memory_region` then we need to
1689 * free it because we own it but we won't be storing it in a share state
1690 * after all.
1691 */
1692 ret = ffa_memory_send_validate(from_locked, memory_region,
1693 memory_share_length, fragment_length,
1694 share_func, &permissions);
1695 if (ret.func != FFA_SUCCESS_32) {
1696 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001697 }
1698
Andrew Walbranca808b12020-05-15 17:22:28 +01001699 if (fragment_length == memory_share_length) {
1700 /* No more fragments to come, everything fit in one message. */
1701 struct ffa_composite_memory_region *composite =
1702 ffa_memory_region_get_composite(memory_region, 0);
1703 struct ffa_memory_region_constituent *constituents =
1704 composite->constituents;
Andrew Walbran37c574e2020-06-03 11:45:46 +01001705 struct mpool local_page_pool;
1706 uint32_t orig_from_mode;
1707
1708 /*
1709 * Use a local page pool so that we can roll back if necessary.
1710 */
1711 mpool_init_with_fallback(&local_page_pool, page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001712
1713 ret = ffa_send_check_update(
1714 from_locked, &constituents,
1715 &composite->constituent_count, 1, share_func,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001716 permissions, &local_page_pool,
1717 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1718 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01001719 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01001720 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001721 goto out;
1722 }
1723
1724 /* Forward memory send message on to TEE. */
1725 ret = memory_send_tee_forward(
1726 to_locked, from_locked.vm->id, share_func,
1727 memory_region, memory_share_length, fragment_length);
Andrew Walbran37c574e2020-06-03 11:45:46 +01001728
1729 if (ret.func != FFA_SUCCESS_32) {
1730 dlog_verbose(
1731 "TEE didn't successfully complete memory send "
1732 "operation; returned %#x (%d). Rolling back.\n",
1733 ret.func, ret.arg2);
1734
1735 /*
1736 * The TEE failed to complete the send operation, so
1737 * roll back the page table update for the VM. This
1738 * can't fail because it won't try to allocate more
1739 * memory than was freed into the `local_page_pool` by
1740 * `ffa_send_check_update` in the initial update.
1741 */
1742 CHECK(ffa_region_group_identity_map(
1743 from_locked, &constituents,
1744 &composite->constituent_count, 1,
1745 orig_from_mode, &local_page_pool, true));
1746 }
1747
1748 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001749 } else {
1750 struct share_states_locked share_states = share_states_lock();
1751 ffa_memory_handle_t handle;
1752
1753 /*
1754 * We need to wait for the rest of the fragments before we can
1755 * check whether the transaction is valid and unmap the memory.
1756 * Call the TEE so it can do its initial validation and assign a
1757 * handle, and allocate a share state to keep what we have so
1758 * far.
1759 */
1760 ret = memory_send_tee_forward(
1761 to_locked, from_locked.vm->id, share_func,
1762 memory_region, memory_share_length, fragment_length);
1763 if (ret.func == FFA_ERROR_32) {
1764 goto out_unlock;
1765 } else if (ret.func != FFA_MEM_FRAG_RX_32) {
1766 dlog_warning(
1767 "Got %#x from TEE in response to %#x for "
				"fragment with %d/%d, expected "
1769 "FFA_MEM_FRAG_RX.\n",
1770 ret.func, share_func, fragment_length,
1771 memory_share_length);
1772 ret = ffa_error(FFA_INVALID_PARAMETERS);
1773 goto out_unlock;
1774 }
1775 handle = ffa_frag_handle(ret);
1776 if (ret.arg3 != fragment_length) {
1777 dlog_warning(
1778 "Got unexpected fragment offset %d for "
1779 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1780 ret.arg3, fragment_length);
1781 ret = ffa_error(FFA_INVALID_PARAMETERS);
1782 goto out_unlock;
1783 }
1784 if (ffa_frag_sender(ret) != from_locked.vm->id) {
1785 dlog_warning(
1786 "Got unexpected sender ID %d for "
1787 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1788 ffa_frag_sender(ret), from_locked.vm->id);
1789 ret = ffa_error(FFA_INVALID_PARAMETERS);
1790 goto out_unlock;
1791 }
1792
1793 if (!allocate_share_state(share_states, share_func,
1794 memory_region, fragment_length,
1795 handle, NULL)) {
1796 dlog_verbose("Failed to allocate share state.\n");
1797 ret = ffa_error(FFA_NO_MEMORY);
1798 goto out_unlock;
1799 }
1800 /*
1801 * Don't free the memory region fragment, as it has been stored
1802 * in the share state.
1803 */
1804 memory_region = NULL;
1805 out_unlock:
1806 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001807 }
1808
Andrew Walbranca808b12020-05-15 17:22:28 +01001809out:
1810 if (memory_region != NULL) {
1811 mpool_free(page_pool, memory_region);
1812 }
1813 dump_share_states();
1814 return ret;
1815}
1816
1817/**
1818 * Continues an operation to donate, lend or share memory to a non-TEE VM. If
1819 * this is the last fragment then checks that the transition is valid for the
1820 * type of memory sending operation and updates the stage-2 page tables of the
1821 * sender.
1822 *
1823 * Assumes that the caller has already found and locked the sender VM and copied
1824 * the memory region descriptor from the sender's TX buffer to a freshly
1825 * allocated page from Hafnium's internal pool.
1826 *
1827 * This function takes ownership of the `fragment` passed in; it must not be
1828 * freed by the caller.
1829 */
1830struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1831 void *fragment,
1832 uint32_t fragment_length,
1833 ffa_memory_handle_t handle,
1834 struct mpool *page_pool)
1835{
1836 struct share_states_locked share_states = share_states_lock();
1837 struct ffa_memory_share_state *share_state;
1838 struct ffa_value ret;
1839 struct ffa_memory_region *memory_region;
1840
1841 ret = ffa_memory_send_continue_validate(share_states, handle,
1842 &share_state,
1843 from_locked.vm->id, page_pool);
1844 if (ret.func != FFA_SUCCESS_32) {
1845 goto out_free_fragment;
1846 }
1847 memory_region = share_state->memory_region;
1848
1849 if (memory_region->receivers[0].receiver_permissions.receiver ==
1850 HF_TEE_VM_ID) {
1851 dlog_error(
1852 "Got hypervisor-allocated handle for memory send to "
1853 "TEE. This should never happen, and indicates a bug in "
1854 "EL3 code.\n");
1855 ret = ffa_error(FFA_INVALID_PARAMETERS);
1856 goto out_free_fragment;
1857 }
1858
1859 /* Add this fragment. */
1860 share_state->fragments[share_state->fragment_count] = fragment;
1861 share_state->fragment_constituent_counts[share_state->fragment_count] =
1862 fragment_length / sizeof(struct ffa_memory_region_constituent);
1863 share_state->fragment_count++;
1864
1865 /* Check whether the memory send operation is now ready to complete. */
1866 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001867 ret = ffa_memory_send_complete(
1868 from_locked, share_states, share_state, page_pool,
1869 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001870 } else {
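		/*
		 * More fragments are expected; report the offset at which the
		 * next fragment should start.
		 */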
1871 ret = (struct ffa_value){
1872 .func = FFA_MEM_FRAG_RX_32,
1873 .arg1 = (uint32_t)handle,
1874 .arg2 = (uint32_t)(handle >> 32),
1875 .arg3 = share_state_next_fragment_offset(share_states,
1876 share_state)};
1877 }
1878 goto out;
1879
1880out_free_fragment:
1881 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001882
1883out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001884 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001885 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001886}
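
/*
 * Illustrative sketch only, not code used by this file: from the sender's
 * side, the fragmented flow implemented by ffa_memory_send() and
 * ffa_memory_send_continue() above is a loop over the FF-A ABI. The initial
 * call may equally be FFA_MEM_SHARE_32 or FFA_MEM_DONATE_32. `ffa_call` and
 * `write_next_fragment_to_tx_buffer` are placeholders for however the caller
 * issues FF-A calls and stages data in its TX buffer; they are assumptions
 * for the example, not functions defined here.
 *
 *	ret = ffa_call((struct ffa_value){.func = FFA_MEM_LEND_32,
 *					  .arg1 = total_length,
 *					  .arg2 = fragment_length});
 *	while (ret.func == FFA_MEM_FRAG_RX_32) {
 *		// arg1/arg2 carry the handle, arg3 the offset at which the
 *		// next fragment should start.
 *		fragment_length = write_next_fragment_to_tx_buffer(ret.arg3);
 *		ret = ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
 *						  .arg1 = ret.arg1,
 *						  .arg2 = ret.arg2,
 *						  .arg3 = fragment_length});
 *	}
 *	// On completion ret.func is FFA_SUCCESS_32 with the handle encoded,
 *	// otherwise FFA_ERROR_32.
 */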
1887
Andrew Walbranca808b12020-05-15 17:22:28 +01001888/**
1889 * Continues an operation to donate, lend or share memory to the TEE VM. If this
1890 * is the last fragment then checks that the transition is valid for the type of
1891 * memory sending operation and updates the stage-2 page tables of the sender.
1892 *
1893 * Assumes that the caller has already found and locked the sender VM and copied
1894 * the memory region descriptor from the sender's TX buffer to a freshly
1895 * allocated page from Hafnium's internal pool.
1896 *
 * This function takes ownership of the `fragment` passed in and will free it
 * when necessary; it must not be freed by the caller.
1899 */
1900struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked,
1901 struct vm_locked to_locked,
1902 void *fragment,
1903 uint32_t fragment_length,
1904 ffa_memory_handle_t handle,
1905 struct mpool *page_pool)
1906{
1907 struct share_states_locked share_states = share_states_lock();
1908 struct ffa_memory_share_state *share_state;
1909 struct ffa_value ret;
1910 struct ffa_memory_region *memory_region;
1911
1912 ret = ffa_memory_send_continue_validate(share_states, handle,
1913 &share_state,
1914 from_locked.vm->id, page_pool);
1915 if (ret.func != FFA_SUCCESS_32) {
1916 goto out_free_fragment;
1917 }
1918 memory_region = share_state->memory_region;
1919
1920 if (memory_region->receivers[0].receiver_permissions.receiver !=
1921 HF_TEE_VM_ID) {
1922 dlog_error(
1923 "Got SPM-allocated handle for memory send to non-TEE "
1924 "VM. This should never happen, and indicates a bug.\n");
1925 ret = ffa_error(FFA_INVALID_PARAMETERS);
1926 goto out_free_fragment;
1927 }
1928
1929 if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
1930 to_locked.vm->mailbox.recv == NULL) {
1931 /*
1932 * If the TEE RX buffer is not available, tell the sender to
1933 * retry by returning the current offset again.
1934 */
1935 ret = (struct ffa_value){
1936 .func = FFA_MEM_FRAG_RX_32,
1937 .arg1 = (uint32_t)handle,
1938 .arg2 = (uint32_t)(handle >> 32),
1939 .arg3 = share_state_next_fragment_offset(share_states,
1940 share_state),
1941 };
1942 goto out_free_fragment;
1943 }
1944
1945 /* Add this fragment. */
1946 share_state->fragments[share_state->fragment_count] = fragment;
1947 share_state->fragment_constituent_counts[share_state->fragment_count] =
1948 fragment_length / sizeof(struct ffa_memory_region_constituent);
1949 share_state->fragment_count++;
1950
1951 /* Check whether the memory send operation is now ready to complete. */
1952 if (share_state_sending_complete(share_states, share_state)) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01001953 struct mpool local_page_pool;
1954 uint32_t orig_from_mode;
1955
1956 /*
1957 * Use a local page pool so that we can roll back if necessary.
1958 */
1959 mpool_init_with_fallback(&local_page_pool, page_pool);
1960
Andrew Walbranca808b12020-05-15 17:22:28 +01001961 ret = ffa_memory_send_complete(from_locked, share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001962 share_state, &local_page_pool,
1963 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01001964
1965 if (ret.func == FFA_SUCCESS_32) {
1966 /*
1967 * Forward final fragment on to the TEE so that
1968 * it can complete the memory sending operation.
1969 */
1970 ret = memory_send_continue_tee_forward(
1971 to_locked, from_locked.vm->id, fragment,
1972 fragment_length, handle);
1973
1974 if (ret.func != FFA_SUCCESS_32) {
1975 /*
1976 * The error will be passed on to the caller,
1977 * but log it here too.
1978 */
1979 dlog_verbose(
1980 "TEE didn't successfully complete "
1981 "memory send operation; returned %#x "
Andrew Walbran37c574e2020-06-03 11:45:46 +01001982 "(%d). Rolling back.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01001983 ret.func, ret.arg2);
Andrew Walbran37c574e2020-06-03 11:45:46 +01001984
1985 /*
1986 * The TEE failed to complete the send
1987 * operation, so roll back the page table update
1988 * for the VM. This can't fail because it won't
1989 * try to allocate more memory than was freed
1990 * into the `local_page_pool` by
1991 * `ffa_send_check_update` in the initial
1992 * update.
1993 */
1994 CHECK(ffa_region_group_identity_map(
1995 from_locked, share_state->fragments,
1996 share_state
1997 ->fragment_constituent_counts,
1998 share_state->fragment_count,
1999 orig_from_mode, &local_page_pool,
2000 true));
Andrew Walbranca808b12020-05-15 17:22:28 +01002001 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002002
Andrew Walbranca808b12020-05-15 17:22:28 +01002003 /* Free share state. */
2004 share_state_free(share_states, share_state, page_pool);
2005 } else {
2006 /* Abort sending to TEE. */
2007 struct ffa_value tee_ret =
Olivier Deprez112d2b52020-09-30 07:39:23 +02002008 arch_other_world_call((struct ffa_value){
Andrew Walbranca808b12020-05-15 17:22:28 +01002009 .func = FFA_MEM_RECLAIM_32,
2010 .arg1 = (uint32_t)handle,
2011 .arg2 = (uint32_t)(handle >> 32)});
2012
2013 if (tee_ret.func != FFA_SUCCESS_32) {
2014 /*
2015 * Nothing we can do if TEE doesn't abort
2016 * properly, just log it.
2017 */
2018 dlog_verbose(
2019 "TEE didn't successfully abort failed "
2020 "memory send operation; returned %#x "
2021 "(%d).\n",
2022 tee_ret.func, tee_ret.arg2);
2023 }
2024 /*
2025 * We don't need to free the share state in this case
2026 * because ffa_memory_send_complete does that already.
2027 */
2028 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002029
2030 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01002031 } else {
2032 uint32_t next_fragment_offset =
2033 share_state_next_fragment_offset(share_states,
2034 share_state);
2035
2036 ret = memory_send_continue_tee_forward(
2037 to_locked, from_locked.vm->id, fragment,
2038 fragment_length, handle);
2039
2040 if (ret.func != FFA_MEM_FRAG_RX_32 ||
2041 ffa_frag_handle(ret) != handle ||
2042 ret.arg3 != next_fragment_offset ||
2043 ffa_frag_sender(ret) != from_locked.vm->id) {
2044 dlog_verbose(
2045 "Got unexpected result from forwarding "
2046 "FFA_MEM_FRAG_TX to TEE: %#x (handle %#x, "
2047 "offset %d, sender %d); expected "
2048 "FFA_MEM_FRAG_RX (handle %#x, offset %d, "
2049 "sender %d).\n",
2050 ret.func, ffa_frag_handle(ret), ret.arg3,
2051 ffa_frag_sender(ret), handle,
2052 next_fragment_offset, from_locked.vm->id);
2053 /* Free share state. */
2054 share_state_free(share_states, share_state, page_pool);
2055 ret = ffa_error(FFA_INVALID_PARAMETERS);
2056 goto out;
2057 }
2058
2059 ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
2060 .arg1 = (uint32_t)handle,
2061 .arg2 = (uint32_t)(handle >> 32),
2062 .arg3 = next_fragment_offset};
2063 }
2064 goto out;
2065
2066out_free_fragment:
2067 mpool_free(page_pool, fragment);
2068
2069out:
2070 share_states_unlock(&share_states);
2071 return ret;
2072}
2073
2074/** Clean up after the receiver has finished retrieving a memory region. */
2075static void ffa_memory_retrieve_complete(
2076 struct share_states_locked share_states,
2077 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2078{
2079 if (share_state->share_func == FFA_MEM_DONATE_32) {
2080 /*
2081 * Memory that has been donated can't be relinquished,
2082 * so no need to keep the share state around.
2083 */
2084 share_state_free(share_states, share_state, page_pool);
2085 dlog_verbose("Freed share state for donate.\n");
2086 }
2087}
2088
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002089struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2090 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002091 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002092 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002093{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002094 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002095 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002096 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002097 sizeof(struct ffa_memory_access);
2098 ffa_memory_handle_t handle = retrieve_request->handle;
2099 ffa_memory_region_flags_t transaction_type =
Andrew Walbrana65a1322020-04-06 19:32:32 +01002100 retrieve_request->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002101 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
2102 struct ffa_memory_region *memory_region;
2103 ffa_memory_access_permissions_t sent_permissions;
2104 enum ffa_data_access sent_data_access;
2105 enum ffa_instruction_access sent_instruction_access;
2106 ffa_memory_access_permissions_t requested_permissions;
2107 enum ffa_data_access requested_data_access;
2108 enum ffa_instruction_access requested_instruction_access;
2109 ffa_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002110 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002111 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002112 struct ffa_memory_share_state *share_state;
2113 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002114 struct ffa_composite_memory_region *composite;
2115 uint32_t total_length;
2116 uint32_t fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002117
2118 dump_share_states();
2119
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002120 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002121 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002122 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002123 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002124 expected_retrieve_request_length,
2125 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002126 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002127 }
2128
Andrew Walbrana65a1322020-04-06 19:32:32 +01002129 if (retrieve_request->receiver_count != 1) {
2130 dlog_verbose(
2131 "Multi-way memory sharing not supported (got %d "
			"receiver descriptors on FFA_MEM_RETRIEVE_REQ, "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002133 "expected 1).\n",
2134 retrieve_request->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002135 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002136 }
2137
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002138 share_states = share_states_lock();
2139 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002140 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002141 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002142 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002143 goto out;
2144 }
2145
Andrew Walbrana65a1322020-04-06 19:32:32 +01002146 memory_region = share_state->memory_region;
2147 CHECK(memory_region != NULL);
2148
2149 /*
2150 * Check that the transaction type expected by the receiver is correct,
2151 * if it has been specified.
2152 */
2153 if (transaction_type !=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002154 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
Andrew Walbrana65a1322020-04-06 19:32:32 +01002155 transaction_type != (memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002156 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002157 dlog_verbose(
2158 "Incorrect transaction type %#x for "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002159 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002160 transaction_type,
2161 memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002162 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002163 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002164 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002165 goto out;
2166 }
2167
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002168 if (retrieve_request->sender != memory_region->sender) {
2169 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002170 "Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002171 "expected %d for handle %#x.\n",
2172 retrieve_request->sender, memory_region->sender,
2173 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002174 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002175 goto out;
2176 }
2177
2178 if (retrieve_request->tag != memory_region->tag) {
2179 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002180 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002181 "%d for handle %#x.\n",
2182 retrieve_request->tag, memory_region->tag, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002183 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002184 goto out;
2185 }
2186
Andrew Walbrana65a1322020-04-06 19:32:32 +01002187 if (retrieve_request->receivers[0].receiver_permissions.receiver !=
2188 to_locked.vm->id) {
2189 dlog_verbose(
2190 "Retrieve request receiver VM ID %d didn't match "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002191 "caller of FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002192 retrieve_request->receivers[0]
2193 .receiver_permissions.receiver);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002194 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002195 goto out;
2196 }
2197
2198 if (memory_region->receivers[0].receiver_permissions.receiver !=
2199 to_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002200 dlog_verbose(
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002201 "Incorrect receiver VM ID %d for FFA_MEM_RETRIEVE_REQ, "
2202 "expected %d for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002203 to_locked.vm->id,
2204 memory_region->receivers[0]
2205 .receiver_permissions.receiver,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002206 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002207 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002208 goto out;
2209 }
2210
Andrew Walbranca808b12020-05-15 17:22:28 +01002211 if (!share_state->sending_complete) {
2212 dlog_verbose(
2213 "Memory with handle %#x not fully sent, can't "
2214 "retrieve.\n",
2215 handle);
2216 ret = ffa_error(FFA_INVALID_PARAMETERS);
2217 goto out;
2218 }
2219
2220 if (share_state->retrieved_fragment_count[0] != 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002221 dlog_verbose("Memory with handle %#x already retrieved.\n",
2222 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002223 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002224 goto out;
2225 }
2226
Andrew Walbrana65a1322020-04-06 19:32:32 +01002227 if (retrieve_request->receivers[0].composite_memory_region_offset !=
2228 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002229 dlog_verbose(
2230 "Retriever specified address ranges not supported (got "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002231 "offset %d).\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002232 retrieve_request->receivers[0]
2233 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002234 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002235 goto out;
2236 }
2237
Andrew Walbrana65a1322020-04-06 19:32:32 +01002238 /*
2239 * Check permissions from sender against permissions requested by
2240 * receiver.
2241 */
2242 /* TODO: Check attributes too. */
2243 sent_permissions =
2244 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002245 sent_data_access = ffa_get_data_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002246 sent_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002247 ffa_get_instruction_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002248 requested_permissions =
2249 retrieve_request->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002250 requested_data_access = ffa_get_data_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002251 requested_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002252 ffa_get_instruction_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002253 permissions = 0;
2254 switch (sent_data_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002255 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2256 case FFA_DATA_ACCESS_RW:
2257 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2258 requested_data_access == FFA_DATA_ACCESS_RW) {
2259 ffa_set_data_access_attr(&permissions,
2260 FFA_DATA_ACCESS_RW);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002261 break;
2262 }
2263 /* Intentional fall-through. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002264 case FFA_DATA_ACCESS_RO:
2265 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2266 requested_data_access == FFA_DATA_ACCESS_RO) {
2267 ffa_set_data_access_attr(&permissions,
2268 FFA_DATA_ACCESS_RO);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002269 break;
2270 }
2271 dlog_verbose(
2272 "Invalid data access requested; sender specified "
2273 "permissions %#x but receiver requested %#x.\n",
2274 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002275 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002276 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002277 case FFA_DATA_ACCESS_RESERVED:
2278 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002279 "checked before this point.");
2280 }
2281 switch (sent_instruction_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002282 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2283 case FFA_INSTRUCTION_ACCESS_X:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002284 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002285 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2286 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
2287 ffa_set_instruction_access_attr(
2288 &permissions, FFA_INSTRUCTION_ACCESS_X);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002289 break;
2290 }
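		/* Intentional fall-through. */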
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002291 case FFA_INSTRUCTION_ACCESS_NX:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002292 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002293 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2294 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2295 ffa_set_instruction_access_attr(
2296 &permissions, FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002297 break;
2298 }
2299 dlog_verbose(
2300 "Invalid instruction access requested; sender "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002301 "specified permissions %#x but receiver requested "
2302 "%#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002303 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002304 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002305 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002306 case FFA_INSTRUCTION_ACCESS_RESERVED:
2307 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002308 "be checked before this point.");
2309 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002310 memory_to_attributes = ffa_memory_permissions_to_mode(permissions);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002311
Andrew Walbran996d1d12020-05-27 14:08:43 +01002312 ret = ffa_retrieve_check_update(
Andrew Walbranca808b12020-05-15 17:22:28 +01002313 to_locked, share_state->fragments,
2314 share_state->fragment_constituent_counts,
2315 share_state->fragment_count, memory_to_attributes,
Andrew Walbran996d1d12020-05-27 14:08:43 +01002316 share_state->share_func, false, page_pool);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002317 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002318 goto out;
2319 }
2320
2321 /*
2322 * Copy response to RX buffer of caller and deliver the message. This
2323 * must be done before the share_state is (possibly) freed.
2324 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002325 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002326 composite = ffa_memory_region_get_composite(memory_region, 0);
2327 /*
2328 * Constituents which we received in the first fragment should always
2329 * fit in the first fragment we are sending, because the header is the
2330 * same size in both cases and we have a fixed message buffer size. So
2331 * `ffa_retrieved_memory_region_init` should never fail.
2332 */
2333 CHECK(ffa_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002334 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2335 memory_region->sender, memory_region->attributes,
2336 memory_region->flags, handle, to_locked.vm->id, permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +01002337 composite->page_count, composite->constituent_count,
2338 share_state->fragments[0],
2339 share_state->fragment_constituent_counts[0], &total_length,
2340 &fragment_length));
2341 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002342 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002343 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002344 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
2345
Andrew Walbranca808b12020-05-15 17:22:28 +01002346 share_state->retrieved_fragment_count[0] = 1;
2347 if (share_state->retrieved_fragment_count[0] ==
2348 share_state->fragment_count) {
2349 ffa_memory_retrieve_complete(share_states, share_state,
2350 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002351 }
2352
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002353 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002354 .arg1 = total_length,
2355 .arg2 = fragment_length};
2356
2357out:
2358 share_states_unlock(&share_states);
2359 dump_share_states();
2360 return ret;
2361}
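
/*
 * Illustrative sketch only, not code used by this file: a receiver drives
 * ffa_memory_retrieve() above and ffa_memory_retrieve_continue() below
 * roughly as follows. `ffa_call` and `consume_and_release_rx_buffer` (which
 * copies the fragment out and then calls FFA_RX_RELEASE) are placeholders for
 * the caller's own plumbing, and `handle` is the memory region handle the
 * receiver was given by the sender.
 *
 *	// The retrieve request descriptor is already in the TX buffer.
 *	ret = ffa_call((struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
 *					  .arg1 = request_length,
 *					  .arg2 = request_length});
 *	// ret.arg1 is the total response length; ret.arg2 is the length of
 *	// the fragment now in the RX buffer.
 *	offset = ret.arg2;
 *	consume_and_release_rx_buffer();
 *	while (offset < ret.arg1) {
 *		frag = ffa_call((struct ffa_value){
 *			.func = FFA_MEM_FRAG_RX_32,
 *			.arg1 = (uint32_t)handle,
 *			.arg2 = (uint32_t)(handle >> 32),
 *			.arg3 = offset});
 *		offset += frag.arg3;
 *		consume_and_release_rx_buffer();
 *	}
 */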
2362
struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
	ffa_memory_handle_t handle,
	uint32_t fragment_offset,
	struct mpool *page_pool)
{
	struct ffa_memory_region *memory_region;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_value ret;
	uint32_t fragment_index;
	uint32_t retrieved_constituents_count;
	uint32_t i;
	uint32_t expected_fragment_offset;
	uint32_t remaining_constituent_count;
	uint32_t fragment_length;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    to_locked.vm->id) {
		dlog_verbose(
			"Caller of FFA_MEM_FRAG_RX (%d) is not receiver (%d) "
			"of handle %#x.\n",
			to_locked.vm->id,
			memory_region->receivers[0]
				.receiver_permissions.receiver,
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"retrieve.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] == 0 ||
	    share_state->retrieved_fragment_count[0] >=
		    share_state->fragment_count) {
		dlog_verbose(
			"Retrieval of memory with handle %#x not yet started "
			"or already completed (%d/%d fragments retrieved).\n",
			handle, share_state->retrieved_fragment_count[0],
			share_state->fragment_count);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	fragment_index = share_state->retrieved_fragment_count[0];

	/*
	 * Check that the given fragment offset is correct by counting how many
	 * constituents were in the fragments previously sent.
	 */
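	/*
	 * For example, if two fragments holding 100 constituents each have
	 * already been retrieved, the only valid offset for the next request
	 * is the offset of the constituent array plus
	 * 200 * sizeof(struct ffa_memory_region_constituent).
	 */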
	retrieved_constituents_count = 0;
	for (i = 0; i < fragment_index; ++i) {
		retrieved_constituents_count +=
			share_state->fragment_constituent_counts[i];
	}
	expected_fragment_offset =
		ffa_composite_constituent_offset(memory_region, 0) +
		retrieved_constituents_count *
			sizeof(struct ffa_memory_region_constituent);
	if (fragment_offset != expected_fragment_offset) {
		dlog_verbose("Fragment offset was %d but expected %d.\n",
			fragment_offset, expected_fragment_offset);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

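	/* Copy the next constituent fragment to the caller's RX buffer. */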
	remaining_constituent_count = ffa_memory_fragment_init(
		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
		share_state->fragments[fragment_index],
		share_state->fragment_constituent_counts[fragment_index],
		&fragment_length);
	CHECK(remaining_constituent_count == 0);
	to_locked.vm->mailbox.recv_size = fragment_length;
	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
	share_state->retrieved_fragment_count[0]++;
	if (share_state->retrieved_fragment_count[0] ==
	    share_state->fragment_count) {
		ffa_memory_retrieve_complete(share_states, share_state,
			page_pool);
	}

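	/*
	 * Respond with FFA_MEM_FRAG_TX; the 64-bit handle is returned split
	 * across arg1 (low 32 bits) and arg2 (high 32 bits).
	 */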
	ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
		.arg1 = (uint32_t)handle,
		.arg2 = (uint32_t)(handle >> 32),
		.arg3 = fragment_length};

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

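/**
 * Validates an FFA_MEM_RELINQUISH request against the share state for the
 * given handle, updates the relinquishing VM's page table and marks the region
 * as not retrieved so that it can later be reclaimed by the sender or
 * retrieved again.
 */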
struct ffa_value ffa_memory_relinquish(
	struct vm_locked from_locked,
	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	ffa_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	bool clear;
	struct ffa_value ret;

	if (relinquish_request->endpoint_count != 1) {
		dlog_verbose(
			"Stream endpoints not supported (got %d endpoints on "
			"FFA_MEM_RELINQUISH, expected 1).\n",
			relinquish_request->endpoint_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match calling "
			"VM ID %d.\n",
			relinquish_request->endpoints[0], from_locked.vm->id);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but receiver was %d.\n",
			from_locked.vm->id, handle,
			memory_region->receivers[0]
				.receiver_permissions.receiver);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] !=
	    share_state->fragment_count) {
		dlog_verbose(
			"Memory with handle %#x not yet fully retrieved, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

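	/*
	 * Apply the relinquish transition to every fragment of the region,
	 * clearing the memory first if requested.
	 */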
	ret = ffa_relinquish_check_update(
		from_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, page_pool, clear);

	if (ret.func == FFA_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be reclaimed
		 * (or retrieved again).
		 */
		share_state->retrieved_fragment_count[0] = 0;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal state
 * associated with the handle.
 */
struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
	ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags,
	struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	struct ffa_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#x "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"reclaim.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] != 0) {
		dlog_verbose(
			"Tried to reclaim memory handle %#x that has not been "
			"relinquished.\n",
			handle);
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

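	/*
	 * Map the region back into the sender's page table with its original
	 * mode, clearing the memory first if FFA_MEM_RECLAIM_CLEAR was set.
	 */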
	ret = ffa_retrieve_check_update(
		to_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, share_state->sender_orig_mode,
		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);

	if (ret.func == FFA_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the memory region with
 * the given handle, which was previously shared with the TEE, tells the TEE to
 * mark it as reclaimed, and updates the page table of the reclaiming VM.
 *
 * To do this, information about the memory region is first fetched from the
 * TEE.
 */
struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
	struct vm_locked from_locked,
	ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags,
	struct mpool *page_pool)
{
	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
	struct ffa_value tee_ret;
	uint32_t length;
	uint32_t fragment_length;
	uint32_t fragment_offset;
	struct ffa_memory_region *memory_region;
	struct ffa_composite_memory_region *composite;
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;

	CHECK(request_length <= HF_MAILBOX_SIZE);
	CHECK(from_locked.vm->id == HF_TEE_VM_ID);

	/* Retrieve memory region information from the TEE. */
	tee_ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
			.arg1 = request_length,
			.arg2 = request_length});
	if (tee_ret.func == FFA_ERROR_32) {
		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
		return tee_ret;
	}
	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
		dlog_verbose(
			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
			tee_ret.func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	length = tee_ret.arg1;
	fragment_length = tee_ret.arg2;

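	/*
	 * Sanity check the sizes reported by the TEE before copying anything
	 * out of the mailbox.
	 */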
	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
	    length > sizeof(tee_retrieve_buffer)) {
		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
			fragment_length, length, HF_MAILBOX_SIZE,
			sizeof(tee_retrieve_buffer));
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the first fragment of the memory region descriptor to an
	 * internal buffer.
	 */
	memcpy_s(tee_retrieve_buffer, sizeof(tee_retrieve_buffer),
		from_locked.vm->mailbox.send, fragment_length);

	/* Fetch the remaining fragments into the same buffer. */
	fragment_offset = fragment_length;
	while (fragment_offset < length) {
		tee_ret = arch_other_world_call(
			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
				.arg1 = (uint32_t)handle,
				.arg2 = (uint32_t)(handle >> 32),
				.arg3 = fragment_offset});
		if (tee_ret.func != FFA_MEM_FRAG_TX_32) {
			dlog_verbose(
				"Got %#x (%d) from TEE in response to "
				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
				tee_ret.func, tee_ret.arg2);
			return tee_ret;
		}
		if (ffa_frag_handle(tee_ret) != handle) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
				"in response to FFA_MEM_FRAG_RX for handle "
				"%#x.\n",
				ffa_frag_handle(tee_ret), handle);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		if (ffa_frag_sender(tee_ret) != 0) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
				"(expected 0).\n",
				ffa_frag_sender(tee_ret));
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		fragment_length = tee_ret.arg3;
		if (fragment_length > HF_MAILBOX_SIZE ||
		    fragment_offset + fragment_length > length) {
			dlog_verbose(
				"Invalid fragment length %d at offset %d (max "
				"%d).\n",
				fragment_length, fragment_offset,
				HF_MAILBOX_SIZE);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		memcpy_s(tee_retrieve_buffer + fragment_offset,
			sizeof(tee_retrieve_buffer) - fragment_offset,
			from_locked.vm->mailbox.send, fragment_length);

		fragment_offset += fragment_length;
	}

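	/*
	 * The full memory region descriptor has now been reassembled in
	 * tee_retrieve_buffer, so it can be validated against the reclaim
	 * request.
	 */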
	memory_region = (struct ffa_memory_region *)tee_retrieve_buffer;

	if (memory_region->receiver_count != 1) {
		/* Only one receiver supported by Hafnium for now. */
		dlog_verbose(
			"Multiple recipients not supported (got %d, expected "
			"1).\n",
			memory_region->receiver_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (memory_region->handle != handle) {
		dlog_verbose(
			"Got memory region handle %#x from TEE but requested "
			"handle %#x.\n",
			memory_region->handle, handle);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* The original sender must match the caller. */
	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#x "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	composite = ffa_memory_region_get_composite(memory_region, 0);

	/*
	 * Validate that the reclaim transition is allowed for the given memory
	 * region, forward the request to the TEE and then map the memory back
	 * into the caller's stage-2 page table.
	 */
	return ffa_tee_reclaim_check_update(
		to_locked, handle, composite->constituents,
		composite->constituent_count, memory_to_attributes,
		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
}