/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/other_world.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a
 * SHARE or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

/**
 * The maximum number of fragments into which a memory sharing message may be
 * broken.
 */
#define MAX_FRAGMENTS 20

static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
	      "struct ffa_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
	      "struct ffa_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
	      "struct ffa_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
	      "struct ffa_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct ffa_memory_share_state {
	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct ffa_memory_region *memory_region;

	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];

	/** The number of constituents in each fragment. */
	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];

	/**
	 * The number of valid elements in the `fragments` and
	 * `fragment_constituent_counts` arrays.
	 */
	uint32_t fragment_count;

	/**
	 * The FF-A function used for sharing the memory. Must be one of
	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * The sender's original mode before invoking the FF-A function for
	 * sharing the memory. This is used to restore the original
	 * configuration when the sender invokes FFA_MEM_RECLAIM_32.
	 */
	uint32_t sender_orig_mode;

	/**
	 * True if all the fragments of this sharing request have been sent and
	 * Hafnium has updated the sender page table accordingly.
	 */
	bool sending_complete;

	/**
	 * How many fragments of the memory region each recipient has retrieved
	 * so far. The order of this array matches the order of the endpoint
	 * memory access descriptors in the memory region descriptor. Any
	 * entries beyond the receiver_count will always be 0.
	 */
	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct ffa_memory_share_state *share_states;
};

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Buffer for retrieving memory region information from the TEE for when a
 * region is reclaimed by a VM. Access to this buffer must be guarded by the VM
 * lock of the TEE VM.
 */
alignas(PAGE_SIZE) static uint8_t
	tee_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
static bool allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle,
	struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	CHECK(share_states.share_states != NULL);
	CHECK(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

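			/*
			 * Handles allocated by the hypervisor encode the share
			 * state index, so get_share_state() can look the entry
			 * up directly instead of scanning.
			 */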
			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					i |
					FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
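			/*
			 * Only the first fragment has arrived at this point;
			 * its constituents sit in the same buffer as
			 * `memory_region`, immediately after the header, so
			 * their count follows from the fragment length.
			 */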
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
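
/*
 * Typical usage of the lock wrappers above (a minimal sketch):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	...operate on share_states.share_states...
 *	share_states_unlock(&share_states);
 */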

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
static bool get_share_state(struct share_states_locked share_states,
			    ffa_memory_handle_t handle,
			    struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint32_t index;

	CHECK(share_states.share_states != NULL);
	CHECK(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct ffa_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	uint32_t i;

	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
}

/** Checks whether the given share state has been fully sent. */
static bool share_state_sending_complete(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
static uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

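	/*
	 * The offset is in bytes from the start of the memory region
	 * descriptor: start at the first constituent and add the size of
	 * every constituent received so far.
	 */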
	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

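/** Logs a summary of the given memory region descriptor, for debugging. */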
static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x, tag %u, to "
	     "%u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
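/**
 * Converts the data and instruction access permissions from an FF-A memory
 * access descriptor into Hafnium memory-management mode bits. Permissions the
 * sender left unspecified inherit the corresponding bits from `default_mode`.
 */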
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
					    &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode.
			 */
			if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t required_from_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	required_from_mode =
		ffa_memory_permissions_to_mode(permissions, *orig_from_mode);

	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
		dlog_verbose(
			"Sender tried to send memory with permissions which "
			"required mode %#x but only had %#x itself.\n",
			required_from_mode, *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
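	/*
	 * DONATE leaves the sender with neither ownership nor access, LEND
	 * keeps ownership but removes access, and SHARE keeps access but
	 * marks the pages as shared with the borrower.
	 */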
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return ffa_error(FFA_DENIED);
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
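	/*
	 * DONATE gives the retriever an owned, exclusive mapping; LEND and
	 * SHARE map the memory as unowned (and, for SHARE, as shared); and
	 * RECLAIM restores the original owner's exclusive mapping.
	 */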
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	int mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a set of memory constituents by overwriting them with zeros. The data
 * is flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings
	 * to the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, ffa_memory_access_permissions_t permissions,
	struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
	struct vm *from = from_locked.vm;
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, permissions,
					&orig_from_mode, fragments,
					fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and
	 * ensure that all constituents of the memory region being retrieved
	 * are at the same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the
	 * entire operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above,
	 * so it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}

Andrew Walbran290b0c92020-02-03 16:37:14 +00001075/**
1076 * Reclaims the given memory from the TEE. To do this space is first reserved in
1077 * the <to> VM's page table, then the reclaim request is sent on to the TEE,
1078 * then (if that is successful) the memory is mapped back into the <to> VM's
1079 * page table.
1080 *
1081 * This function requires the calling context to hold the <to> lock.
1082 *
1083 * Returns:
1084 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001085 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran290b0c92020-02-03 16:37:14 +00001086 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001087 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran290b0c92020-02-03 16:37:14 +00001088 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001089 * Success is indicated by FFA_SUCCESS.
Andrew Walbran290b0c92020-02-03 16:37:14 +00001090 */
Andrew Walbran996d1d12020-05-27 14:08:43 +01001091static struct ffa_value ffa_tee_reclaim_check_update(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001092 struct vm_locked to_locked, ffa_memory_handle_t handle,
1093 struct ffa_memory_region_constituent *constituents,
Andrew Walbran290b0c92020-02-03 16:37:14 +00001094 uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
1095 struct mpool *page_pool)
1096{
1097 struct vm *to = to_locked.vm;
1098 uint32_t to_mode;
1099 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001100 struct ffa_value ret;
1101 ffa_memory_region_flags_t tee_flags;
Andrew Walbran290b0c92020-02-03 16:37:14 +00001102
1103 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001104 * Make sure constituents are properly aligned to a 64-bit boundary. If
1105 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran290b0c92020-02-03 16:37:14 +00001106 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001107 if (!is_aligned(constituents, 8)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00001108 dlog_verbose("Constituents not aligned.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001109 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran290b0c92020-02-03 16:37:14 +00001110 }
1111
1112 /*
1113 * Check if the state transition is lawful for the recipient, and ensure
1114 * that all constituents of the memory region being retrieved are at the
1115 * same state.
1116 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001117 ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01001118 &constituents, &constituent_count,
1119 1, memory_to_attributes, &to_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001120 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00001121 dlog_verbose("Invalid transition.\n");
1122 return ret;
1123 }
1124
1125 /*
1126 * Create a local pool so any freed memory can't be used by another
1127 * thread. This is to ensure the original mapping can be restored if the
1128 * clear fails.
1129 */
1130 mpool_init_with_fallback(&local_page_pool, page_pool);
1131
1132 /*
1133 * First reserve all required memory for the new page table entries in
1134 * the recipient page tables without committing, to make sure the entire
1135 * operation will succeed without exhausting the page pool.
1136 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001137 if (!ffa_region_group_identity_map(to_locked, &constituents,
1138 &constituent_count, 1, to_mode,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001139 page_pool, false)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00001140 /* TODO: partial defrag of failed range. */
1141 dlog_verbose(
1142 "Insufficient memory to update recipient page "
1143 "table.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001144 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran290b0c92020-02-03 16:37:14 +00001145 goto out;
1146 }
1147
1148 /*
1149 * Forward the request to the TEE and see what happens.
1150 */
1151 tee_flags = 0;
1152 if (clear) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001153 tee_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
Andrew Walbran290b0c92020-02-03 16:37:14 +00001154 }
Olivier Deprez112d2b52020-09-30 07:39:23 +02001155 ret = arch_other_world_call(
1156 (struct ffa_value){.func = FFA_MEM_RECLAIM_32,
1157 .arg1 = (uint32_t)handle,
1158 .arg2 = (uint32_t)(handle >> 32),
1159 .arg3 = tee_flags});
Andrew Walbran290b0c92020-02-03 16:37:14 +00001160
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001161 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00001162 dlog_verbose(
Andrew Walbranca808b12020-05-15 17:22:28 +01001163 "Got %#x (%d) from TEE in response to FFA_MEM_RECLAIM, "
1164 "expected FFA_SUCCESS.\n",
Andrew Walbran290b0c92020-02-03 16:37:14 +00001165 ret.func, ret.arg2);
1166 goto out;
1167 }
1168
1169 /*
1170 * The TEE was happy with it, so complete the reclaim by mapping the
1171 * memory into the recipient. This won't allocate because the
1172 * transaction was already prepared above, so it doesn't need to use the
1173 * `local_page_pool`.
1174 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001175 CHECK(ffa_region_group_identity_map(to_locked, &constituents,
1176 &constituent_count, 1, to_mode,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001177 page_pool, true));
Andrew Walbran290b0c92020-02-03 16:37:14 +00001178
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001179 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran290b0c92020-02-03 16:37:14 +00001180
1181out:
1182 mpool_fini(&local_page_pool);
1183
1184 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001185 * Tidy up the page table by reclaiming failed mappings (if there was an
1186 * error) or merging entries into blocks where possible (on success).
Andrew Walbran290b0c92020-02-03 16:37:14 +00001187 */
1188 mm_vm_defrag(&to->ptable, page_pool);
1189
1190 return ret;
1191}
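/*
 * Illustrative sketch (not part of the build) of the reserve-then-commit
 * idiom used above and in the other page table updates in this file: the
 * first call with commit=false only reserves memory for the new entries, so
 * the later call with commit=true cannot fail part-way through and leave a
 * half-updated table.
 *
 *	if (!ffa_region_group_identity_map(to_locked, &constituents,
 *					   &constituent_count, 1, to_mode,
 *					   page_pool, false)) {
 *		return ffa_error(FFA_NO_MEMORY);	(reserve failed)
 *	}
 *	...agree the transaction with the other world...
 *	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
 *					    &constituent_count, 1, to_mode,
 *					    page_pool, true));	(commit)
 */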
1192
Andrew Walbran996d1d12020-05-27 14:08:43 +01001193static struct ffa_value ffa_relinquish_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001194 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001195 struct ffa_memory_region_constituent **fragments,
1196 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1197 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001198{
1199 uint32_t orig_from_mode;
1200 uint32_t from_mode;
1201 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001202 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001203
Andrew Walbranca808b12020-05-15 17:22:28 +01001204 ret = ffa_relinquish_check_transition(
1205 from_locked, &orig_from_mode, fragments,
1206 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001207 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001208 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001209 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001210 }
1211
1212 /*
1213 * Create a local pool so any freed memory can't be used by another
1214 * thread. This is to ensure the original mapping can be restored if the
1215 * clear fails.
1216 */
1217 mpool_init_with_fallback(&local_page_pool, page_pool);
1218
1219 /*
1220 * First reserve all required memory for the new page table entries
1221 * without committing, to make sure the entire operation will succeed
1222 * without exhausting the page pool.
1223 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001224 if (!ffa_region_group_identity_map(
1225 from_locked, fragments, fragment_constituent_counts,
1226 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001227 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001228 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001229 goto out;
1230 }
1231
1232 /*
1233 * Update the mapping for the sender. This won't allocate because the
1234 * transaction was already prepared above, but may free pages in the
1235 * case that a whole block is being unmapped that was previously
1236 * partially mapped.
1237 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001238 CHECK(ffa_region_group_identity_map(
1239 from_locked, fragments, fragment_constituent_counts,
1240 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001241
1242 /* Clear the memory so no VM or device can see the previous contents. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001243 if (clear && !ffa_clear_memory_constituents(
Andrew Walbranca808b12020-05-15 17:22:28 +01001244 fragments, fragment_constituent_counts,
1245 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001246 /*
1247 * On failure, roll back by returning memory to the sender. This
1248 * may allocate pages which were previously freed into
1249 * `local_page_pool` by the call above, but will never allocate
1250 * more pages than that so can never fail.
1251 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001252 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001253 from_locked, fragments, fragment_constituent_counts,
1254 fragment_count, orig_from_mode, &local_page_pool,
1255 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001256
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001257 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001258 goto out;
1259 }
1260
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001261 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001262
1263out:
1264 mpool_fini(&local_page_pool);
1265
1266 /*
1267 * Tidy up the page table by reclaiming failed mappings (if there was an
1268 * error) or merging entries into blocks where possible (on success).
1269 */
1270 mm_vm_defrag(&from_locked.vm->ptable, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001271
1272 return ret;
1273}
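/*
 * Minimal sketch (not part of the build) of the rollback idiom used above:
 * the commit step is given `local_page_pool`, so any page table pages it
 * frees stay in that pool, and if the optional clear then fails the original
 * mapping can be rebuilt using only those same pages, which is why the
 * restore can never run out of memory. `remap` below is shorthand for
 * ffa_region_group_identity_map over the relevant constituents.
 *
 *	mpool_init_with_fallback(&local_page_pool, page_pool);
 *	CHECK(remap(from_locked, from_mode, &local_page_pool));
 *	if (clear && !ffa_clear_memory_constituents(...)) {
 *		CHECK(remap(from_locked, orig_from_mode, &local_page_pool));
 *		ret = ffa_error(FFA_NO_MEMORY);
 *	}
 *	mpool_fini(&local_page_pool);
 */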
1274
1275/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001276 * Complete a memory sending operation by checking that it is valid, updating
1277 * the sender page table, and then either marking the share state as having
1278 * completed sending (on success) or freeing it (on failure).
1279 *
1280 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1281 */
1282static struct ffa_value ffa_memory_send_complete(
1283 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001284 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1285 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001286{
1287 struct ffa_memory_region *memory_region = share_state->memory_region;
1288 struct ffa_value ret;
1289
1290 /* Lock must be held. */
1291 CHECK(share_states.share_states != NULL);
1292
1293 /* Check that state is valid in sender page table and update. */
1294 ret = ffa_send_check_update(
1295 from_locked, share_state->fragments,
1296 share_state->fragment_constituent_counts,
1297 share_state->fragment_count, share_state->share_func,
1298 memory_region->receivers[0].receiver_permissions.permissions,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001299 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1300 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001301 if (ret.func != FFA_SUCCESS_32) {
1302 /*
1303 * Free share state, it failed to send so it can't be retrieved.
1304 */
1305 dlog_verbose("Complete failed, freeing share state.\n");
1306 share_state_free(share_states, share_state, page_pool);
1307 return ret;
1308 }
1309
1310 share_state->sending_complete = true;
1311 dlog_verbose("Marked sending complete.\n");
1312
J-Alvesee68c542020-10-29 17:48:20 +00001313 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001314}
1315
1316/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001317 * Check that the given `memory_region` represents a valid memory send request
 1318	 * of the given `share_func` type, return the permissions via the output
 1319	 * parameter, and update the stored copy of the permissions if necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001320 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001321 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001322 * not.
1323 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001324static struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001325 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1326 uint32_t memory_share_length, uint32_t fragment_length,
1327 uint32_t share_func, ffa_memory_access_permissions_t *permissions)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001328{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001329 struct ffa_composite_memory_region *composite;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001330 uint32_t receivers_length;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001331 uint32_t constituents_offset;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001332 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001333 enum ffa_data_access data_access;
1334 enum ffa_instruction_access instruction_access;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001335
Andrew Walbrana65a1322020-04-06 19:32:32 +01001336 CHECK(permissions != NULL);
1337
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001338 /*
1339 * This should already be checked by the caller, just making the
1340 * assumption clear here.
1341 */
1342 CHECK(memory_region->receiver_count == 1);
1343
Andrew Walbrana65a1322020-04-06 19:32:32 +01001344 /* The sender must match the message sender. */
1345 if (memory_region->sender != from_locked.vm->id) {
1346 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001347 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001348 }
1349
Andrew Walbrana65a1322020-04-06 19:32:32 +01001350 /*
1351 * Ensure that the composite header is within the memory bounds and
1352 * doesn't overlap the first part of the message.
1353 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001354 receivers_length = sizeof(struct ffa_memory_access) *
1355 memory_region->receiver_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001356 constituents_offset =
1357 ffa_composite_constituent_offset(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001358 if (memory_region->receivers[0].composite_memory_region_offset <
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001359 sizeof(struct ffa_memory_region) + receivers_length ||
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001360 constituents_offset > fragment_length) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001361 dlog_verbose(
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001362 "Invalid composite memory region descriptor offset "
1363 "%d.\n",
1364 memory_region->receivers[0]
1365 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001366 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001367 }
1368
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001369 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001370
1371 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001372	 * Ensure the constituents are within the memory bounds.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001373 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001374 constituents_length = sizeof(struct ffa_memory_region_constituent) *
1375 composite->constituent_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001376 if (memory_share_length != constituents_offset + constituents_length) {
1377 dlog_verbose("Invalid length %d or composite offset %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001378 memory_share_length,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001379 memory_region->receivers[0]
1380 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001381 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001382 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001383 if (fragment_length < memory_share_length &&
1384 fragment_length < HF_MAILBOX_SIZE) {
1385 dlog_warning(
1386 "Initial fragment length %d smaller than mailbox "
1387 "size.\n",
1388 fragment_length);
1389 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001390
Andrew Walbrana65a1322020-04-06 19:32:32 +01001391 /*
1392 * Clear is not allowed for memory sharing, as the sender still has
1393 * access to the memory.
1394 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001395 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1396 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001397 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001398 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001399 }
1400
1401 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001402 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001403 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001404 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001405 }
1406
1407 /* Check that the permissions are valid. */
1408 *permissions =
1409 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001410 data_access = ffa_get_data_access_attr(*permissions);
1411 instruction_access = ffa_get_instruction_access_attr(*permissions);
1412 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1413 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001414 dlog_verbose("Reserved value for receiver permissions %#x.\n",
1415 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001416 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001417 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001418 if (instruction_access != FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001419 dlog_verbose(
1420 "Invalid instruction access permissions %#x for "
1421 "sending memory.\n",
1422 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001423 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001424 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001425 if (share_func == FFA_MEM_SHARE_32) {
1426 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001427 dlog_verbose(
1428 "Invalid data access permissions %#x for "
1429 "sharing memory.\n",
1430 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001431 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001432 }
1433 /*
Andrew Walbrandd8248f2020-06-22 13:39:30 +01001434		 * According to section 5.11.3 of the FF-A 1.0 spec, NX is
 1435		 * required for share operations (but must not be specified by
 1436		 * the sender), so set it in the copy that we store, ready to be
Andrew Walbrana65a1322020-04-06 19:32:32 +01001437 * returned to the retriever.
1438 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001439 ffa_set_instruction_access_attr(permissions,
1440 FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001441 memory_region->receivers[0].receiver_permissions.permissions =
1442 *permissions;
1443 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001444 if (share_func == FFA_MEM_LEND_32 &&
1445 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001446 dlog_verbose(
1447 "Invalid data access permissions %#x for lending "
1448 "memory.\n",
1449 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001450 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001451 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001452 if (share_func == FFA_MEM_DONATE_32 &&
1453 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001454 dlog_verbose(
1455 "Invalid data access permissions %#x for donating "
1456 "memory.\n",
1457 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001458 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001459 }
1460
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001461 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001462}
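/*
 * Summary of the layout constraints that ffa_memory_send_validate enforces
 * above for the single-receiver case (a restatement of the checks in this
 * file, not of the FF-A spec):
 *
 *	receivers[0].composite_memory_region_offset
 *		>= sizeof(struct ffa_memory_region)
 *		   + receiver_count * sizeof(struct ffa_memory_access);
 *	constituents_offset (= ffa_composite_constituent_offset(region, 0))
 *		<= fragment_length;
 *	memory_share_length
 *		== constituents_offset
 *		   + constituent_count
 *		     * sizeof(struct ffa_memory_region_constituent);
 *
 * i.e. the composite descriptor must follow the receiver list, its
 * constituent array must start within the first fragment, and the declared
 * total length must end exactly at the last constituent.
 */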
1463
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001464/** Forwards a memory send message on to the TEE. */
1465static struct ffa_value memory_send_tee_forward(
1466 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id,
1467 uint32_t share_func, struct ffa_memory_region *memory_region,
1468 uint32_t memory_share_length, uint32_t fragment_length)
1469{
1470 struct ffa_value ret;
1471
1472 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
1473 memory_region, fragment_length);
1474 tee_locked.vm->mailbox.recv_size = fragment_length;
1475 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1476 tee_locked.vm->mailbox.recv_func = share_func;
1477 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001478 ret = arch_other_world_call(
1479 (struct ffa_value){.func = share_func,
1480 .arg1 = memory_share_length,
1481 .arg2 = fragment_length});
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001482 /*
1483 * After the call to the TEE completes it must have finished reading its
1484 * RX buffer, so it is ready for another message.
1485 */
1486 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1487
1488 return ret;
1489}
1490
Andrew Walbrana65a1322020-04-06 19:32:32 +01001491/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001492 * Gets the share state for continuing an operation to donate, lend or share
1493 * memory, and checks that it is a valid request.
1494 *
1495 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1496 * not.
1497 */
1498static struct ffa_value ffa_memory_send_continue_validate(
1499 struct share_states_locked share_states, ffa_memory_handle_t handle,
1500 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1501 struct mpool *page_pool)
1502{
1503 struct ffa_memory_share_state *share_state;
1504 struct ffa_memory_region *memory_region;
1505
1506 CHECK(share_state_ret != NULL);
1507
1508 /*
1509 * Look up the share state by handle and make sure that the VM ID
1510 * matches.
1511 */
1512 if (!get_share_state(share_states, handle, &share_state)) {
1513 dlog_verbose(
1514 "Invalid handle %#x for memory send continuation.\n",
1515 handle);
1516 return ffa_error(FFA_INVALID_PARAMETERS);
1517 }
1518 memory_region = share_state->memory_region;
1519
1520 if (memory_region->sender != from_vm_id) {
1521 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1522 return ffa_error(FFA_INVALID_PARAMETERS);
1523 }
1524
1525 if (share_state->sending_complete) {
1526 dlog_verbose(
1527 "Sending of memory handle %#x is already complete.\n",
1528 handle);
1529 return ffa_error(FFA_INVALID_PARAMETERS);
1530 }
1531
1532 if (share_state->fragment_count == MAX_FRAGMENTS) {
1533 /*
1534 * Log a warning as this is a sign that MAX_FRAGMENTS should
1535 * probably be increased.
1536 */
1537 dlog_warning(
1538 "Too many fragments for memory share with handle %#x; "
1539 "only %d supported.\n",
1540 handle, MAX_FRAGMENTS);
1541 /* Free share state, as it's not possible to complete it. */
1542 share_state_free(share_states, share_state, page_pool);
1543 return ffa_error(FFA_NO_MEMORY);
1544 }
1545
1546 *share_state_ret = share_state;
1547
1548 return (struct ffa_value){.func = FFA_SUCCESS_32};
1549}
1550
1551/**
1552 * Forwards a memory send continuation message on to the TEE.
1553 */
1554static struct ffa_value memory_send_continue_tee_forward(
1555 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, void *fragment,
1556 uint32_t fragment_length, ffa_memory_handle_t handle)
1557{
1558 struct ffa_value ret;
1559
1560 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, fragment,
1561 fragment_length);
1562 tee_locked.vm->mailbox.recv_size = fragment_length;
1563 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1564 tee_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
1565 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001566 ret = arch_other_world_call(
Andrew Walbranca808b12020-05-15 17:22:28 +01001567 (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
1568 .arg1 = (uint32_t)handle,
1569 .arg2 = (uint32_t)(handle >> 32),
1570 .arg3 = fragment_length,
1571 .arg4 = (uint64_t)sender_vm_id << 16});
1572 /*
1573 * After the call to the TEE completes it must have finished reading its
1574 * RX buffer, so it is ready for another message.
1575 */
1576 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1577
1578 return ret;
1579}
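/*
 * For reference, the FFA_MEM_FRAG_TX call forwarded above is encoded exactly
 * as the code constructs it: arg1/arg2 carry the low/high 32 bits of the
 * handle, arg3 the fragment length and arg4 the sender VM ID in bits
 * [31:16], with the fragment data itself passed via the TEE's RX buffer.
 * This describes the call as built here, not the complete ABI.
 */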
1580
1581/**
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001582 * Validates a call to donate, lend or share memory to a non-TEE VM and then
 1583	 * updates the stage-2 page tables. Specifically, checks that the message
 1584	 * length and number of memory region constituents are consistent, and that
 1585	 * the transition is valid for the type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001586 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001587 * Assumes that the caller has already found and locked the sender VM and copied
1588 * the memory region descriptor from the sender's TX buffer to a freshly
1589 * allocated page from Hafnium's internal pool. The caller must have also
1590 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001591 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001592 * This function takes ownership of the `memory_region` passed in and will free
1593 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001594 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001595struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001596 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001597 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001598 uint32_t fragment_length, uint32_t share_func,
1599 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001600{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001601 ffa_memory_access_permissions_t permissions;
1602 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001603 struct share_states_locked share_states;
1604 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001605
1606 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001607 * If there is an error validating the `memory_region` then we need to
1608 * free it because we own it but we won't be storing it in a share state
1609 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001610 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001611 ret = ffa_memory_send_validate(from_locked, memory_region,
1612 memory_share_length, fragment_length,
1613 share_func, &permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001614 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001615 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001616 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001617 }
1618
Andrew Walbrana65a1322020-04-06 19:32:32 +01001619 /* Set flag for share function, ready to be retrieved later. */
1620 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001621 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001622 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001623 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001624 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001625 case FFA_MEM_LEND_32:
1626 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001627 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001628 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001629 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001630 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001631 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001632 }
1633
Andrew Walbranca808b12020-05-15 17:22:28 +01001634 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001635 /*
1636 * Allocate a share state before updating the page table. Otherwise if
1637 * updating the page table succeeded but allocating the share state
1638 * failed then it would leave the memory in a state where nobody could
1639 * get it back.
1640 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001641 if (!allocate_share_state(share_states, share_func, memory_region,
1642 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1643 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001644 dlog_verbose("Failed to allocate share state.\n");
1645 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001646 ret = ffa_error(FFA_NO_MEMORY);
1647 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001648 }
1649
Andrew Walbranca808b12020-05-15 17:22:28 +01001650 if (fragment_length == memory_share_length) {
1651 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001652 ret = ffa_memory_send_complete(
1653 from_locked, share_states, share_state, page_pool,
1654 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001655 } else {
1656 ret = (struct ffa_value){
1657 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001658 .arg1 = (uint32_t)memory_region->handle,
1659 .arg2 = (uint32_t)(memory_region->handle >> 32),
Andrew Walbranca808b12020-05-15 17:22:28 +01001660 .arg3 = fragment_length};
1661 }
1662
1663out:
1664 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001665 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001666 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001667}
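/*
 * Caller-visible flow of a fragmented send, as implemented by
 * ffa_memory_send above and ffa_memory_send_continue below (a sketch of the
 * return values in this file, not a normative sequence):
 *
 *	FFA_MEM_SHARE/LEND/DONATE(total_length, frag0_length)
 *		-> FFA_MEM_FRAG_RX(handle_lo, handle_hi, frag0_length)
 *	FFA_MEM_FRAG_TX(handle, frag1)
 *		-> FFA_MEM_FRAG_RX(handle, next_fragment_offset)
 *	FFA_MEM_FRAG_TX(handle, last_fragment)
 *		-> FFA_SUCCESS(handle)
 *
 * When the whole descriptor fits in a single message the first call returns
 * FFA_SUCCESS with the handle straight away.
 */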
1668
1669/**
1670 * Validates a call to donate, lend or share memory to the TEE and then updates
 1671	 * the stage-2 page tables. Specifically, checks that the message length
 1672	 * and number of memory region constituents are consistent, and that the
 1673	 * transition is valid for the type of memory sending operation.
1674 *
1675 * Assumes that the caller has already found and locked the sender VM and the
1676 * TEE VM, and copied the memory region descriptor from the sender's TX buffer
1677 * to a freshly allocated page from Hafnium's internal pool. The caller must
1678 * have also validated that the receiver VM ID is valid.
1679 *
1680 * This function takes ownership of the `memory_region` passed in and will free
1681 * it when necessary; it must not be freed by the caller.
1682 */
1683struct ffa_value ffa_memory_tee_send(
1684 struct vm_locked from_locked, struct vm_locked to_locked,
1685 struct ffa_memory_region *memory_region, uint32_t memory_share_length,
1686 uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool)
1687{
1688 ffa_memory_access_permissions_t permissions;
1689 struct ffa_value ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001690
1691 /*
1692 * If there is an error validating the `memory_region` then we need to
1693 * free it because we own it but we won't be storing it in a share state
1694 * after all.
1695 */
1696 ret = ffa_memory_send_validate(from_locked, memory_region,
1697 memory_share_length, fragment_length,
1698 share_func, &permissions);
1699 if (ret.func != FFA_SUCCESS_32) {
1700 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001701 }
1702
Andrew Walbranca808b12020-05-15 17:22:28 +01001703 if (fragment_length == memory_share_length) {
1704 /* No more fragments to come, everything fit in one message. */
1705 struct ffa_composite_memory_region *composite =
1706 ffa_memory_region_get_composite(memory_region, 0);
1707 struct ffa_memory_region_constituent *constituents =
1708 composite->constituents;
Andrew Walbran37c574e2020-06-03 11:45:46 +01001709 struct mpool local_page_pool;
1710 uint32_t orig_from_mode;
1711
1712 /*
1713 * Use a local page pool so that we can roll back if necessary.
1714 */
1715 mpool_init_with_fallback(&local_page_pool, page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001716
1717 ret = ffa_send_check_update(
1718 from_locked, &constituents,
1719 &composite->constituent_count, 1, share_func,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001720 permissions, &local_page_pool,
1721 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1722 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01001723 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01001724 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001725 goto out;
1726 }
1727
1728 /* Forward memory send message on to TEE. */
1729 ret = memory_send_tee_forward(
1730 to_locked, from_locked.vm->id, share_func,
1731 memory_region, memory_share_length, fragment_length);
Andrew Walbran37c574e2020-06-03 11:45:46 +01001732
1733 if (ret.func != FFA_SUCCESS_32) {
1734 dlog_verbose(
1735 "TEE didn't successfully complete memory send "
1736 "operation; returned %#x (%d). Rolling back.\n",
1737 ret.func, ret.arg2);
1738
1739 /*
1740 * The TEE failed to complete the send operation, so
1741 * roll back the page table update for the VM. This
1742 * can't fail because it won't try to allocate more
1743 * memory than was freed into the `local_page_pool` by
1744 * `ffa_send_check_update` in the initial update.
1745 */
1746 CHECK(ffa_region_group_identity_map(
1747 from_locked, &constituents,
1748 &composite->constituent_count, 1,
1749 orig_from_mode, &local_page_pool, true));
1750 }
1751
1752 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001753 } else {
1754 struct share_states_locked share_states = share_states_lock();
1755 ffa_memory_handle_t handle;
1756
1757 /*
1758 * We need to wait for the rest of the fragments before we can
1759 * check whether the transaction is valid and unmap the memory.
1760 * Call the TEE so it can do its initial validation and assign a
1761 * handle, and allocate a share state to keep what we have so
1762 * far.
1763 */
1764 ret = memory_send_tee_forward(
1765 to_locked, from_locked.vm->id, share_func,
1766 memory_region, memory_share_length, fragment_length);
1767 if (ret.func == FFA_ERROR_32) {
1768 goto out_unlock;
1769 } else if (ret.func != FFA_MEM_FRAG_RX_32) {
1770 dlog_warning(
1771 "Got %#x from TEE in response to %#x for "
1772 "fragment with with %d/%d, expected "
1773 "FFA_MEM_FRAG_RX.\n",
1774 ret.func, share_func, fragment_length,
1775 memory_share_length);
1776 ret = ffa_error(FFA_INVALID_PARAMETERS);
1777 goto out_unlock;
1778 }
1779 handle = ffa_frag_handle(ret);
1780 if (ret.arg3 != fragment_length) {
1781 dlog_warning(
1782 "Got unexpected fragment offset %d for "
1783 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1784 ret.arg3, fragment_length);
1785 ret = ffa_error(FFA_INVALID_PARAMETERS);
1786 goto out_unlock;
1787 }
1788 if (ffa_frag_sender(ret) != from_locked.vm->id) {
1789 dlog_warning(
1790 "Got unexpected sender ID %d for "
1791 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1792 ffa_frag_sender(ret), from_locked.vm->id);
1793 ret = ffa_error(FFA_INVALID_PARAMETERS);
1794 goto out_unlock;
1795 }
1796
1797 if (!allocate_share_state(share_states, share_func,
1798 memory_region, fragment_length,
1799 handle, NULL)) {
1800 dlog_verbose("Failed to allocate share state.\n");
1801 ret = ffa_error(FFA_NO_MEMORY);
1802 goto out_unlock;
1803 }
1804 /*
1805 * Don't free the memory region fragment, as it has been stored
1806 * in the share state.
1807 */
1808 memory_region = NULL;
1809 out_unlock:
1810 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001811 }
1812
Andrew Walbranca808b12020-05-15 17:22:28 +01001813out:
1814 if (memory_region != NULL) {
1815 mpool_free(page_pool, memory_region);
1816 }
1817 dump_share_states();
1818 return ret;
1819}
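/*
 * Orientation note for ffa_memory_tee_send above, derived from the code:
 * when the whole descriptor fits in one message, the sender's page table is
 * updated first and the message is then forwarded, with the update rolled
 * back from `local_page_pool` if the TEE rejects it; otherwise the first
 * fragment is forwarded so the TEE can allocate a handle, and a share state
 * is kept to accumulate the remaining fragments before any page table change
 * is made.
 */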
1820
1821/**
1822 * Continues an operation to donate, lend or share memory to a non-TEE VM. If
1823 * this is the last fragment then checks that the transition is valid for the
1824 * type of memory sending operation and updates the stage-2 page tables of the
1825 * sender.
1826 *
1827 * Assumes that the caller has already found and locked the sender VM and copied
1828 * the memory region descriptor from the sender's TX buffer to a freshly
1829 * allocated page from Hafnium's internal pool.
1830 *
1831 * This function takes ownership of the `fragment` passed in; it must not be
1832 * freed by the caller.
1833 */
1834struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1835 void *fragment,
1836 uint32_t fragment_length,
1837 ffa_memory_handle_t handle,
1838 struct mpool *page_pool)
1839{
1840 struct share_states_locked share_states = share_states_lock();
1841 struct ffa_memory_share_state *share_state;
1842 struct ffa_value ret;
1843 struct ffa_memory_region *memory_region;
1844
1845 ret = ffa_memory_send_continue_validate(share_states, handle,
1846 &share_state,
1847 from_locked.vm->id, page_pool);
1848 if (ret.func != FFA_SUCCESS_32) {
1849 goto out_free_fragment;
1850 }
1851 memory_region = share_state->memory_region;
1852
1853 if (memory_region->receivers[0].receiver_permissions.receiver ==
1854 HF_TEE_VM_ID) {
1855 dlog_error(
1856 "Got hypervisor-allocated handle for memory send to "
1857 "TEE. This should never happen, and indicates a bug in "
1858 "EL3 code.\n");
1859 ret = ffa_error(FFA_INVALID_PARAMETERS);
1860 goto out_free_fragment;
1861 }
1862
1863 /* Add this fragment. */
1864 share_state->fragments[share_state->fragment_count] = fragment;
1865 share_state->fragment_constituent_counts[share_state->fragment_count] =
1866 fragment_length / sizeof(struct ffa_memory_region_constituent);
1867 share_state->fragment_count++;
1868
1869 /* Check whether the memory send operation is now ready to complete. */
1870 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001871 ret = ffa_memory_send_complete(
1872 from_locked, share_states, share_state, page_pool,
1873 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001874 } else {
1875 ret = (struct ffa_value){
1876 .func = FFA_MEM_FRAG_RX_32,
1877 .arg1 = (uint32_t)handle,
1878 .arg2 = (uint32_t)(handle >> 32),
1879 .arg3 = share_state_next_fragment_offset(share_states,
1880 share_state)};
1881 }
1882 goto out;
1883
1884out_free_fragment:
1885 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001886
1887out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001888 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001889 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001890}
1891
Andrew Walbranca808b12020-05-15 17:22:28 +01001892/**
1893 * Continues an operation to donate, lend or share memory to the TEE VM. If this
1894 * is the last fragment then checks that the transition is valid for the type of
1895 * memory sending operation and updates the stage-2 page tables of the sender.
1896 *
1897 * Assumes that the caller has already found and locked the sender VM and copied
1898 * the memory region descriptor from the sender's TX buffer to a freshly
1899 * allocated page from Hafnium's internal pool.
1900 *
 1901 * This function takes ownership of the `fragment` passed in and will free
1902 * it when necessary; it must not be freed by the caller.
1903 */
1904struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked,
1905 struct vm_locked to_locked,
1906 void *fragment,
1907 uint32_t fragment_length,
1908 ffa_memory_handle_t handle,
1909 struct mpool *page_pool)
1910{
1911 struct share_states_locked share_states = share_states_lock();
1912 struct ffa_memory_share_state *share_state;
1913 struct ffa_value ret;
1914 struct ffa_memory_region *memory_region;
1915
1916 ret = ffa_memory_send_continue_validate(share_states, handle,
1917 &share_state,
1918 from_locked.vm->id, page_pool);
1919 if (ret.func != FFA_SUCCESS_32) {
1920 goto out_free_fragment;
1921 }
1922 memory_region = share_state->memory_region;
1923
1924 if (memory_region->receivers[0].receiver_permissions.receiver !=
1925 HF_TEE_VM_ID) {
1926 dlog_error(
1927 "Got SPM-allocated handle for memory send to non-TEE "
1928 "VM. This should never happen, and indicates a bug.\n");
1929 ret = ffa_error(FFA_INVALID_PARAMETERS);
1930 goto out_free_fragment;
1931 }
1932
1933 if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
1934 to_locked.vm->mailbox.recv == NULL) {
1935 /*
1936 * If the TEE RX buffer is not available, tell the sender to
1937 * retry by returning the current offset again.
1938 */
1939 ret = (struct ffa_value){
1940 .func = FFA_MEM_FRAG_RX_32,
1941 .arg1 = (uint32_t)handle,
1942 .arg2 = (uint32_t)(handle >> 32),
1943 .arg3 = share_state_next_fragment_offset(share_states,
1944 share_state),
1945 };
1946 goto out_free_fragment;
1947 }
1948
1949 /* Add this fragment. */
1950 share_state->fragments[share_state->fragment_count] = fragment;
1951 share_state->fragment_constituent_counts[share_state->fragment_count] =
1952 fragment_length / sizeof(struct ffa_memory_region_constituent);
1953 share_state->fragment_count++;
1954
1955 /* Check whether the memory send operation is now ready to complete. */
1956 if (share_state_sending_complete(share_states, share_state)) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01001957 struct mpool local_page_pool;
1958 uint32_t orig_from_mode;
1959
1960 /*
1961 * Use a local page pool so that we can roll back if necessary.
1962 */
1963 mpool_init_with_fallback(&local_page_pool, page_pool);
1964
Andrew Walbranca808b12020-05-15 17:22:28 +01001965 ret = ffa_memory_send_complete(from_locked, share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001966 share_state, &local_page_pool,
1967 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01001968
1969 if (ret.func == FFA_SUCCESS_32) {
1970 /*
1971 * Forward final fragment on to the TEE so that
1972 * it can complete the memory sending operation.
1973 */
1974 ret = memory_send_continue_tee_forward(
1975 to_locked, from_locked.vm->id, fragment,
1976 fragment_length, handle);
1977
1978 if (ret.func != FFA_SUCCESS_32) {
1979 /*
1980 * The error will be passed on to the caller,
1981 * but log it here too.
1982 */
1983 dlog_verbose(
1984 "TEE didn't successfully complete "
1985 "memory send operation; returned %#x "
Andrew Walbran37c574e2020-06-03 11:45:46 +01001986 "(%d). Rolling back.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01001987 ret.func, ret.arg2);
Andrew Walbran37c574e2020-06-03 11:45:46 +01001988
1989 /*
1990 * The TEE failed to complete the send
1991 * operation, so roll back the page table update
1992 * for the VM. This can't fail because it won't
1993 * try to allocate more memory than was freed
1994 * into the `local_page_pool` by
1995 * `ffa_send_check_update` in the initial
1996 * update.
1997 */
1998 CHECK(ffa_region_group_identity_map(
1999 from_locked, share_state->fragments,
2000 share_state
2001 ->fragment_constituent_counts,
2002 share_state->fragment_count,
2003 orig_from_mode, &local_page_pool,
2004 true));
Andrew Walbranca808b12020-05-15 17:22:28 +01002005 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002006
Andrew Walbranca808b12020-05-15 17:22:28 +01002007 /* Free share state. */
2008 share_state_free(share_states, share_state, page_pool);
2009 } else {
2010 /* Abort sending to TEE. */
2011 struct ffa_value tee_ret =
Olivier Deprez112d2b52020-09-30 07:39:23 +02002012 arch_other_world_call((struct ffa_value){
Andrew Walbranca808b12020-05-15 17:22:28 +01002013 .func = FFA_MEM_RECLAIM_32,
2014 .arg1 = (uint32_t)handle,
2015 .arg2 = (uint32_t)(handle >> 32)});
2016
2017 if (tee_ret.func != FFA_SUCCESS_32) {
2018 /*
2019 * Nothing we can do if TEE doesn't abort
2020 * properly, just log it.
2021 */
2022 dlog_verbose(
2023 "TEE didn't successfully abort failed "
2024 "memory send operation; returned %#x "
2025 "(%d).\n",
2026 tee_ret.func, tee_ret.arg2);
2027 }
2028 /*
2029 * We don't need to free the share state in this case
2030 * because ffa_memory_send_complete does that already.
2031 */
2032 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002033
2034 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01002035 } else {
2036 uint32_t next_fragment_offset =
2037 share_state_next_fragment_offset(share_states,
2038 share_state);
2039
2040 ret = memory_send_continue_tee_forward(
2041 to_locked, from_locked.vm->id, fragment,
2042 fragment_length, handle);
2043
2044 if (ret.func != FFA_MEM_FRAG_RX_32 ||
2045 ffa_frag_handle(ret) != handle ||
2046 ret.arg3 != next_fragment_offset ||
2047 ffa_frag_sender(ret) != from_locked.vm->id) {
2048 dlog_verbose(
2049 "Got unexpected result from forwarding "
2050 "FFA_MEM_FRAG_TX to TEE: %#x (handle %#x, "
2051 "offset %d, sender %d); expected "
2052 "FFA_MEM_FRAG_RX (handle %#x, offset %d, "
2053 "sender %d).\n",
2054 ret.func, ffa_frag_handle(ret), ret.arg3,
2055 ffa_frag_sender(ret), handle,
2056 next_fragment_offset, from_locked.vm->id);
2057 /* Free share state. */
2058 share_state_free(share_states, share_state, page_pool);
2059 ret = ffa_error(FFA_INVALID_PARAMETERS);
2060 goto out;
2061 }
2062
2063 ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
2064 .arg1 = (uint32_t)handle,
2065 .arg2 = (uint32_t)(handle >> 32),
2066 .arg3 = next_fragment_offset};
2067 }
2068 goto out;
2069
2070out_free_fragment:
2071 mpool_free(page_pool, fragment);
2072
2073out:
2074 share_states_unlock(&share_states);
2075 return ret;
2076}
2077
2078/** Clean up after the receiver has finished retrieving a memory region. */
2079static void ffa_memory_retrieve_complete(
2080 struct share_states_locked share_states,
2081 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2082{
2083 if (share_state->share_func == FFA_MEM_DONATE_32) {
2084 /*
2085 * Memory that has been donated can't be relinquished,
2086 * so no need to keep the share state around.
2087 */
2088 share_state_free(share_states, share_state, page_pool);
2089 dlog_verbose("Freed share state for donate.\n");
2090 }
2091}
2092
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002093struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2094 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002095 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002096 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002097{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002098 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002099 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002100 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002101 sizeof(struct ffa_memory_access);
2102 ffa_memory_handle_t handle = retrieve_request->handle;
2103 ffa_memory_region_flags_t transaction_type =
Andrew Walbrana65a1322020-04-06 19:32:32 +01002104 retrieve_request->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002105 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
2106 struct ffa_memory_region *memory_region;
2107 ffa_memory_access_permissions_t sent_permissions;
2108 enum ffa_data_access sent_data_access;
2109 enum ffa_instruction_access sent_instruction_access;
2110 ffa_memory_access_permissions_t requested_permissions;
2111 enum ffa_data_access requested_data_access;
2112 enum ffa_instruction_access requested_instruction_access;
2113 ffa_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002114 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002115 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002116 struct ffa_memory_share_state *share_state;
2117 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002118 struct ffa_composite_memory_region *composite;
2119 uint32_t total_length;
2120 uint32_t fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002121
2122 dump_share_states();
2123
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002124 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002125 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002126 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002127 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002128 expected_retrieve_request_length,
2129 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002130 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002131 }
2132
Andrew Walbrana65a1322020-04-06 19:32:32 +01002133 if (retrieve_request->receiver_count != 1) {
2134 dlog_verbose(
2135 "Multi-way memory sharing not supported (got %d "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002136			"receiver descriptors on FFA_MEM_RETRIEVE_REQ, "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002137 "expected 1).\n",
2138 retrieve_request->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002139 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002140 }
2141
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002142 share_states = share_states_lock();
2143 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002144 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002145 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002146 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002147 goto out;
2148 }
2149
Andrew Walbrana65a1322020-04-06 19:32:32 +01002150 memory_region = share_state->memory_region;
2151 CHECK(memory_region != NULL);
2152
2153 /*
2154 * Check that the transaction type expected by the receiver is correct,
2155 * if it has been specified.
2156 */
2157 if (transaction_type !=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002158 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
Andrew Walbrana65a1322020-04-06 19:32:32 +01002159 transaction_type != (memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002160 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002161 dlog_verbose(
2162 "Incorrect transaction type %#x for "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002163 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002164 transaction_type,
2165 memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002166 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002167 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002168 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002169 goto out;
2170 }
2171
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002172 if (retrieve_request->sender != memory_region->sender) {
2173 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002174 "Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002175 "expected %d for handle %#x.\n",
2176 retrieve_request->sender, memory_region->sender,
2177 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002178 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002179 goto out;
2180 }
2181
2182 if (retrieve_request->tag != memory_region->tag) {
2183 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002184 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002185 "%d for handle %#x.\n",
2186 retrieve_request->tag, memory_region->tag, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002187 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002188 goto out;
2189 }
2190
Andrew Walbrana65a1322020-04-06 19:32:32 +01002191 if (retrieve_request->receivers[0].receiver_permissions.receiver !=
2192 to_locked.vm->id) {
2193 dlog_verbose(
2194 "Retrieve request receiver VM ID %d didn't match "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002195 "caller of FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002196 retrieve_request->receivers[0]
2197 .receiver_permissions.receiver);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002198 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002199 goto out;
2200 }
2201
2202 if (memory_region->receivers[0].receiver_permissions.receiver !=
2203 to_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002204 dlog_verbose(
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002205 "Incorrect receiver VM ID %d for FFA_MEM_RETRIEVE_REQ, "
2206 "expected %d for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002207 to_locked.vm->id,
2208 memory_region->receivers[0]
2209 .receiver_permissions.receiver,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002210 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002211 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002212 goto out;
2213 }
2214
Andrew Walbranca808b12020-05-15 17:22:28 +01002215 if (!share_state->sending_complete) {
2216 dlog_verbose(
2217 "Memory with handle %#x not fully sent, can't "
2218 "retrieve.\n",
2219 handle);
2220 ret = ffa_error(FFA_INVALID_PARAMETERS);
2221 goto out;
2222 }
2223
2224 if (share_state->retrieved_fragment_count[0] != 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002225 dlog_verbose("Memory with handle %#x already retrieved.\n",
2226 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002227 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002228 goto out;
2229 }
2230
Andrew Walbrana65a1322020-04-06 19:32:32 +01002231 if (retrieve_request->receivers[0].composite_memory_region_offset !=
2232 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002233 dlog_verbose(
2234 "Retriever specified address ranges not supported (got "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002235 "offset %d).\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002236 retrieve_request->receivers[0]
2237 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002238 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002239 goto out;
2240 }
2241
Andrew Walbrana65a1322020-04-06 19:32:32 +01002242 /*
2243 * Check permissions from sender against permissions requested by
2244 * receiver.
2245 */
2246 /* TODO: Check attributes too. */
2247 sent_permissions =
2248 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002249 sent_data_access = ffa_get_data_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002250 sent_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002251 ffa_get_instruction_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002252 requested_permissions =
2253 retrieve_request->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002254 requested_data_access = ffa_get_data_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002255 requested_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002256 ffa_get_instruction_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002257 permissions = 0;
2258 switch (sent_data_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002259 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2260 case FFA_DATA_ACCESS_RW:
2261 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2262 requested_data_access == FFA_DATA_ACCESS_RW) {
2263 ffa_set_data_access_attr(&permissions,
2264 FFA_DATA_ACCESS_RW);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002265 break;
2266 }
2267 /* Intentional fall-through. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002268 case FFA_DATA_ACCESS_RO:
2269 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2270 requested_data_access == FFA_DATA_ACCESS_RO) {
2271 ffa_set_data_access_attr(&permissions,
2272 FFA_DATA_ACCESS_RO);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002273 break;
2274 }
2275 dlog_verbose(
2276 "Invalid data access requested; sender specified "
2277 "permissions %#x but receiver requested %#x.\n",
2278 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002279 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002280 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002281 case FFA_DATA_ACCESS_RESERVED:
2282 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002283 "checked before this point.");
2284 }
2285 switch (sent_instruction_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002286 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2287 case FFA_INSTRUCTION_ACCESS_X:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002288 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002289 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2290 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
2291 ffa_set_instruction_access_attr(
2292 &permissions, FFA_INSTRUCTION_ACCESS_X);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002293 break;
2294 }
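		/* Intentional fall-through. */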
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002295 case FFA_INSTRUCTION_ACCESS_NX:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002296 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002297 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2298 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2299 ffa_set_instruction_access_attr(
2300 &permissions, FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002301 break;
2302 }
2303 dlog_verbose(
2304 "Invalid instruction access requested; sender "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002305 "specified permissions %#x but receiver requested "
2306 "%#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002307 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002308 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002309 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002310 case FFA_INSTRUCTION_ACCESS_RESERVED:
2311 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002312 "be checked before this point.");
2313 }
J-Alves7cd5eb32020-10-16 19:06:10 +01002314 memory_to_attributes = ffa_memory_permissions_to_mode(
2315 permissions, share_state->sender_orig_mode);
Andrew Walbran996d1d12020-05-27 14:08:43 +01002316 ret = ffa_retrieve_check_update(
Andrew Walbranca808b12020-05-15 17:22:28 +01002317 to_locked, share_state->fragments,
2318 share_state->fragment_constituent_counts,
2319 share_state->fragment_count, memory_to_attributes,
Andrew Walbran996d1d12020-05-27 14:08:43 +01002320 share_state->share_func, false, page_pool);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002321 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002322 goto out;
2323 }
2324
2325 /*
2326 * Copy response to RX buffer of caller and deliver the message. This
2327 * must be done before the share_state is (possibly) freed.
2328 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002329 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002330 composite = ffa_memory_region_get_composite(memory_region, 0);
2331 /*
2332 * Constituents which we received in the first fragment should always
2333 * fit in the first fragment we are sending, because the header is the
2334 * same size in both cases and we have a fixed message buffer size. So
2335 * `ffa_retrieved_memory_region_init` should never fail.
2336 */
2337 CHECK(ffa_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002338 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2339 memory_region->sender, memory_region->attributes,
2340 memory_region->flags, handle, to_locked.vm->id, permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +01002341 composite->page_count, composite->constituent_count,
2342 share_state->fragments[0],
2343 share_state->fragment_constituent_counts[0], &total_length,
2344 &fragment_length));
2345 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002346 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002347 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002348 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
2349
	share_state->retrieved_fragment_count[0] = 1;
	if (share_state->retrieved_fragment_count[0] ==
	    share_state->fragment_count) {
		ffa_memory_retrieve_complete(share_states, share_state,
					     page_pool);
	}

	ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
				 .arg1 = total_length,
				 .arg2 = fragment_length};

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

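/**
 * Continues a memory retrieve operation for the given handle by copying the
 * next fragment of constituents into the caller's RX buffer and reporting it
 * via FFA_MEM_FRAG_TX. Completes the retrieval once the final fragment has
 * been delivered.
 */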
struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
					      ffa_memory_handle_t handle,
					      uint32_t fragment_offset,
					      struct mpool *page_pool)
{
	struct ffa_memory_region *memory_region;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_value ret;
	uint32_t fragment_index;
	uint32_t retrieved_constituents_count;
	uint32_t i;
	uint32_t expected_fragment_offset;
	uint32_t remaining_constituent_count;
	uint32_t fragment_length;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    to_locked.vm->id) {
		dlog_verbose(
			"Caller of FFA_MEM_FRAG_RX (%d) is not receiver (%d) "
			"of handle %#x.\n",
			to_locked.vm->id,
			memory_region->receivers[0]
				.receiver_permissions.receiver,
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"retrieve.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] == 0 ||
	    share_state->retrieved_fragment_count[0] >=
		    share_state->fragment_count) {
		dlog_verbose(
			"Retrieval of memory with handle %#x not yet started "
			"or already completed (%d/%d fragments retrieved).\n",
			handle, share_state->retrieved_fragment_count[0],
			share_state->fragment_count);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	fragment_index = share_state->retrieved_fragment_count[0];

	/*
	 * Check that the given fragment offset is correct by counting how many
	 * constituents were in the fragments previously sent.
	 */
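	/*
	 * Illustrative example (numbers are arbitrary): if the fragments
	 * already retrieved carried 100 and 120 constituents, the next
	 * fragment is expected to start at the offset of the constituent
	 * array plus 220 * sizeof(struct ffa_memory_region_constituent)
	 * bytes into the full descriptor.
	 */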
	retrieved_constituents_count = 0;
	for (i = 0; i < fragment_index; ++i) {
		retrieved_constituents_count +=
			share_state->fragment_constituent_counts[i];
	}
	expected_fragment_offset =
		ffa_composite_constituent_offset(memory_region, 0) +
		retrieved_constituents_count *
			sizeof(struct ffa_memory_region_constituent);
	if (fragment_offset != expected_fragment_offset) {
		dlog_verbose("Fragment offset was %d but expected %d.\n",
			     fragment_offset, expected_fragment_offset);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	remaining_constituent_count = ffa_memory_fragment_init(
		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
		share_state->fragments[fragment_index],
		share_state->fragment_constituent_counts[fragment_index],
		&fragment_length);
	CHECK(remaining_constituent_count == 0);
	to_locked.vm->mailbox.recv_size = fragment_length;
	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
	share_state->retrieved_fragment_count[0]++;
	if (share_state->retrieved_fragment_count[0] ==
	    share_state->fragment_count) {
		ffa_memory_retrieve_complete(share_states, share_state,
					     page_pool);
	}

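	/*
	 * Report the fragment to the caller via FFA_MEM_FRAG_TX, with the
	 * handle split across two 32-bit arguments and the length of the
	 * fragment just copied.
	 */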
	ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
				 .arg1 = (uint32_t)handle,
				 .arg2 = (uint32_t)(handle >> 32),
				 .arg3 = fragment_length};

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

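/**
 * Handles an FFA_MEM_RELINQUISH request: checks that the caller is the
 * receiver which retrieved the memory region identified by the given handle,
 * validates the relinquish transition, updates the caller's page table
 * accordingly and marks the region as no longer retrieved, so that the sender
 * may reclaim it (or the receiver may retrieve it again).
 */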
struct ffa_value ffa_memory_relinquish(
	struct vm_locked from_locked,
	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	ffa_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	bool clear;
	struct ffa_value ret;

	if (relinquish_request->endpoint_count != 1) {
		dlog_verbose(
			"Stream endpoints not supported (got %d endpoints on "
			"FFA_MEM_RELINQUISH, expected 1).\n",
			relinquish_request->endpoint_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match calling "
			"VM ID %d.\n",
			relinquish_request->endpoints[0], from_locked.vm->id);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but receiver was %d.\n",
			from_locked.vm->id, handle,
			memory_region->receivers[0]
				.receiver_permissions.receiver);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] !=
	    share_state->fragment_count) {
		dlog_verbose(
			"Memory with handle %#x not yet fully retrieved, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	ret = ffa_relinquish_check_update(
		from_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, page_pool, clear);

	if (ret.func == FFA_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be reclaimed
		 * (or retrieved again).
		 */
		share_state->retrieved_fragment_count[0] = 0;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal state
 * associated with the handle.
 */
struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
				    ffa_memory_handle_t handle,
				    ffa_memory_region_flags_t flags,
				    struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	struct ffa_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#x "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"reclaim.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] != 0) {
		dlog_verbose(
			"Tried to reclaim memory handle %#x that has not been "
			"relinquished.\n",
			handle);
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

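	/*
	 * Restore the sender's original mapping of the region, clearing the
	 * memory first if the caller requested it via the clear flag.
	 */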
	ret = ffa_retrieve_check_update(
		to_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, share_state->sender_orig_mode,
		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);

	if (ret.func == FFA_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the memory region with
 * the given handle which was previously shared with the TEE, tells the TEE to
 * mark it as reclaimed, and updates the page table of the reclaiming VM.
 *
 * To do this, information about the memory region is first fetched from the
 * TEE.
 */
struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
					struct vm_locked from_locked,
					ffa_memory_handle_t handle,
					ffa_memory_region_flags_t flags,
					struct mpool *page_pool)
{
	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
	struct ffa_value tee_ret;
	uint32_t length;
	uint32_t fragment_length;
	uint32_t fragment_offset;
	struct ffa_memory_region *memory_region;
	struct ffa_composite_memory_region *composite;
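	/* The region is mapped back to the owner with full (RWX) access. */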
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;

	CHECK(request_length <= HF_MAILBOX_SIZE);
	CHECK(from_locked.vm->id == HF_TEE_VM_ID);

	/* Retrieve memory region information from the TEE. */
	tee_ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
				   .arg1 = request_length,
				   .arg2 = request_length});
	if (tee_ret.func == FFA_ERROR_32) {
		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
		return tee_ret;
	}
	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
		dlog_verbose(
			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
			tee_ret.func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	length = tee_ret.arg1;
	fragment_length = tee_ret.arg2;

	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
	    length > sizeof(tee_retrieve_buffer)) {
		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
			     fragment_length, length, HF_MAILBOX_SIZE,
			     sizeof(tee_retrieve_buffer));
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the first fragment of the memory region descriptor to an
	 * internal buffer.
	 */
	memcpy_s(tee_retrieve_buffer, sizeof(tee_retrieve_buffer),
		 from_locked.vm->mailbox.send, fragment_length);

	/* Fetch the remaining fragments into the same buffer. */
	fragment_offset = fragment_length;
	while (fragment_offset < length) {
		tee_ret = arch_other_world_call(
			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_offset});
		if (tee_ret.func != FFA_MEM_FRAG_TX_32) {
			dlog_verbose(
				"Got %#x (%d) from TEE in response to "
				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
				tee_ret.func, tee_ret.arg2);
			return tee_ret;
		}
		if (ffa_frag_handle(tee_ret) != handle) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
				"in response to FFA_MEM_FRAG_RX for handle "
				"%#x.\n",
				ffa_frag_handle(tee_ret), handle);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		if (ffa_frag_sender(tee_ret) != 0) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
				"(expected 0).\n",
				ffa_frag_sender(tee_ret));
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		fragment_length = tee_ret.arg3;
		if (fragment_length > HF_MAILBOX_SIZE ||
		    fragment_offset + fragment_length > length) {
			dlog_verbose(
				"Invalid fragment length %d at offset %d (max "
				"%d).\n",
				fragment_length, fragment_offset,
				HF_MAILBOX_SIZE);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		memcpy_s(tee_retrieve_buffer + fragment_offset,
			 sizeof(tee_retrieve_buffer) - fragment_offset,
			 from_locked.vm->mailbox.send, fragment_length);

		fragment_offset += fragment_length;
	}

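	/*
	 * The full memory region descriptor has now been reassembled in
	 * tee_retrieve_buffer; validate it before allowing the reclaim.
	 */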
	memory_region = (struct ffa_memory_region *)tee_retrieve_buffer;

	if (memory_region->receiver_count != 1) {
		/* Only one receiver supported by Hafnium for now. */
		dlog_verbose(
			"Multiple recipients not supported (got %d, expected "
			"1).\n",
			memory_region->receiver_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (memory_region->handle != handle) {
		dlog_verbose(
			"Got memory region handle %#x from TEE but requested "
			"handle %#x.\n",
			memory_region->handle, handle);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* The original sender must match the caller. */
	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#x "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	composite = ffa_memory_region_get_composite(memory_region, 0);

	/*
	 * Validate that the reclaim transition is allowed for the given memory
	 * region, forward the request to the TEE and then map the memory back
	 * into the caller's stage-2 page table.
	 */
	return ffa_tee_reclaim_check_update(
		to_locked, handle, composite->constituents,
		composite->constituent_count, memory_to_attributes,
		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
}