/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/tee.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a SHARE
 * or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

/**
 * The maximum number of fragments into which a memory sharing message may be
 * broken.
 */
#define MAX_FRAGMENTS 20

static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
	      "struct ffa_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
	      "struct ffa_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
	      "struct ffa_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
	      "struct ffa_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct ffa_memory_share_state {
	ffa_memory_handle_t handle;

	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct ffa_memory_region *memory_region;

	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];

	/** The number of constituents in each fragment. */
	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];

	/**
	 * The number of valid elements in the `fragments` and
	 * `fragment_constituent_counts` arrays.
	 */
	uint32_t fragment_count;

	/**
	 * The FF-A function used for sharing the memory. Must be one of
	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * True if all the fragments of this sharing request have been sent and
	 * Hafnium has updated the sender page table accordingly.
	 */
	bool sending_complete;

	/**
	 * How many fragments of the memory region each recipient has retrieved
	 * so far. The order of this array matches the order of the endpoint
	 * memory access descriptors in the memory region descriptor. Any
	 * entries beyond the receiver_count will always be 0.
	 */
	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
};
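
/*
 * Illustrative lifecycle of a share state (informational note): a state is
 * claimed by allocate_share_state(), which records `share_func`,
 * `memory_region` and the first fragment; further fragments may be recorded
 * until `sending_complete` is set; each receiver's retrieval progress is
 * tracked in `retrieved_fragment_count`; and share_state_free() returns the
 * state to the pool by resetting `share_func` to 0.
 */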

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct ffa_memory_share_state *share_states;
};

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Buffer for retrieving memory region information from the TEE for when a
 * region is reclaimed by a VM. Access to this buffer must be guarded by the VM
 * lock of the TEE VM.
 */
alignas(PAGE_SIZE) static uint8_t
	tee_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
static bool allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle,
	struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	CHECK(share_states.share_states != NULL);
	CHECK(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				allocated_state->handle =
					i |
					FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			} else {
				allocated_state->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}
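
/*
 * Worked example (illustrative only): if slot 5 of `share_states` is free and
 * the caller passes FFA_MEMORY_HANDLE_INVALID, the allocated handle is
 * 5 | FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR, so get_share_state() below can
 * recover the slot index with `handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK`.
 * Handles allocated elsewhere (e.g. by the TEE) are stored verbatim and found
 * by the linear scan instead.
 */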

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
static bool get_share_state(struct share_states_locked share_states,
			    ffa_memory_handle_t handle,
			    struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint32_t index;

	CHECK(share_states.share_states != NULL);
	CHECK(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct ffa_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	uint32_t i;

	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
}

/** Checks whether the given share state has been fully sent. */
static bool share_state_sending_complete(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}
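
/*
 * Example of the completion check above (illustrative arithmetic only): if the
 * composite declares constituent_count = 300 and two fragments carrying 200
 * and 100 constituents have been received, the totals match and the share
 * state counts as fully sent; with only the first fragment received
 * (200 < 300) it does not.
 */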

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
static uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
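
/*
 * Example (illustrative, assuming a hypothetical constituent offset of 48 and
 * sizeof(struct ffa_memory_region_constituent) == 16): after one fragment
 * carrying 200 constituents has been received, the next fragment is expected
 * at offset 48 + 200 * 16 = 3248 bytes into the transaction descriptor.
 */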

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %d, attributes %#x, flags %#x, handle %#x, tag %d, to %d "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %d: %#x (offset %d)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			dlog("%#x: ", share_states[i].handle);
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0]);
			break;
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}
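
/*
 * For example (illustrative only): FFA_DATA_ACCESS_RO combined with
 * FFA_INSTRUCTION_ACCESS_NX maps to MM_MODE_R, while FFA_DATA_ACCESS_RW with
 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED maps to
 * MM_MODE_R | MM_MODE_W | MM_MODE_X.
 */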

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
					    &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
480 if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t required_from_mode =
		ffa_memory_permissions_to_mode(permissions);
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
		dlog_verbose(
			"Sender tried to send memory with permissions which "
			"required mode %#x but only had %#x itself.\n",
			required_from_mode, *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
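
/*
 * Summary of the sender-side transitions computed above (informational note):
 * starting from an owned, exclusive mapping, FFA_MEM_DONATE_32 leaves the
 * pages invalid and unowned for the sender, FFA_MEM_LEND_32 leaves them owned
 * but invalid, and FFA_MEM_SHARE_32 keeps them valid but marks them shared.
 */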

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return ffa_error(FFA_DENIED);
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
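
/*
 * Summary of the receiver-side transitions computed above (informational
 * note): FFA_MEM_DONATE_32 gives the retriever a fully owned mapping,
 * FFA_MEM_LEND_32 an unowned one, FFA_MEM_SHARE_32 an unowned shared one, and
 * FFA_MEM_RECLAIM_32 restores a fully owned mapping to the original owner.
 */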

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	int mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
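
/*
 * Typical two-phase use of ffa_region_group_identity_map(), sketched with
 * placeholder names (frags, counts, n) and mirroring the callers below:
 *
 *	if (!ffa_region_group_identity_map(vm, frags, counts, n, mode,
 *					   page_pool, false)) {
 *		return ffa_error(FFA_NO_MEMORY); // nothing has been changed
 *	}
 *	CHECK(ffa_region_group_identity_map(vm, frags, counts, n, mode,
 *					    page_pool, true)); // cannot fail
 */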

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings to
	 * the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, ffa_memory_access_permissions_t permissions,
	struct mpool *page_pool, bool clear)
{
	struct vm *from = from_locked.vm;
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, ensure that
	 * all constituents of a memory region being shared are at the same
	 * state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, permissions,
					&orig_from_mode, fragments,
					fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}

/**
 * Reclaims the given memory from the TEE. To do this space is first reserved in
 * the <to> VM's page table, then the reclaim request is sent on to the TEE,
 * then (if that is successful) the memory is mapped back into the <to> VM's
 * page table.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_tee_reclaim_check_update(
	struct vm_locked to_locked, ffa_memory_handle_t handle,
	struct ffa_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
	struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;
	ffa_memory_region_flags_t tee_flags;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	if (!is_aligned(constituents, 8)) {
		dlog_verbose("Constituents not aligned.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
					    &constituents, &constituent_count,
					    1, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(to_locked, &constituents,
					   &constituent_count, 1, to_mode,
					   page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Forward the request to the TEE and see what happens.
	 */
	tee_flags = 0;
	if (clear) {
		tee_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
	}
	ret = arch_tee_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
					       .arg1 = (uint32_t)handle,
					       .arg2 = (uint32_t)(handle >> 32),
					       .arg3 = tee_flags});

	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose(
			"Got %#x (%d) from TEE in response to FFA_MEM_RECLAIM, "
			"expected FFA_SUCCESS.\n",
			ret.func, ret.arg2);
		goto out;
	}

	/*
	 * The TEE was happy with it, so complete the reclaim by mapping the
	 * memory into the recipient. This won't allocate because the
	 * transaction was already prepared above, so it doesn't need to use the
	 * `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
					    &constituent_count, 1, to_mode,
					    page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}
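
/*
 * Note on the FFA_MEM_RECLAIM call above (illustrative only): the 64-bit
 * handle is split across two 32-bit arguments, e.g. a hypothetical handle
 * 0x8000000000000005 would be passed as arg1 = 0x00000005 and
 * arg2 = 0x80000000, with the optional clear flag carried in arg3.
 */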

static struct ffa_value ffa_relinquish_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	ret = ffa_relinquish_check_transition(
		from_locked, &orig_from_mode, fragments,
		fragment_constituent_counts, fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for relinquish.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001251
1252out:
1253 mpool_fini(&local_page_pool);
1254
1255 /*
1256 * Tidy up the page table by reclaiming failed mappings (if there was an
1257 * error) or merging entries into blocks where possible (on success).
1258 */
1259 mm_vm_defrag(&from_locked.vm->ptable, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001260
1261 return ret;
1262}
1263
1264/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001265 * Complete a memory sending operation by checking that it is valid, updating
1266 * the sender page table, and then either marking the share state as having
1267 * completed sending (on success) or freeing it (on failure).
1268 *
1269 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1270 */
1271static struct ffa_value ffa_memory_send_complete(
1272 struct vm_locked from_locked, struct share_states_locked share_states,
1273 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1274{
1275 struct ffa_memory_region *memory_region = share_state->memory_region;
1276 struct ffa_value ret;
1277
1278 /* Lock must be held. */
1279 CHECK(share_states.share_states != NULL);
1280
1281 /* Check that state is valid in sender page table and update. */
1282 ret = ffa_send_check_update(
1283 from_locked, share_state->fragments,
1284 share_state->fragment_constituent_counts,
1285 share_state->fragment_count, share_state->share_func,
1286 memory_region->receivers[0].receiver_permissions.permissions,
1287 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
1288 if (ret.func != FFA_SUCCESS_32) {
1289 /*
1290 * Free share state, it failed to send so it can't be retrieved.
1291 */
1292 dlog_verbose("Complete failed, freeing share state.\n");
1293 share_state_free(share_states, share_state, page_pool);
1294 return ret;
1295 }
1296
1297 share_state->sending_complete = true;
1298 dlog_verbose("Marked sending complete.\n");
1299
1300 return ffa_mem_success(share_state->handle);
1301}
1302
1303/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001304 * Checks that the given `memory_region` represents a valid memory send request
1305 * of the given `share_func` type, returns the permissions via the output
1306 * parameter, and updates the stored permissions if necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001307 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001308 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001309 * not.
1310 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001311static struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001312 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1313 uint32_t memory_share_length, uint32_t fragment_length,
1314 uint32_t share_func, ffa_memory_access_permissions_t *permissions)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001315{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001316 struct ffa_composite_memory_region *composite;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001317 uint32_t receivers_length;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001318 uint32_t constituents_offset;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001319 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001320 enum ffa_data_access data_access;
1321 enum ffa_instruction_access instruction_access;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001322
Andrew Walbrana65a1322020-04-06 19:32:32 +01001323 CHECK(permissions != NULL);
1324
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001325 /*
1326 * This should already be checked by the caller, just making the
1327 * assumption clear here.
1328 */
1329 CHECK(memory_region->receiver_count == 1);
1330
Andrew Walbrana65a1322020-04-06 19:32:32 +01001331 /* The sender must match the message sender. */
1332 if (memory_region->sender != from_locked.vm->id) {
1333 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001334 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001335 }
1336
Andrew Walbrana65a1322020-04-06 19:32:32 +01001337 /*
1338 * Ensure that the composite header is within the memory bounds and
1339 * doesn't overlap the first part of the message.
1340 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001341 receivers_length = sizeof(struct ffa_memory_access) *
1342 memory_region->receiver_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001343 constituents_offset =
1344 ffa_composite_constituent_offset(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001345 if (memory_region->receivers[0].composite_memory_region_offset <
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001346 sizeof(struct ffa_memory_region) + receivers_length ||
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001347 constituents_offset > fragment_length) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001348 dlog_verbose(
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001349 "Invalid composite memory region descriptor offset "
1350 "%d.\n",
1351 memory_region->receivers[0]
1352 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001353 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001354 }
1355
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001356 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001357
1358 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001359 * Ensure the number of constituents is within the memory bounds.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001360 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001361 constituents_length = sizeof(struct ffa_memory_region_constituent) *
1362 composite->constituent_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001363 if (memory_share_length != constituents_offset + constituents_length) {
1364 dlog_verbose("Invalid length %d or composite offset %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001365 memory_share_length,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001366 memory_region->receivers[0]
1367 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001368 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001369 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001370 if (fragment_length < memory_share_length &&
1371 fragment_length < HF_MAILBOX_SIZE) {
1372 dlog_warning(
1373 "Initial fragment length %d smaller than mailbox "
1374 "size.\n",
1375 fragment_length);
1376 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001377
Andrew Walbrana65a1322020-04-06 19:32:32 +01001378 /*
1379 * Clear is not allowed for memory sharing, as the sender still has
1380 * access to the memory.
1381 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001382 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1383 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001384 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001385 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001386 }
1387
1388 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001389 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001390 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001391 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001392 }
1393
1394 /* Check that the permissions are valid. */
1395 *permissions =
1396 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001397 data_access = ffa_get_data_access_attr(*permissions);
1398 instruction_access = ffa_get_instruction_access_attr(*permissions);
1399 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1400 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001401 dlog_verbose("Reserved value for receiver permissions %#x.\n",
1402 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001403 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001404 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001405 if (instruction_access != FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001406 dlog_verbose(
1407 "Invalid instruction access permissions %#x for "
1408 "sending memory.\n",
1409 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001410 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001411 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001412 if (share_func == FFA_MEM_SHARE_32) {
1413 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001414 dlog_verbose(
1415 "Invalid data access permissions %#x for "
1416 "sharing memory.\n",
1417 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001418 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001419 }
1420 /*
1421 * According to section 6.11.3 of the FF-A spec NX is required
1422 * for share operations (but must not be specified by the
1423 * sender) so set it in the copy that we store, ready to be
1424 * returned to the retriever.
1425 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001426 ffa_set_instruction_access_attr(permissions,
1427 FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001428 memory_region->receivers[0].receiver_permissions.permissions =
1429 *permissions;
1430 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001431 if (share_func == FFA_MEM_LEND_32 &&
1432 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001433 dlog_verbose(
1434 "Invalid data access permissions %#x for lending "
1435 "memory.\n",
1436 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001437 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001438 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001439 if (share_func == FFA_MEM_DONATE_32 &&
1440 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001441 dlog_verbose(
1442 "Invalid data access permissions %#x for donating "
1443 "memory.\n",
1444 *permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001445 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001446 }
1447
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001448 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001449}
1450
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001451/** Forwards a memory send message on to the TEE. */
1452static struct ffa_value memory_send_tee_forward(
1453 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id,
1454 uint32_t share_func, struct ffa_memory_region *memory_region,
1455 uint32_t memory_share_length, uint32_t fragment_length)
1456{
1457 struct ffa_value ret;
1458
1459 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
1460 memory_region, fragment_length);
1461 tee_locked.vm->mailbox.recv_size = fragment_length;
1462 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1463 tee_locked.vm->mailbox.recv_func = share_func;
1464 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
1465 ret = arch_tee_call((struct ffa_value){.func = share_func,
1466 .arg1 = memory_share_length,
1467 .arg2 = fragment_length});
1468 /*
1469 * After the call to the TEE completes it must have finished reading its
1470 * RX buffer, so it is ready for another message.
1471 */
1472 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1473
1474 return ret;
1475}
1476
Andrew Walbrana65a1322020-04-06 19:32:32 +01001477/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001478 * Gets the share state for continuing an operation to donate, lend or share
1479 * memory, and checks that it is a valid request.
1480 *
1481 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1482 * not.
1483 */
1484static struct ffa_value ffa_memory_send_continue_validate(
1485 struct share_states_locked share_states, ffa_memory_handle_t handle,
1486 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1487 struct mpool *page_pool)
1488{
1489 struct ffa_memory_share_state *share_state;
1490 struct ffa_memory_region *memory_region;
1491
1492 CHECK(share_state_ret != NULL);
1493
1494 /*
1495 * Look up the share state by handle and make sure that the VM ID
1496 * matches.
1497 */
1498 if (!get_share_state(share_states, handle, &share_state)) {
1499 dlog_verbose(
1500 "Invalid handle %#x for memory send continuation.\n",
1501 handle);
1502 return ffa_error(FFA_INVALID_PARAMETERS);
1503 }
1504 memory_region = share_state->memory_region;
1505
1506 if (memory_region->sender != from_vm_id) {
1507 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1508 return ffa_error(FFA_INVALID_PARAMETERS);
1509 }
1510
1511 if (share_state->sending_complete) {
1512 dlog_verbose(
1513 "Sending of memory handle %#x is already complete.\n",
1514 handle);
1515 return ffa_error(FFA_INVALID_PARAMETERS);
1516 }
1517
1518 if (share_state->fragment_count == MAX_FRAGMENTS) {
1519 /*
1520 * Log a warning as this is a sign that MAX_FRAGMENTS should
1521 * probably be increased.
1522 */
1523 dlog_warning(
1524 "Too many fragments for memory share with handle %#x; "
1525 "only %d supported.\n",
1526 handle, MAX_FRAGMENTS);
1527 /* Free share state, as it's not possible to complete it. */
1528 share_state_free(share_states, share_state, page_pool);
1529 return ffa_error(FFA_NO_MEMORY);
1530 }
1531
1532 *share_state_ret = share_state;
1533
1534 return (struct ffa_value){.func = FFA_SUCCESS_32};
1535}
1536
1537/**
1538 * Forwards a memory send continuation message on to the TEE.
1539 */
1540static struct ffa_value memory_send_continue_tee_forward(
1541 struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, void *fragment,
1542 uint32_t fragment_length, ffa_memory_handle_t handle)
1543{
1544 struct ffa_value ret;
1545
1546 memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, fragment,
1547 fragment_length);
1548 tee_locked.vm->mailbox.recv_size = fragment_length;
1549 tee_locked.vm->mailbox.recv_sender = sender_vm_id;
1550 tee_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
1551 tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
1552 ret = arch_tee_call(
1553 (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
1554 .arg1 = (uint32_t)handle,
1555 .arg2 = (uint32_t)(handle >> 32),
1556 .arg3 = fragment_length,
1557 .arg4 = (uint64_t)sender_vm_id << 16});
1558 /*
1559 * After the call to the TEE completes it must have finished reading its
1560 * RX buffer, so it is ready for another message.
1561 */
1562 tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
1563
1564 return ret;
1565}
1566
1567/**
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001568 * Validates a call to donate, lend or share memory to a non-TEE VM and then
1569 * updates the stage-2 page tables. Specifically, checks that the message
1570 * length and number of memory region constituents match, and that the
1571 * transition is valid for the type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001572 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001573 * Assumes that the caller has already found and locked the sender VM and copied
1574 * the memory region descriptor from the sender's TX buffer to a freshly
1575 * allocated page from Hafnium's internal pool. The caller must have also
1576 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001577 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001578 * This function takes ownership of the `memory_region` passed in and will free
1579 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001580 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001581struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001582 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001583 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001584 uint32_t fragment_length, uint32_t share_func,
1585 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001586{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001587 ffa_memory_access_permissions_t permissions;
1588 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001589 struct share_states_locked share_states;
1590 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001591
1592 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001593 * If there is an error validating the `memory_region` then we need to
1594 * free it because we own it but we won't be storing it in a share state
1595 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001596 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001597 ret = ffa_memory_send_validate(from_locked, memory_region,
1598 memory_share_length, fragment_length,
1599 share_func, &permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001600 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001601 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001602 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001603 }
1604
Andrew Walbrana65a1322020-04-06 19:32:32 +01001605 /* Set flag for share function, ready to be retrieved later. */
1606 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001607 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001608 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001609 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001610 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001611 case FFA_MEM_LEND_32:
1612 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001613 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001614 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001615 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001616 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001617 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001618 }
1619
Andrew Walbranca808b12020-05-15 17:22:28 +01001620 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001621 /*
1622 * Allocate a share state before updating the page table. Otherwise if
1623 * updating the page table succeeded but allocating the share state
1624 * failed then it would leave the memory in a state where nobody could
1625 * get it back.
1626 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001627 if (!allocate_share_state(share_states, share_func, memory_region,
1628 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1629 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001630 dlog_verbose("Failed to allocate share state.\n");
1631 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001632 ret = ffa_error(FFA_NO_MEMORY);
1633 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001634 }
1635
Andrew Walbranca808b12020-05-15 17:22:28 +01001636 if (fragment_length == memory_share_length) {
1637 /* No more fragments to come, everything fit in one message. */
1638 ret = ffa_memory_send_complete(from_locked, share_states,
1639 share_state, page_pool);
1640 } else {
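		/*
		 * More fragments are expected: return FFA_MEM_FRAG_RX with the
		 * newly allocated handle and the offset (the number of bytes
		 * received so far) at which the sender should send the next
		 * FFA_MEM_FRAG_TX fragment.
		 */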
1641 ret = (struct ffa_value){
1642 .func = FFA_MEM_FRAG_RX_32,
1643 .arg1 = (uint32_t)share_state->handle,
1644 .arg2 = (uint32_t)(share_state->handle >> 32),
1645 .arg3 = fragment_length};
1646 }
1647
1648out:
1649 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001650 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001651 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001652}
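
/*
 * Illustrative caller flow for the function above, not part of the
 * implementation; names such as `next_fragment` are hypothetical and the
 * copying of each fragment out of the sender's TX buffer is elided. A
 * descriptor which fits in one message completes immediately, otherwise
 * FFA_MEM_FRAG_RX is returned and the remaining fragments are fed through
 * ffa_memory_send_continue() until sending completes:
 *
 *	ret = ffa_memory_send(from, region, total_length, fragment_length,
 *			      share_func, pool);
 *	while (ret.func == FFA_MEM_FRAG_RX_32) {
 *		ret = ffa_memory_send_continue(from, next_fragment,
 *					       next_fragment_length,
 *					       ffa_frag_handle(ret), pool);
 *	}
 */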
1653
1654/**
1655 * Validates a call to donate, lend or share memory to the TEE and then updates
1656 * the stage-2 page tables. Specifically, checks that the message length and
1657 * number of memory region constituents match, and that the transition is
1658 * valid for the type of memory sending operation.
1659 *
1660 * Assumes that the caller has already found and locked the sender VM and the
1661 * TEE VM, and copied the memory region descriptor from the sender's TX buffer
1662 * to a freshly allocated page from Hafnium's internal pool. The caller must
1663 * have also validated that the receiver VM ID is valid.
1664 *
1665 * This function takes ownership of the `memory_region` passed in and will free
1666 * it when necessary; it must not be freed by the caller.
1667 */
1668struct ffa_value ffa_memory_tee_send(
1669 struct vm_locked from_locked, struct vm_locked to_locked,
1670 struct ffa_memory_region *memory_region, uint32_t memory_share_length,
1671 uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool)
1672{
1673 ffa_memory_access_permissions_t permissions;
1674 struct ffa_value ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001675
1676 /*
1677 * If there is an error validating the `memory_region` then we need to
1678 * free it because we own it but we won't be storing it in a share state
1679 * after all.
1680 */
1681 ret = ffa_memory_send_validate(from_locked, memory_region,
1682 memory_share_length, fragment_length,
1683 share_func, &permissions);
1684 if (ret.func != FFA_SUCCESS_32) {
1685 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001686 }
1687
Andrew Walbranca808b12020-05-15 17:22:28 +01001688 if (fragment_length == memory_share_length) {
1689 /* No more fragments to come, everything fit in one message. */
1690 struct ffa_composite_memory_region *composite =
1691 ffa_memory_region_get_composite(memory_region, 0);
1692 struct ffa_memory_region_constituent *constituents =
1693 composite->constituents;
1694
1695 ret = ffa_send_check_update(
1696 from_locked, &constituents,
1697 &composite->constituent_count, 1, share_func,
1698 permissions, page_pool,
1699 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR);
1700 if (ret.func != FFA_SUCCESS_32) {
1701 goto out;
1702 }
1703
1704 /* Forward memory send message on to TEE. */
1705 ret = memory_send_tee_forward(
1706 to_locked, from_locked.vm->id, share_func,
1707 memory_region, memory_share_length, fragment_length);
1708 } else {
1709 struct share_states_locked share_states = share_states_lock();
1710 ffa_memory_handle_t handle;
1711
1712 /*
1713 * We need to wait for the rest of the fragments before we can
1714 * check whether the transaction is valid and unmap the memory.
1715 * Call the TEE so it can do its initial validation and assign a
1716 * handle, and allocate a share state to keep what we have so
1717 * far.
1718 */
1719 ret = memory_send_tee_forward(
1720 to_locked, from_locked.vm->id, share_func,
1721 memory_region, memory_share_length, fragment_length);
1722 if (ret.func == FFA_ERROR_32) {
1723 goto out_unlock;
1724 } else if (ret.func != FFA_MEM_FRAG_RX_32) {
1725 dlog_warning(
1726 "Got %#x from TEE in response to %#x for "
1727 "fragment with with %d/%d, expected "
1728 "FFA_MEM_FRAG_RX.\n",
1729 ret.func, share_func, fragment_length,
1730 memory_share_length);
1731 ret = ffa_error(FFA_INVALID_PARAMETERS);
1732 goto out_unlock;
1733 }
1734 handle = ffa_frag_handle(ret);
1735 if (ret.arg3 != fragment_length) {
1736 dlog_warning(
1737 "Got unexpected fragment offset %d for "
1738 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1739 ret.arg3, fragment_length);
1740 ret = ffa_error(FFA_INVALID_PARAMETERS);
1741 goto out_unlock;
1742 }
1743 if (ffa_frag_sender(ret) != from_locked.vm->id) {
1744 dlog_warning(
1745 "Got unexpected sender ID %d for "
1746 "FFA_MEM_FRAG_RX from TEE (expected %d).\n",
1747 ffa_frag_sender(ret), from_locked.vm->id);
1748 ret = ffa_error(FFA_INVALID_PARAMETERS);
1749 goto out_unlock;
1750 }
1751
1752 if (!allocate_share_state(share_states, share_func,
1753 memory_region, fragment_length,
1754 handle, NULL)) {
1755 dlog_verbose("Failed to allocate share state.\n");
1756 ret = ffa_error(FFA_NO_MEMORY);
1757 goto out_unlock;
1758 }
1759 /*
1760 * Don't free the memory region fragment, as it has been stored
1761 * in the share state.
1762 */
1763 memory_region = NULL;
1764 out_unlock:
1765 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001766 }
1767
Andrew Walbranca808b12020-05-15 17:22:28 +01001768out:
1769 if (memory_region != NULL) {
1770 mpool_free(page_pool, memory_region);
1771 }
1772 dump_share_states();
1773 return ret;
1774}
1775
1776/**
1777 * Continues an operation to donate, lend or share memory to a non-TEE VM. If
1778 * this is the last fragment then checks that the transition is valid for the
1779 * type of memory sending operation and updates the stage-2 page tables of the
1780 * sender.
1781 *
1782 * Assumes that the caller has already found and locked the sender VM and copied
1783 * the memory region descriptor from the sender's TX buffer to a freshly
1784 * allocated page from Hafnium's internal pool.
1785 *
1786 * This function takes ownership of the `fragment` passed in; it must not be
1787 * freed by the caller.
1788 */
1789struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1790 void *fragment,
1791 uint32_t fragment_length,
1792 ffa_memory_handle_t handle,
1793 struct mpool *page_pool)
1794{
1795 struct share_states_locked share_states = share_states_lock();
1796 struct ffa_memory_share_state *share_state;
1797 struct ffa_value ret;
1798 struct ffa_memory_region *memory_region;
1799
1800 ret = ffa_memory_send_continue_validate(share_states, handle,
1801 &share_state,
1802 from_locked.vm->id, page_pool);
1803 if (ret.func != FFA_SUCCESS_32) {
1804 goto out_free_fragment;
1805 }
1806 memory_region = share_state->memory_region;
1807
1808 if (memory_region->receivers[0].receiver_permissions.receiver ==
1809 HF_TEE_VM_ID) {
1810 dlog_error(
1811 "Got hypervisor-allocated handle for memory send to "
1812 "TEE. This should never happen, and indicates a bug in "
1813 "EL3 code.\n");
1814 ret = ffa_error(FFA_INVALID_PARAMETERS);
1815 goto out_free_fragment;
1816 }
1817
1818 /* Add this fragment. */
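	/*
	 * Continuation fragments carry only constituent entries, so the
	 * fragment length is treated as a whole number of constituent
	 * descriptors.
	 */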
1819 share_state->fragments[share_state->fragment_count] = fragment;
1820 share_state->fragment_constituent_counts[share_state->fragment_count] =
1821 fragment_length / sizeof(struct ffa_memory_region_constituent);
1822 share_state->fragment_count++;
1823
1824 /* Check whether the memory send operation is now ready to complete. */
1825 if (share_state_sending_complete(share_states, share_state)) {
1826 ret = ffa_memory_send_complete(from_locked, share_states,
1827 share_state, page_pool);
1828 } else {
1829 ret = (struct ffa_value){
1830 .func = FFA_MEM_FRAG_RX_32,
1831 .arg1 = (uint32_t)handle,
1832 .arg2 = (uint32_t)(handle >> 32),
1833 .arg3 = share_state_next_fragment_offset(share_states,
1834 share_state)};
1835 }
1836 goto out;
1837
1838out_free_fragment:
1839 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001840
1841out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001842 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001843 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001844}
1845
Andrew Walbranca808b12020-05-15 17:22:28 +01001846/**
1847 * Continues an operation to donate, lend or share memory to the TEE VM. If this
1848 * is the last fragment then checks that the transition is valid for the type of
1849 * memory sending operation and updates the stage-2 page tables of the sender.
1850 *
1851 * Assumes that the caller has already found and locked the sender VM and copied
1852 * the memory region descriptor from the sender's TX buffer to a freshly
1853 * allocated page from Hafnium's internal pool.
1854 *
1855 * This function takes ownership of the `fragment` passed in and will free it
1856 * when necessary; it must not be freed by the caller.
1857 */
1858struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked,
1859 struct vm_locked to_locked,
1860 void *fragment,
1861 uint32_t fragment_length,
1862 ffa_memory_handle_t handle,
1863 struct mpool *page_pool)
1864{
1865 struct share_states_locked share_states = share_states_lock();
1866 struct ffa_memory_share_state *share_state;
1867 struct ffa_value ret;
1868 struct ffa_memory_region *memory_region;
1869
1870 ret = ffa_memory_send_continue_validate(share_states, handle,
1871 &share_state,
1872 from_locked.vm->id, page_pool);
1873 if (ret.func != FFA_SUCCESS_32) {
1874 goto out_free_fragment;
1875 }
1876 memory_region = share_state->memory_region;
1877
1878 if (memory_region->receivers[0].receiver_permissions.receiver !=
1879 HF_TEE_VM_ID) {
1880 dlog_error(
1881 "Got SPM-allocated handle for memory send to non-TEE "
1882 "VM. This should never happen, and indicates a bug.\n");
1883 ret = ffa_error(FFA_INVALID_PARAMETERS);
1884 goto out_free_fragment;
1885 }
1886
1887 if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
1888 to_locked.vm->mailbox.recv == NULL) {
1889 /*
1890 * If the TEE RX buffer is not available, tell the sender to
1891 * retry by returning the current offset again.
1892 */
1893 ret = (struct ffa_value){
1894 .func = FFA_MEM_FRAG_RX_32,
1895 .arg1 = (uint32_t)handle,
1896 .arg2 = (uint32_t)(handle >> 32),
1897 .arg3 = share_state_next_fragment_offset(share_states,
1898 share_state),
1899 };
1900 goto out_free_fragment;
1901 }
1902
1903 /* Add this fragment. */
1904 share_state->fragments[share_state->fragment_count] = fragment;
1905 share_state->fragment_constituent_counts[share_state->fragment_count] =
1906 fragment_length / sizeof(struct ffa_memory_region_constituent);
1907 share_state->fragment_count++;
1908
1909 /* Check whether the memory send operation is now ready to complete. */
1910 if (share_state_sending_complete(share_states, share_state)) {
1911 ret = ffa_memory_send_complete(from_locked, share_states,
1912 share_state, page_pool);
1913
1914 if (ret.func == FFA_SUCCESS_32) {
1915 /*
1916 * Forward final fragment on to the TEE so that
1917 * it can complete the memory sending operation.
1918 */
1919 ret = memory_send_continue_tee_forward(
1920 to_locked, from_locked.vm->id, fragment,
1921 fragment_length, handle);
1922
1923 if (ret.func != FFA_SUCCESS_32) {
1924 /*
1925 * The error will be passed on to the caller,
1926 * but log it here too.
1927 */
1928 dlog_verbose(
1929 "TEE didn't successfully complete "
1930 "memory send operation; returned %#x "
1931 "(%d).\n",
1932 ret.func, ret.arg2);
1933 }
1934 /* Free share state. */
1935 share_state_free(share_states, share_state, page_pool);
1936 } else {
1937 /* Abort sending to TEE. */
1938 struct ffa_value tee_ret =
1939 arch_tee_call((struct ffa_value){
1940 .func = FFA_MEM_RECLAIM_32,
1941 .arg1 = (uint32_t)handle,
1942 .arg2 = (uint32_t)(handle >> 32)});
1943
1944 if (tee_ret.func != FFA_SUCCESS_32) {
1945 /*
1946 * Nothing we can do if TEE doesn't abort
1947 * properly, just log it.
1948 */
1949 dlog_verbose(
1950 "TEE didn't successfully abort failed "
1951 "memory send operation; returned %#x "
1952 "(%d).\n",
1953 tee_ret.func, tee_ret.arg2);
1954 }
1955 /*
1956 * We don't need to free the share state in this case
1957 * because ffa_memory_send_complete does that already.
1958 */
1959 }
1960 } else {
1961 uint32_t next_fragment_offset =
1962 share_state_next_fragment_offset(share_states,
1963 share_state);
1964
1965 ret = memory_send_continue_tee_forward(
1966 to_locked, from_locked.vm->id, fragment,
1967 fragment_length, handle);
1968
1969 if (ret.func != FFA_MEM_FRAG_RX_32 ||
1970 ffa_frag_handle(ret) != handle ||
1971 ret.arg3 != next_fragment_offset ||
1972 ffa_frag_sender(ret) != from_locked.vm->id) {
1973 dlog_verbose(
1974 "Got unexpected result from forwarding "
1975 "FFA_MEM_FRAG_TX to TEE: %#x (handle %#x, "
1976 "offset %d, sender %d); expected "
1977 "FFA_MEM_FRAG_RX (handle %#x, offset %d, "
1978 "sender %d).\n",
1979 ret.func, ffa_frag_handle(ret), ret.arg3,
1980 ffa_frag_sender(ret), handle,
1981 next_fragment_offset, from_locked.vm->id);
1982 /* Free share state. */
1983 share_state_free(share_states, share_state, page_pool);
1984 ret = ffa_error(FFA_INVALID_PARAMETERS);
1985 goto out;
1986 }
1987
1988 ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
1989 .arg1 = (uint32_t)handle,
1990 .arg2 = (uint32_t)(handle >> 32),
1991 .arg3 = next_fragment_offset};
1992 }
1993 goto out;
1994
1995out_free_fragment:
1996 mpool_free(page_pool, fragment);
1997
1998out:
1999 share_states_unlock(&share_states);
2000 return ret;
2001}
2002
2003/** Clean up after the receiver has finished retrieving a memory region. */
2004static void ffa_memory_retrieve_complete(
2005 struct share_states_locked share_states,
2006 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2007{
2008 if (share_state->share_func == FFA_MEM_DONATE_32) {
2009 /*
2010 * Memory that has been donated can't be relinquished,
2011 * so no need to keep the share state around.
2012 */
2013 share_state_free(share_states, share_state, page_pool);
2014 dlog_verbose("Freed share state for donate.\n");
2015 }
2016}
2017
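/**
 * Handles an FFA_MEM_RETRIEVE_REQ: validates the retrieve request against the
 * share state identified by its handle, maps the memory into the retriever's
 * stage-2 page tables and writes the first fragment of the
 * FFA_MEM_RETRIEVE_RESP to the retriever's RX buffer.
 */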
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002018struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2019 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002020 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002021 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002022{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002023 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002024 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002025 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002026 sizeof(struct ffa_memory_access);
2027 ffa_memory_handle_t handle = retrieve_request->handle;
2028 ffa_memory_region_flags_t transaction_type =
Andrew Walbrana65a1322020-04-06 19:32:32 +01002029 retrieve_request->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002030 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
2031 struct ffa_memory_region *memory_region;
2032 ffa_memory_access_permissions_t sent_permissions;
2033 enum ffa_data_access sent_data_access;
2034 enum ffa_instruction_access sent_instruction_access;
2035 ffa_memory_access_permissions_t requested_permissions;
2036 enum ffa_data_access requested_data_access;
2037 enum ffa_instruction_access requested_instruction_access;
2038 ffa_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002039 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002040 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002041 struct ffa_memory_share_state *share_state;
2042 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002043 struct ffa_composite_memory_region *composite;
2044 uint32_t total_length;
2045 uint32_t fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002046
2047 dump_share_states();
2048
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002049 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002050 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002051 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002052 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002053 expected_retrieve_request_length,
2054 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002055 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002056 }
2057
Andrew Walbrana65a1322020-04-06 19:32:32 +01002058 if (retrieve_request->receiver_count != 1) {
2059 dlog_verbose(
2060 "Multi-way memory sharing not supported (got %d "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002061 "receiver descriptors on FFA_MEM_RETRIEVE_REQ, "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002062 "expected 1).\n",
2063 retrieve_request->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002064 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002065 }
2066
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002067 share_states = share_states_lock();
2068 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002069 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002070 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002071 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002072 goto out;
2073 }
2074
Andrew Walbrana65a1322020-04-06 19:32:32 +01002075 memory_region = share_state->memory_region;
2076 CHECK(memory_region != NULL);
2077
2078 /*
2079 * Check that the transaction type expected by the receiver is correct,
2080 * if it has been specified.
2081 */
2082 if (transaction_type !=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002083 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
Andrew Walbrana65a1322020-04-06 19:32:32 +01002084 transaction_type != (memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002085 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002086 dlog_verbose(
2087 "Incorrect transaction type %#x for "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002088 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002089 transaction_type,
2090 memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002091 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002092 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002093 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002094 goto out;
2095 }
2096
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002097 if (retrieve_request->sender != memory_region->sender) {
2098 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002099 "Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002100 "expected %d for handle %#x.\n",
2101 retrieve_request->sender, memory_region->sender,
2102 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002103 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002104 goto out;
2105 }
2106
2107 if (retrieve_request->tag != memory_region->tag) {
2108 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002109 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002110 "%d for handle %#x.\n",
2111 retrieve_request->tag, memory_region->tag, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002112 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002113 goto out;
2114 }
2115
Andrew Walbrana65a1322020-04-06 19:32:32 +01002116 if (retrieve_request->receivers[0].receiver_permissions.receiver !=
2117 to_locked.vm->id) {
2118 dlog_verbose(
2119 "Retrieve request receiver VM ID %d didn't match "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002120 "caller of FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002121 retrieve_request->receivers[0]
2122 .receiver_permissions.receiver);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002123 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002124 goto out;
2125 }
2126
2127 if (memory_region->receivers[0].receiver_permissions.receiver !=
2128 to_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002129 dlog_verbose(
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002130 "Incorrect receiver VM ID %d for FFA_MEM_RETRIEVE_REQ, "
2131 "expected %d for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002132 to_locked.vm->id,
2133 memory_region->receivers[0]
2134 .receiver_permissions.receiver,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002135 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002136 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002137 goto out;
2138 }
2139
Andrew Walbranca808b12020-05-15 17:22:28 +01002140 if (!share_state->sending_complete) {
2141 dlog_verbose(
2142 "Memory with handle %#x not fully sent, can't "
2143 "retrieve.\n",
2144 handle);
2145 ret = ffa_error(FFA_INVALID_PARAMETERS);
2146 goto out;
2147 }
2148
2149 if (share_state->retrieved_fragment_count[0] != 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002150 dlog_verbose("Memory with handle %#x already retrieved.\n",
2151 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002152 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002153 goto out;
2154 }
2155
Andrew Walbrana65a1322020-04-06 19:32:32 +01002156 if (retrieve_request->receivers[0].composite_memory_region_offset !=
2157 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002158 dlog_verbose(
2159 "Retriever specified address ranges not supported (got "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002160 "offset %d).\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002161 retrieve_request->receivers[0]
2162 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002163 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002164 goto out;
2165 }
2166
Andrew Walbrana65a1322020-04-06 19:32:32 +01002167 /*
2168 * Check permissions from sender against permissions requested by
2169 * receiver.
2170 */
2171 /* TODO: Check attributes too. */
2172 sent_permissions =
2173 memory_region->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002174 sent_data_access = ffa_get_data_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002175 sent_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002176 ffa_get_instruction_access_attr(sent_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002177 requested_permissions =
2178 retrieve_request->receivers[0].receiver_permissions.permissions;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002179 requested_data_access = ffa_get_data_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002180 requested_instruction_access =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002181 ffa_get_instruction_access_attr(requested_permissions);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002182 permissions = 0;
2183 switch (sent_data_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002184 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2185 case FFA_DATA_ACCESS_RW:
2186 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2187 requested_data_access == FFA_DATA_ACCESS_RW) {
2188 ffa_set_data_access_attr(&permissions,
2189 FFA_DATA_ACCESS_RW);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002190 break;
2191 }
2192 /* Intentional fall-through. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002193 case FFA_DATA_ACCESS_RO:
2194 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2195 requested_data_access == FFA_DATA_ACCESS_RO) {
2196 ffa_set_data_access_attr(&permissions,
2197 FFA_DATA_ACCESS_RO);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002198 break;
2199 }
2200 dlog_verbose(
2201 "Invalid data access requested; sender specified "
2202 "permissions %#x but receiver requested %#x.\n",
2203 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002204 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002205 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002206 case FFA_DATA_ACCESS_RESERVED:
2207 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002208 "checked before this point.");
2209 }
2210 switch (sent_instruction_access) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002211 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2212 case FFA_INSTRUCTION_ACCESS_X:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002213 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002214 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2215 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
2216 ffa_set_instruction_access_attr(
2217 &permissions, FFA_INSTRUCTION_ACCESS_X);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002218 break;
2219 }
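		/* Intentional fall-through. */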
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002220 case FFA_INSTRUCTION_ACCESS_NX:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002221 if (requested_instruction_access ==
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002222 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2223 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2224 ffa_set_instruction_access_attr(
2225 &permissions, FFA_INSTRUCTION_ACCESS_NX);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002226 break;
2227 }
2228 dlog_verbose(
2229 "Invalid instruction access requested; sender "
Andrew Walbranf07f04d2020-05-01 18:09:00 +01002230 "specified permissions %#x but receiver requested "
2231 "%#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002232 sent_permissions, requested_permissions);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002233 ret = ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002234 goto out;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002235 case FFA_INSTRUCTION_ACCESS_RESERVED:
2236 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002237 "be checked before this point.");
2238 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002239 memory_to_attributes = ffa_memory_permissions_to_mode(permissions);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002240
Andrew Walbran996d1d12020-05-27 14:08:43 +01002241 ret = ffa_retrieve_check_update(
Andrew Walbranca808b12020-05-15 17:22:28 +01002242 to_locked, share_state->fragments,
2243 share_state->fragment_constituent_counts,
2244 share_state->fragment_count, memory_to_attributes,
Andrew Walbran996d1d12020-05-27 14:08:43 +01002245 share_state->share_func, false, page_pool);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002246 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002247 goto out;
2248 }
2249
2250 /*
2251 * Copy response to RX buffer of caller and deliver the message. This
2252 * must be done before the share_state is (possibly) freed.
2253 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002254 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002255 composite = ffa_memory_region_get_composite(memory_region, 0);
2256 /*
2257 * Constituents which we received in the first fragment should always
2258 * fit in the first fragment we are sending, because the header is the
2259 * same size in both cases and we have a fixed message buffer size. So
2260 * `ffa_retrieved_memory_region_init` should never fail.
2261 */
2262 CHECK(ffa_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002263 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2264 memory_region->sender, memory_region->attributes,
2265 memory_region->flags, handle, to_locked.vm->id, permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +01002266 composite->page_count, composite->constituent_count,
2267 share_state->fragments[0],
2268 share_state->fragment_constituent_counts[0], &total_length,
2269 &fragment_length));
2270 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002271 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002272 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002273 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
2274
Andrew Walbranca808b12020-05-15 17:22:28 +01002275 share_state->retrieved_fragment_count[0] = 1;
2276 if (share_state->retrieved_fragment_count[0] ==
2277 share_state->fragment_count) {
2278 ffa_memory_retrieve_complete(share_states, share_state,
2279 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002280 }
2281
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002282 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002283 .arg1 = total_length,
2284 .arg2 = fragment_length};
2285
2286out:
2287 share_states_unlock(&share_states);
2288 dump_share_states();
2289 return ret;
2290}
2291
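/**
 * Handles an FFA_MEM_FRAG_RX request from a retriever which has already
 * retrieved at least one fragment: checks that `fragment_offset` matches the
 * amount already delivered, then writes the next fragment of constituents to
 * the retriever's RX buffer as an FFA_MEM_FRAG_TX.
 */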
2292struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2293 ffa_memory_handle_t handle,
2294 uint32_t fragment_offset,
2295 struct mpool *page_pool)
2296{
2297 struct ffa_memory_region *memory_region;
2298 struct share_states_locked share_states;
2299 struct ffa_memory_share_state *share_state;
2300 struct ffa_value ret;
2301 uint32_t fragment_index;
2302 uint32_t retrieved_constituents_count;
2303 uint32_t i;
2304 uint32_t expected_fragment_offset;
2305 uint32_t remaining_constituent_count;
2306 uint32_t fragment_length;
2307
2308 dump_share_states();
2309
2310 share_states = share_states_lock();
2311 if (!get_share_state(share_states, handle, &share_state)) {
2312 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2313 handle);
2314 ret = ffa_error(FFA_INVALID_PARAMETERS);
2315 goto out;
2316 }
2317
2318 memory_region = share_state->memory_region;
2319 CHECK(memory_region != NULL);
2320
2321 if (memory_region->receivers[0].receiver_permissions.receiver !=
2322 to_locked.vm->id) {
2323 dlog_verbose(
2324 "Caller of FFA_MEM_FRAG_RX (%d) is not receiver (%d) "
2325 "of handle %#x.\n",
2326 to_locked.vm->id,
2327 memory_region->receivers[0]
2328 .receiver_permissions.receiver,
2329 handle);
2330 ret = ffa_error(FFA_INVALID_PARAMETERS);
2331 goto out;
2332 }
2333
2334 if (!share_state->sending_complete) {
2335 dlog_verbose(
2336 "Memory with handle %#x not fully sent, can't "
2337 "retrieve.\n",
2338 handle);
2339 ret = ffa_error(FFA_INVALID_PARAMETERS);
2340 goto out;
2341 }
2342
2343 if (share_state->retrieved_fragment_count[0] == 0 ||
2344 share_state->retrieved_fragment_count[0] >=
2345 share_state->fragment_count) {
2346 dlog_verbose(
2347 "Retrieval of memory with handle %#x not yet started "
2348 "or already completed (%d/%d fragments retrieved).\n",
2349 handle, share_state->retrieved_fragment_count[0],
2350 share_state->fragment_count);
2351 ret = ffa_error(FFA_INVALID_PARAMETERS);
2352 goto out;
2353 }
2354
2355 fragment_index = share_state->retrieved_fragment_count[0];
2356
2357 /*
2358 * Check that the given fragment offset is correct by counting how many
2359 * constituents were in the fragments previously sent.
2360 */
2361 retrieved_constituents_count = 0;
2362 for (i = 0; i < fragment_index; ++i) {
2363 retrieved_constituents_count +=
2364 share_state->fragment_constituent_counts[i];
2365 }
2366 expected_fragment_offset =
2367 ffa_composite_constituent_offset(memory_region, 0) +
2368 retrieved_constituents_count *
2369 sizeof(struct ffa_memory_region_constituent);
2370 if (fragment_offset != expected_fragment_offset) {
2371 dlog_verbose("Fragment offset was %d but expected %d.\n",
2372 fragment_offset, expected_fragment_offset);
2373 ret = ffa_error(FFA_INVALID_PARAMETERS);
2374 goto out;
2375 }
2376
2377 remaining_constituent_count = ffa_memory_fragment_init(
2378 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2379 share_state->fragments[fragment_index],
2380 share_state->fragment_constituent_counts[fragment_index],
2381 &fragment_length);
2382 CHECK(remaining_constituent_count == 0);
2383 to_locked.vm->mailbox.recv_size = fragment_length;
2384 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2385 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
2386 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
2387 share_state->retrieved_fragment_count[0]++;
2388 if (share_state->retrieved_fragment_count[0] ==
2389 share_state->fragment_count) {
2390 ffa_memory_retrieve_complete(share_states, share_state,
2391 page_pool);
2392 }
2393
2394 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2395 .arg1 = (uint32_t)handle,
2396 .arg2 = (uint32_t)(handle >> 32),
2397 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002398
2399out:
2400 share_states_unlock(&share_states);
2401 dump_share_states();
2402 return ret;
2403}
2404
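/**
 * Handles an FFA_MEM_RELINQUISH request: checks that the caller is the
 * receiver recorded for the handle and has retrieved every fragment, then (if
 * valid) removes the memory from the caller's stage-2 page tables so that the
 * sender can later reclaim it.
 */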
struct ffa_value ffa_memory_relinquish(
	struct vm_locked from_locked,
	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	ffa_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	bool clear;
	struct ffa_value ret;

	if (relinquish_request->endpoint_count != 1) {
		dlog_verbose(
			"Stream endpoints not supported (got %d endpoints on "
			"FFA_MEM_RELINQUISH, expected 1).\n",
			relinquish_request->endpoint_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match calling "
			"VM ID %d.\n",
			relinquish_request->endpoints[0], from_locked.vm->id);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but receiver was %d.\n",
			from_locked.vm->id, handle,
			memory_region->receivers[0]
				.receiver_permissions.receiver);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved_fragment_count[0] !=
	    share_state->fragment_count) {
		dlog_verbose(
			"Memory with handle %#x not yet fully retrieved, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

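	/*
	 * Validate the relinquish transition and update the relinquishing VM's
	 * stage-2 page tables so it no longer has access to the constituents,
	 * clearing the memory first if the clear flag was set.
	 */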
	ret = ffa_relinquish_check_update(
		from_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, page_pool, clear);

	if (ret.func == FFA_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be reclaimed
		 * (or retrieved again).
		 */
		share_state->retrieved_fragment_count[0] = 0;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

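/*
 * Illustrative sender-side sketch (an assumption, not Hafnium code): once
 * every borrower has relinquished the region, the original sender reclaims it
 * with something like the following, assuming the usual FF-A convention of
 * the handle split across the first two argument registers and the flags in
 * the third:
 *
 *	struct ffa_value args = {
 *		.func = FFA_MEM_RECLAIM_32,
 *		.arg1 = (uint32_t)handle,
 *		.arg2 = (uint32_t)(handle >> 32),
 *		.arg3 = 0,              (or FFA_MEM_RECLAIM_CLEAR)
 *	};
 *
 * which ends up being handled by ffa_memory_reclaim() below.
 */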
/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal state
 * associated with the handle.
 */
struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
				    ffa_memory_handle_t handle,
				    ffa_memory_region_flags_t flags,
				    struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	struct ffa_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %d attempted to reclaim memory handle %#x "
			"originally sent by VM %d.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"reclaim.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

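	/*
	 * A non-zero retrieved fragment count means the receiver still has the
	 * region mapped; it must relinquish it before the sender may reclaim.
	 */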
	if (share_state->retrieved_fragment_count[0] != 0) {
		dlog_verbose(
			"Tried to reclaim memory handle %#x that has not been "
			"relinquished.\n",
			handle);
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

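	/*
	 * Map the constituents back into the sender's stage-2 page tables with
	 * read/write/execute access, clearing the memory first if
	 * FFA_MEM_RECLAIM_CLEAR was set.
	 */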
	ret = ffa_retrieve_check_update(
		to_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, memory_to_attributes,
		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);

	if (ret.func == FFA_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the memory region with
 * the given handle, which was previously shared with the TEE, tells the TEE
 * to mark it as reclaimed, and updates the page table of the reclaiming VM.
 *
 * To do this, information about the memory region is first fetched from the
 * TEE.
 */
struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked,
					struct vm_locked from_locked,
					ffa_memory_handle_t handle,
					ffa_memory_region_flags_t flags,
					struct mpool *page_pool)
{
	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
	struct ffa_value tee_ret;
	uint32_t length;
	uint32_t fragment_length;
	uint32_t fragment_offset;
	struct ffa_memory_region *memory_region;
	struct ffa_composite_memory_region *composite;
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;

	CHECK(request_length <= HF_MAILBOX_SIZE);
	CHECK(from_locked.vm->id == HF_TEE_VM_ID);

	/* Retrieve memory region information from the TEE. */
	tee_ret = arch_tee_call(
		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
				   .arg1 = request_length,
				   .arg2 = request_length});
	if (tee_ret.func == FFA_ERROR_32) {
		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
		return tee_ret;
	}
	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
		dlog_verbose(
			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
			tee_ret.func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	length = tee_ret.arg1;
	fragment_length = tee_ret.arg2;

	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
	    length > sizeof(tee_retrieve_buffer)) {
		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
			     fragment_length, length, HF_MAILBOX_SIZE,
			     sizeof(tee_retrieve_buffer));
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the first fragment of the memory region descriptor to an
	 * internal buffer.
	 */
	memcpy_s(tee_retrieve_buffer, sizeof(tee_retrieve_buffer),
		 from_locked.vm->mailbox.send, fragment_length);

	/* Fetch the remaining fragments into the same buffer. */
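	/*
	 * Each FFA_MEM_FRAG_RX(handle, offset) below is expected to be
	 * answered by the TEE with an FFA_MEM_FRAG_TX carrying the next
	 * fragment, which is then copied out of the TEE's mailbox and appended
	 * to tee_retrieve_buffer until all 'length' bytes of the descriptor
	 * have been assembled.
	 */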
	fragment_offset = fragment_length;
	while (fragment_offset < length) {
		tee_ret = arch_tee_call(
			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_offset});
		if (tee_ret.func != FFA_MEM_FRAG_TX_32) {
			dlog_verbose(
				"Got %#x (%d) from TEE in response to "
				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
				tee_ret.func, tee_ret.arg2);
			return tee_ret;
		}
		if (ffa_frag_handle(tee_ret) != handle) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
				"in response to FFA_MEM_FRAG_RX for handle "
				"%#x.\n",
				ffa_frag_handle(tee_ret), handle);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		if (ffa_frag_sender(tee_ret) != 0) {
			dlog_verbose(
				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
				"(expected 0).\n",
				ffa_frag_sender(tee_ret));
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		fragment_length = tee_ret.arg3;
		if (fragment_length > HF_MAILBOX_SIZE ||
		    fragment_offset + fragment_length > length) {
			dlog_verbose(
				"Invalid fragment length %d at offset %d (max "
				"%d).\n",
				fragment_length, fragment_offset,
				HF_MAILBOX_SIZE);
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		memcpy_s(tee_retrieve_buffer + fragment_offset,
			 sizeof(tee_retrieve_buffer) - fragment_offset,
			 from_locked.vm->mailbox.send, fragment_length);

		fragment_offset += fragment_length;
	}

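	/*
	 * At this point the complete memory region descriptor, as the TEE last
	 * saw it, has been reassembled in tee_retrieve_buffer.
	 */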
	memory_region = (struct ffa_memory_region *)tee_retrieve_buffer;

	if (memory_region->receiver_count != 1) {
		/* Only one receiver supported by Hafnium for now. */
		dlog_verbose(
			"Multiple recipients not supported (got %d, expected "
			"1).\n",
			memory_region->receiver_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (memory_region->handle != handle) {
		dlog_verbose(
			"Got memory region handle %#x from TEE but requested "
			"handle %#x.\n",
			memory_region->handle, handle);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* The original sender must match the caller. */
	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %d attempted to reclaim memory handle %#x "
			"originally sent by VM %d.\n",
			to_locked.vm->id, handle, memory_region->sender);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	composite = ffa_memory_region_get_composite(memory_region, 0);

	/*
	 * Validate that the reclaim transition is allowed for the given memory
	 * region, forward the request to the TEE and then map the memory back
	 * into the caller's stage-2 page table.
	 */
	return ffa_tee_reclaim_check_update(
		to_locked, handle, composite->constituents,
		composite->constituent_count, memory_to_attributes,
		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
}