/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a SHARE
 * or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

/**
 * The maximum number of fragments into which a memory sharing message may be
 * broken.
 */
#define MAX_FRAGMENTS 20
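
/*
 * Together with HF_MAILBOX_SIZE, this constant also sizes the
 * `other_world_retrieve_buffer` declared below (one mailbox-sized buffer per
 * possible fragment), so raising it costs static memory.
 */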

static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
	      "struct ffa_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
	      "struct ffa_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
	      "struct ffa_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
	      "struct ffa_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct ffa_memory_share_state {
	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct ffa_memory_region *memory_region;

	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];

	/** The number of constituents in each fragment. */
	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];

	/**
	 * The number of valid elements in the `fragments` and
	 * `fragment_constituent_counts` arrays.
	 */
	uint32_t fragment_count;

	/**
	 * The FF-A function used for sharing the memory. Must be one of
	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * The sender's original mode before invoking the FF-A function for
	 * sharing the memory. This is used to restore the original
	 * configuration when the sender invokes FFA_MEM_RECLAIM_32.
	 */
	uint32_t sender_orig_mode;

	/**
	 * True if all the fragments of this sharing request have been sent and
	 * Hafnium has updated the sender page table accordingly.
	 */
	bool sending_complete;

	/**
	 * How many fragments of the memory region each recipient has retrieved
	 * so far. The order of this array matches the order of the endpoint
	 * memory access descriptors in the memory region descriptor. Any
	 * entries beyond the receiver_count will always be 0.
	 */
	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct ffa_memory_share_state *share_states;
};

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Buffer for retrieving memory region information from the other world when a
 * region is reclaimed by a VM. Access to this buffer must be guarded by the
 * VM lock of the other world VM.
 */
alignas(PAGE_SIZE) static uint8_t
	other_world_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
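
/*
 * Illustration of the intent, though the exact bit layout is
 * platform-defined: `plat_ffa_memory_handle_make(i)` embeds the share state
 * index `i` in the handle alongside the allocator bits, so for a handle made
 * by this world, masking the allocator bits back out as above recovers `i`.
 */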

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
static bool allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle,
	struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
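
/*
 * Typical usage of the two functions above (a sketch, not code from this
 * file):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *
 *	... look up or modify share_states.share_states[i] ...
 *
 *	share_states_unlock(&share_states);
 *
 * Passing `struct share_states_locked` by value lets callees require, at the
 * type level, that the lock is held while they run.
 */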

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
static bool get_share_state(struct share_states_locked share_states,
			    ffa_memory_handle_t handle,
			    struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint64_t index;

	assert(share_states.share_states != NULL);
	assert(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		index = ffa_memory_handle_get_index(handle);
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct ffa_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
}

/** Checks whether the given share state has been fully sent. */
static bool share_state_sending_complete(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}
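
/*
 * For example, if the composite declares 100 constituents and three fragments
 * carrying 40, 40 and 20 constituents have been received so far, sending is
 * considered complete (40 + 40 + 20 == 100).
 */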

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
static uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
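
/*
 * For example, with an initial constituents offset of N bytes and fragments
 * of 40 and 20 constituents already received (taking the constituent
 * descriptor size as 16 bytes), the next fragment is expected at offset
 * N + (40 + 20) * 16 = N + 960 bytes.
 */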

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}
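
/*
 * For example, RW data access with NX instruction access maps to
 * MM_MODE_R | MM_MODE_W, and RO with X maps to MM_MODE_R | MM_MODE_X.
 * NOT_SPECIFIED access falls back to the corresponding bits of
 * `default_mode`.
 */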

/**
 * Gets the current mode in the stage-2 page table of the given VM of all the
 * pages in the given constituents, if they all have the same mode, or returns
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode: record the mode of the very first
			 * constituent and compare every other one against it.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *    the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *    aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
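
/*
 * Summary of the sender-side transitions above, in FF-A ownership/access
 * terms: DONATE leaves the sender with neither ownership nor access
 * (MM_MODE_INVALID | MM_MODE_UNOWNED), LEND keeps ownership but removes
 * access (MM_MODE_INVALID), and SHARE keeps both but marks the pages as
 * shared (MM_MODE_SHARED).
 */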

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *    the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *    aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, orig_to_mode is expected to overlap with
		 * `state_mask`, as a result of `ffa_send_check_transition`.
		 */
		assert((orig_to_mode & (MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED)) != 0U);
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_range = arch_mm_get_pa_range();

			/*
			 * Ensure the requested region falls into the system's
			 * PA range.
			 */
			if (((pa_addr(pa_begin) >> pa_range) > 0) ||
			    ((pa_addr(pa_end) >> pa_range) > 0)) {
				dlog_error("Region is outside of PA range.\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
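
/*
 * A sketch of the intended two-phase usage, mirroring the callers later in
 * this file:
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					   fragment_count, mode, ppool,
 *					   false)) {
 *		return ffa_error(FFA_NO_MEMORY);
 *	}
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					    fragment_count, mode, ppool,
 *					    true));
 *
 * The commit call cannot fail because the prepare call has already allocated
 * every page table entry it could need.
 */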

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears the given constituents of physical memory by overwriting them with
 * zeros. The data is flushed from the cache so the memory has been cleared
 * across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * clear_memory will defrag on failure, so
				 * there is no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *     erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *     request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *     memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, struct ffa_memory_access *receivers,
	uint32_t receivers_count, struct mpool *page_pool, bool clear,
	uint32_t *orig_from_mode_ret)
{
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, receivers,
					receivers_count, &orig_from_mode,
					fragments, fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *     erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *     the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked, ffa_vm_id_t from_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(to_locked, page_pool);

	return ret;
}

/**
 * Reclaims the given memory from the other world. To do this, space is first
 * reserved in the <to> VM's page table, then the reclaim request is sent on to
 * the other world, and then (if that is successful) the memory is mapped back
 * into the <to> VM's page table.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *     erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *     the request.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_other_world_reclaim_check_update(
	struct vm_locked to_locked, ffa_memory_handle_t handle,
	struct ffa_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
	struct mpool *page_pool)
{
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;
	ffa_memory_region_flags_t other_world_flags;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	if (!is_aligned(constituents, 8)) {
		dlog_verbose("Constituents not aligned.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
					    &constituents, &constituent_count,
					    1, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(to_locked, &constituents,
					   &constituent_count, 1, to_mode,
					   page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Forward the request to the other world and see what happens. */
	other_world_flags = 0;
	if (clear) {
		other_world_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
	}
	ret = arch_other_world_call(
		(struct ffa_value){.func = FFA_MEM_RECLAIM_32,
				   .arg1 = (uint32_t)handle,
				   .arg2 = (uint32_t)(handle >> 32),
				   .arg3 = other_world_flags});

	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose(
			"Got %#x (%d) from other world in response to "
			"FFA_MEM_RECLAIM, expected FFA_SUCCESS.\n",
			ret.func, ret.arg2);
		goto out;
	}

	/*
	 * The other world was happy with it, so complete the reclaim by mapping
	 * the memory into the recipient. This won't allocate because the
	 * transaction was already prepared above, so it doesn't need to use the
	 * `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
					    &constituent_count, 1, to_mode,
					    page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);
1205
1206 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001207 * Tidy up the page table by reclaiming failed mappings (if there was an
1208 * error) or merging entries into blocks where possible (on success).
Andrew Walbran290b0c92020-02-03 16:37:14 +00001209 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001210 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran290b0c92020-02-03 16:37:14 +00001211
1212 return ret;
1213}
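
/*
 * Illustrative sketch (not part of the original file): how the 64-bit
 * memory handle is split across the two 32-bit arguments of the
 * FFA_MEM_RECLAIM call above, and how the two halves are rejoined.
 */
#if 0
static inline uint32_t handle_low(ffa_memory_handle_t handle)
{
	return (uint32_t)handle; /* Placed in arg1. */
}

static inline uint32_t handle_high(ffa_memory_handle_t handle)
{
	return (uint32_t)(handle >> 32); /* Placed in arg2. */
}

static inline ffa_memory_handle_t handle_join(uint32_t low, uint32_t high)
{
	return (ffa_memory_handle_t)low | ((ffa_memory_handle_t)high << 32);
}
#endif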
1214
Andrew Walbran996d1d12020-05-27 14:08:43 +01001215static struct ffa_value ffa_relinquish_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001216 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001217 struct ffa_memory_region_constituent **fragments,
1218 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1219 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001220{
1221 uint32_t orig_from_mode;
1222 uint32_t from_mode;
1223 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001224 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001225
Andrew Walbranca808b12020-05-15 17:22:28 +01001226 ret = ffa_relinquish_check_transition(
1227 from_locked, &orig_from_mode, fragments,
1228 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001229 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001230 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001231 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001232 }
1233
1234 /*
1235 * Create a local pool so any freed memory can't be used by another
1236 * thread. This is to ensure the original mapping can be restored if the
1237 * clear fails.
1238 */
1239 mpool_init_with_fallback(&local_page_pool, page_pool);
1240
1241 /*
1242 * First reserve all required memory for the new page table entries
1243 * without committing, to make sure the entire operation will succeed
1244 * without exhausting the page pool.
1245 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001246 if (!ffa_region_group_identity_map(
1247 from_locked, fragments, fragment_constituent_counts,
1248 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001249 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001250 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001251 goto out;
1252 }
1253
1254 /*
1255 * Update the mapping for the sender. This won't allocate because the
1256 * transaction was already prepared above, but may free pages in the
1257 * case that a whole block is being unmapped that was previously
1258 * partially mapped.
1259 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001260 CHECK(ffa_region_group_identity_map(
1261 from_locked, fragments, fragment_constituent_counts,
1262 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001263
1264 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001265 if (clear &&
1266 !ffa_clear_memory_constituents(
1267 plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
1268 fragment_constituent_counts, fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001269 /*
1270 * On failure, roll back by returning memory to the sender. This
1271 * may allocate pages which were previously freed into
1272 * `local_page_pool` by the call above, but will never allocate
1273 * more pages than that so can never fail.
1274 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001275 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001276 from_locked, fragments, fragment_constituent_counts,
1277 fragment_count, orig_from_mode, &local_page_pool,
1278 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001279
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001280 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001281 goto out;
1282 }
1283
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001284 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001285
1286out:
1287 mpool_fini(&local_page_pool);
1288
1289 /*
1290 * Tidy up the page table by reclaiming failed mappings (if there was an
1291 * error) or merging entries into blocks where possible (on success).
1292 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001293 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001294
1295 return ret;
1296}
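
/*
 * Illustrative sketch (not part of the original file) of the local page
 * pool idiom used above: pages freed into the local pool during the
 * unmap can only be re-used by this thread's rollback path, and are
 * only returned to the global pool once `mpool_fini` runs.
 */
#if 0
static void local_pool_idiom(struct mpool *page_pool)
{
	struct mpool local_page_pool;

	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * ... unmap into `local_page_pool`, then on failure re-map from
	 * `local_page_pool`; the rollback can never need more pages than
	 * the unmap freed, so it cannot fail ...
	 */

	mpool_fini(&local_page_pool); /* Return leftovers to fallback. */
}
#endif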
1297
1298/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001299 * Complete a memory sending operation by checking that it is valid, updating
1300 * the sender page table, and then either marking the share state as having
1301 * completed sending (on success) or freeing it (on failure).
1302 *
1303 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1304 */
1305static struct ffa_value ffa_memory_send_complete(
1306 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001307 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1308 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001309{
1310 struct ffa_memory_region *memory_region = share_state->memory_region;
1311 struct ffa_value ret;
1312
1313 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001314 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001315
1316 /* Check that state is valid in sender page table and update. */
1317 ret = ffa_send_check_update(
1318 from_locked, share_state->fragments,
1319 share_state->fragment_constituent_counts,
1320 share_state->fragment_count, share_state->share_func,
J-Alves363f5722022-04-25 17:37:37 +01001321 memory_region->receivers, memory_region->receiver_count,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001322 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1323 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001324 if (ret.func != FFA_SUCCESS_32) {
1325 /*
1326 * Free share state, it failed to send so it can't be retrieved.
1327 */
1328 dlog_verbose("Complete failed, freeing share state.\n");
1329 share_state_free(share_states, share_state, page_pool);
1330 return ret;
1331 }
1332
1333 share_state->sending_complete = true;
1334 dlog_verbose("Marked sending complete.\n");
1335
J-Alvesee68c542020-10-29 17:48:20 +00001336 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001337}
1338
1339/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001340 * Check that the memory attributes match Hafnium expectations:
1341 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1342 * Write-Allocate Cacheable.
1343 */
1344static struct ffa_value ffa_memory_attributes_validate(
1345 ffa_memory_access_permissions_t attributes)
1346{
1347 enum ffa_memory_type memory_type;
1348 enum ffa_memory_cacheability cacheability;
1349 enum ffa_memory_shareability shareability;
1350
1351 memory_type = ffa_get_memory_type_attr(attributes);
1352 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1353 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1354 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001355 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001356 }
1357
1358 cacheability = ffa_get_memory_cacheability_attr(attributes);
1359 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1360 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1361 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001362 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001363 }
1364
1365 shareability = ffa_get_memory_shareability_attr(attributes);
1366 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1367 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1368 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001369 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001370 }
1371
1372 return (struct ffa_value){.func = FFA_SUCCESS_32};
1373}
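
/*
 * Illustrative sketch (not part of the original file): building an
 * attributes value that satisfies the check above. Assumes setter
 * helpers mirroring the ffa_get_*_attr accessors used in this file.
 */
#if 0
static ffa_memory_access_permissions_t make_accepted_attributes(void)
{
	ffa_memory_access_permissions_t attributes = 0;

	ffa_set_memory_type_attr(&attributes, FFA_MEMORY_NORMAL_MEM);
	ffa_set_memory_cacheability_attr(&attributes,
					 FFA_MEMORY_CACHE_WRITE_BACK);
	ffa_set_memory_shareability_attr(&attributes,
					 FFA_MEMORY_INNER_SHAREABLE);

	return attributes;
}
#endif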
1374
1375/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001376 * Check that the given `memory_region` represents a valid memory send request
1377 * of the given `share_func` type, and update the stored receiver
1378 * permissions if necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001379 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001380 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001381 * not.
1382 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001383static struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001384 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1385 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001386 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001387{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001388 struct ffa_composite_memory_region *composite;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001389 uint32_t receivers_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001390 uint32_t composite_memory_region_offset;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001391 uint32_t constituents_offset;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001392 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001393 enum ffa_data_access data_access;
1394 enum ffa_instruction_access instruction_access;
Federico Recanatia98603a2021-12-20 18:04:03 +01001395 struct ffa_value ret;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001396
J-Alves95df0ef2022-12-07 10:09:48 +00001397 /* The sender must match the caller. */
1398 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1399 vm_id_is_current_world(memory_region->sender)) ||
1400 (vm_id_is_current_world(from_locked.vm->id) &&
1401 memory_region->sender != from_locked.vm->id)) {
1402 dlog_verbose("Invalid memory sender ID.\n");
1403 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001404 }
1405
Andrew Walbrana65a1322020-04-06 19:32:32 +01001406 /*
1407 * Ensure that the composite header is within the memory bounds and
1408 * doesn't overlap the first part of the message.
1409 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001410 receivers_length = sizeof(struct ffa_memory_access) *
1411 memory_region->receiver_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001412 constituents_offset =
1413 ffa_composite_constituent_offset(memory_region, 0);
Federico Recanati872cd692022-01-05 13:10:10 +01001414 composite_memory_region_offset =
1415 memory_region->receivers[0].composite_memory_region_offset;
1416 if ((composite_memory_region_offset == 0) ||
1417 (composite_memory_region_offset <
1418 sizeof(struct ffa_memory_region) + receivers_length) ||
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001419 constituents_offset > fragment_length) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001420 dlog_verbose(
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001421 "Invalid composite memory region descriptor offset "
1422 "%d.\n",
1423 memory_region->receivers[0]
1424 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001425 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001426 }
1427
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001428 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001429
1430 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001431 * Ensure the constituents are within the memory bounds.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001432 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001433 constituents_length = sizeof(struct ffa_memory_region_constituent) *
1434 composite->constituent_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001435 if (memory_share_length != constituents_offset + constituents_length) {
1436 dlog_verbose("Invalid length %d or composite offset %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001437 memory_share_length,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001438 memory_region->receivers[0]
1439 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001440 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001441 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001442 if (fragment_length < memory_share_length &&
1443 fragment_length < HF_MAILBOX_SIZE) {
1444 dlog_warning(
1445 "Initial fragment length %d smaller than mailbox "
1446 "size.\n",
1447 fragment_length);
1448 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001449
Andrew Walbrana65a1322020-04-06 19:32:32 +01001450 /*
1451 * Clear is not allowed for memory sharing, as the sender still has
1452 * access to the memory.
1453 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001454 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1455 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001456 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001457 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001458 }
1459
1460 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001461 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001462 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001463 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001464 }
1465
J-Alves363f5722022-04-25 17:37:37 +01001466 /* Check that the permissions are valid for each specified receiver. */
1467 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1468 ffa_memory_access_permissions_t permissions =
1469 memory_region->receivers[i]
1470 .receiver_permissions.permissions;
1471 ffa_vm_id_t receiver_id =
1472 memory_region->receivers[i]
1473 .receiver_permissions.receiver;
1474
1475 if (memory_region->sender == receiver_id) {
1476 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001477 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001478 }
Federico Recanati85090c42021-12-15 13:17:54 +01001479
J-Alves363f5722022-04-25 17:37:37 +01001480 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1481 j++) {
1482 if (receiver_id ==
1483 memory_region->receivers[j]
1484 .receiver_permissions.receiver) {
1485 dlog_verbose(
1486 "Repeated receiver(%x) in memory send "
1487 "operation.\n",
1488 memory_region->receivers[j]
1489 .receiver_permissions.receiver);
1490 return ffa_error(FFA_INVALID_PARAMETERS);
1491 }
1492 }
1493
1494 if (composite_memory_region_offset !=
1495 memory_region->receivers[i]
1496 .composite_memory_region_offset) {
1497 dlog_verbose(
1498 "All ffa_memory_access should point to the "
1499 "same composite memory region offset.\n");
1500 return ffa_error(FFA_INVALID_PARAMETERS);
1501 }
1502
1503 data_access = ffa_get_data_access_attr(permissions);
1504 instruction_access =
1505 ffa_get_instruction_access_attr(permissions);
1506 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1507 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1508 dlog_verbose(
1509 "Reserved value for receiver permissions "
1510 "%#x.\n",
1511 permissions);
1512 return ffa_error(FFA_INVALID_PARAMETERS);
1513 }
1514 if (instruction_access !=
1515 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1516 dlog_verbose(
1517 "Invalid instruction access permissions %#x "
1518 "for sending memory.\n",
1519 permissions);
1520 return ffa_error(FFA_INVALID_PARAMETERS);
1521 }
1522 if (share_func == FFA_MEM_SHARE_32) {
1523 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1524 dlog_verbose(
1525 "Invalid data access permissions %#x "
1526 "for sharing memory.\n",
1527 permissions);
1528 return ffa_error(FFA_INVALID_PARAMETERS);
1529 }
1530 /*
1531 * According to section 10.10.3 of the FF-A v1.1 EAC0
1532 * spec, NX is required for share operations (but must
1533 * not be specified by the sender) so set it in the
1534 * copy that we store, ready to be returned to the
1535 * retriever.
1536 */
J-Alvesb19731a2022-06-20 17:30:33 +01001537 if (vm_id_is_current_world(receiver_id)) {
1538 ffa_set_instruction_access_attr(
1539 &permissions,
1540 FFA_INSTRUCTION_ACCESS_NX);
1541 memory_region->receivers[i]
1542 .receiver_permissions.permissions =
1543 permissions;
1544 }
J-Alves363f5722022-04-25 17:37:37 +01001545 }
1546 if (share_func == FFA_MEM_LEND_32 &&
1547 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1548 dlog_verbose(
1549 "Invalid data access permissions %#x for "
1550 "lending memory.\n",
1551 permissions);
1552 return ffa_error(FFA_INVALID_PARAMETERS);
1553 }
1554
1555 if (share_func == FFA_MEM_DONATE_32 &&
1556 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1557 dlog_verbose(
1558 "Invalid data access permissions %#x for "
1559 "donating memory.\n",
1560 permissions);
1561 return ffa_error(FFA_INVALID_PARAMETERS);
1562 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001563 }
1564
Federico Recanatid937f5e2021-12-20 17:38:23 +01001565 /*
J-Alves807794e2022-06-16 13:42:47 +01001566 * For a memory donate operation, or a lend with a single borrower, the
1567 * memory type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001568 */
J-Alves807794e2022-06-16 13:42:47 +01001569 if (share_func == FFA_MEM_DONATE_32 ||
1570 (share_func == FFA_MEM_LEND_32 &&
1571 memory_region->receiver_count == 1)) {
1572 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1573 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1574 dlog_verbose(
1575 "Memory type shall not be specified by "
1576 "sender.\n");
1577 return ffa_error(FFA_INVALID_PARAMETERS);
1578 }
1579 } else {
1580 /*
1581 * Check that sender's memory attributes match Hafnium
1582 * expectations: Normal Memory, Inner shareable, Write-Back
1583 * Read-Allocate Write-Allocate Cacheable.
1584 */
1585 ret = ffa_memory_attributes_validate(memory_region->attributes);
1586 if (ret.func != FFA_SUCCESS_32) {
1587 return ret;
1588 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001589 }
1590
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001591 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001592}
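
/*
 * Worked example (not part of the original file) of the length checks
 * above, for a descriptor with R receivers and a composite region of
 * n constituents:
 *
 *   receivers_length    = R * sizeof(struct ffa_memory_access)
 *   composite offset    >= sizeof(struct ffa_memory_region)
 *                          + receivers_length
 *   constituents_offset = composite offset
 *                          + sizeof(struct ffa_composite_memory_region)
 *   memory_share_length = constituents_offset
 *                          + n * sizeof(struct ffa_memory_region_constituent)
 */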
1593
J-Alves8505a8a2022-06-15 18:10:18 +01001594/** Forwards a memory send message on to the other world. */
1595static struct ffa_value memory_send_other_world_forward(
1596 struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001597 uint32_t share_func, struct ffa_memory_region *memory_region,
1598 uint32_t memory_share_length, uint32_t fragment_length)
1599{
1600 struct ffa_value ret;
1601
J-Alves8505a8a2022-06-15 18:10:18 +01001602 /* Copy the descriptor into the other world VM's RX buffer. */
1603 memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001604 memory_region, fragment_length);
J-Alves8505a8a2022-06-15 18:10:18 +01001605 other_world_locked.vm->mailbox.recv_size = fragment_length;
1606 other_world_locked.vm->mailbox.recv_sender = sender_vm_id;
1607 other_world_locked.vm->mailbox.recv_func = share_func;
1608 other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001609 ret = arch_other_world_call(
1610 (struct ffa_value){.func = share_func,
1611 .arg1 = memory_share_length,
1612 .arg2 = fragment_length});
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001613 /*
J-Alves8505a8a2022-06-15 18:10:18 +01001614 * After the call to the other world completes, it must have finished
1615 * reading its RX buffer, so it is ready for another message.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001616 */
J-Alves8505a8a2022-06-15 18:10:18 +01001617 other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001618
1619 return ret;
1620}
1621
Andrew Walbrana65a1322020-04-06 19:32:32 +01001622/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001623 * Gets the share state for continuing an operation to donate, lend or share
1624 * memory, and checks that it is a valid request.
1625 *
1626 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1627 * not.
1628 */
1629static struct ffa_value ffa_memory_send_continue_validate(
1630 struct share_states_locked share_states, ffa_memory_handle_t handle,
1631 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1632 struct mpool *page_pool)
1633{
1634 struct ffa_memory_share_state *share_state;
1635 struct ffa_memory_region *memory_region;
1636
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001637 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001638
1639 /*
1640 * Look up the share state by handle and make sure that the VM ID
1641 * matches.
1642 */
1643 if (!get_share_state(share_states, handle, &share_state)) {
1644 dlog_verbose(
1645 "Invalid handle %#x for memory send continuation.\n",
1646 handle);
1647 return ffa_error(FFA_INVALID_PARAMETERS);
1648 }
1649 memory_region = share_state->memory_region;
1650
1651 if (memory_region->sender != from_vm_id) {
1652 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1653 return ffa_error(FFA_INVALID_PARAMETERS);
1654 }
1655
1656 if (share_state->sending_complete) {
1657 dlog_verbose(
1658 "Sending of memory handle %#x is already complete.\n",
1659 handle);
1660 return ffa_error(FFA_INVALID_PARAMETERS);
1661 }
1662
1663 if (share_state->fragment_count == MAX_FRAGMENTS) {
1664 /*
1665 * Log a warning as this is a sign that MAX_FRAGMENTS should
1666 * probably be increased.
1667 */
1668 dlog_warning(
1669 "Too many fragments for memory share with handle %#x; "
1670 "only %d supported.\n",
1671 handle, MAX_FRAGMENTS);
1672 /* Free share state, as it's not possible to complete it. */
1673 share_state_free(share_states, share_state, page_pool);
1674 return ffa_error(FFA_NO_MEMORY);
1675 }
1676
1677 *share_state_ret = share_state;
1678
1679 return (struct ffa_value){.func = FFA_SUCCESS_32};
1680}
1681
1682/**
J-Alves8505a8a2022-06-15 18:10:18 +01001683 * Forwards a memory send continuation message on to the other world.
Andrew Walbranca808b12020-05-15 17:22:28 +01001684 */
J-Alves8505a8a2022-06-15 18:10:18 +01001685static struct ffa_value memory_send_continue_other_world_forward(
1686 struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id,
1687 void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle)
Andrew Walbranca808b12020-05-15 17:22:28 +01001688{
1689 struct ffa_value ret;
1690
J-Alves8505a8a2022-06-15 18:10:18 +01001691 memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX,
1692 fragment, fragment_length);
1693 other_world_locked.vm->mailbox.recv_size = fragment_length;
1694 other_world_locked.vm->mailbox.recv_sender = sender_vm_id;
1695 other_world_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
1696 other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
Olivier Deprez112d2b52020-09-30 07:39:23 +02001697 ret = arch_other_world_call(
Andrew Walbranca808b12020-05-15 17:22:28 +01001698 (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
1699 .arg1 = (uint32_t)handle,
1700 .arg2 = (uint32_t)(handle >> 32),
1701 .arg3 = fragment_length,
1702 .arg4 = (uint64_t)sender_vm_id << 16});
1703 /*
J-Alves8505a8a2022-06-15 18:10:18 +01001704 * After the call to the other world completes, it must have finished
1705 * reading its RX buffer, so it is ready for another message.
Andrew Walbranca808b12020-05-15 17:22:28 +01001706 */
J-Alves8505a8a2022-06-15 18:10:18 +01001707 other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Walbranca808b12020-05-15 17:22:28 +01001708
1709 return ret;
1710}
1711
1712/**
J-Alves95df0ef2022-12-07 10:09:48 +00001713 * Checks if there is at least one receiver from the other world.
1714 */
1715static bool memory_region_receivers_from_other_world(
1716 struct ffa_memory_region *memory_region)
1717{
1718 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
1719 ffa_vm_id_t receiver = memory_region->receivers[i]
1720 .receiver_permissions.receiver;
1721 if (!vm_id_is_current_world(receiver)) {
1722 return true;
1723 }
1724 }
1725 return false;
1726}
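
/*
 * Illustrative sketch (not part of the original file): this predicate is
 * what routes a transaction, e.g. deciding whether a send must be
 * forwarded to the other world or can be completed locally.
 */
#if 0
static bool must_forward_to_other_world(struct ffa_memory_region *region)
{
	/*
	 * True: handled by the ffa_memory_other_world_* paths below;
	 * false: handled entirely within the current world.
	 */
	return memory_region_receivers_from_other_world(region);
}
#endif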
1727
1728/**
J-Alves8505a8a2022-06-15 18:10:18 +01001729 * Validates a call to donate, lend or share memory to a non-other world VM and
1730 * then updates the stage-2 page tables. Specifically, checks if the message
1731 * length and number of memory region constituents match, and if the transition
1732 * is valid for the type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001733 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001734 * Assumes that the caller has already found and locked the sender VM and copied
1735 * the memory region descriptor from the sender's TX buffer to a freshly
1736 * allocated page from Hafnium's internal pool. The caller must have also
1737 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001738 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001739 * This function takes ownership of the `memory_region` passed in and will free
1740 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001741 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001742struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001743 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001744 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001745 uint32_t fragment_length, uint32_t share_func,
1746 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001747{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001748 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001749 struct share_states_locked share_states;
1750 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001751
1752 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001753 * If there is an error validating the `memory_region` then we need to
1754 * free it because we own it but we won't be storing it in a share state
1755 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001756 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001757 ret = ffa_memory_send_validate(from_locked, memory_region,
1758 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001759 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001760 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001761 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001762 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001763 }
1764
Andrew Walbrana65a1322020-04-06 19:32:32 +01001765 /* Set flag for share function, ready to be retrieved later. */
1766 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001767 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001768 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001769 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001770 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001771 case FFA_MEM_LEND_32:
1772 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001773 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001774 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001775 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001776 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001777 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001778 }
1779
Andrew Walbranca808b12020-05-15 17:22:28 +01001780 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001781 /*
1782 * Allocate a share state before updating the page table. Otherwise if
1783 * updating the page table succeeded but allocating the share state
1784 * failed then it would leave the memory in a state where nobody could
1785 * get it back.
1786 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001787 if (!allocate_share_state(share_states, share_func, memory_region,
1788 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1789 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001790 dlog_verbose("Failed to allocate share state.\n");
1791 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001792 ret = ffa_error(FFA_NO_MEMORY);
1793 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001794 }
1795
Andrew Walbranca808b12020-05-15 17:22:28 +01001796 if (fragment_length == memory_share_length) {
1797 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001798 ret = ffa_memory_send_complete(
1799 from_locked, share_states, share_state, page_pool,
1800 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001801 } else {
1802 ret = (struct ffa_value){
1803 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001804 .arg1 = (uint32_t)memory_region->handle,
1805 .arg2 = (uint32_t)(memory_region->handle >> 32),
Andrew Walbranca808b12020-05-15 17:22:28 +01001806 .arg3 = fragment_length};
1807 }
1808
1809out:
1810 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001811 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001812 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001813}
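
/*
 * Illustrative sketch (not part of the original file): how a sender
 * drives the fragmented flow that the function above implements.
 * `send_first_fragment` and `send_next_fragment` are hypothetical
 * helpers standing in for the initial FFA_MEM_SHARE/LEND/DONATE call
 * and the follow-up FFA_MEM_FRAG_TX calls.
 */
#if 0
static struct ffa_value send_all_fragments(ffa_memory_handle_t *handle)
{
	struct ffa_value ret = send_first_fragment();

	while (ret.func == FFA_MEM_FRAG_RX_32) {
		/* arg1/arg2 carry the handle; arg3 the next offset. */
		*handle = ffa_frag_handle(ret);
		ret = send_next_fragment(*handle, (uint32_t)ret.arg3);
	}

	return ret; /* FFA_SUCCESS_32 on completion, else FFA_ERROR_32. */
}
#endif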
1814
1815/**
J-Alves8505a8a2022-06-15 18:10:18 +01001816 * Validates a call to donate, lend or share memory to the other world and then
1817 * updates the stage-2 page tables. Specifically, checks if the message length
1818 * and number of memory region constituents match, and if the transition is
1819 * valid for the type of memory sending operation.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001820 *
1821 * Assumes that the caller has already found and locked the sender VM and the
J-Alves8505a8a2022-06-15 18:10:18 +01001822 * other world VM, and copied the memory region descriptor from the sender's TX
1823 * buffer to a freshly allocated page from Hafnium's internal pool. The caller
1824 * must have also validated that the receiver VM ID is valid.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001825 *
1826 * This function takes ownership of the `memory_region` passed in and will free
1827 * it when necessary; it must not be freed by the caller.
1828 */
J-Alves8505a8a2022-06-15 18:10:18 +01001829struct ffa_value ffa_memory_other_world_send(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001830 struct vm_locked from_locked, struct vm_locked to_locked,
1831 struct ffa_memory_region *memory_region, uint32_t memory_share_length,
1832 uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool)
1833{
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001834 struct ffa_value ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001835
1836 /*
1837 * If there is an error validating the `memory_region` then we need to
1838 * free it because we own it but we won't be storing it in a share state
1839 * after all.
1840 */
1841 ret = ffa_memory_send_validate(from_locked, memory_region,
1842 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001843 share_func);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001844 if (ret.func != FFA_SUCCESS_32) {
1845 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001846 }
1847
Andrew Walbranca808b12020-05-15 17:22:28 +01001848 if (fragment_length == memory_share_length) {
1849 /* No more fragments to come, everything fit in one message. */
1850 struct ffa_composite_memory_region *composite =
1851 ffa_memory_region_get_composite(memory_region, 0);
1852 struct ffa_memory_region_constituent *constituents =
1853 composite->constituents;
Andrew Walbran37c574e2020-06-03 11:45:46 +01001854 struct mpool local_page_pool;
1855 uint32_t orig_from_mode;
1856
1857 /*
1858 * Use a local page pool so that we can roll back if necessary.
1859 */
1860 mpool_init_with_fallback(&local_page_pool, page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001861
1862 ret = ffa_send_check_update(
1863 from_locked, &constituents,
1864 &composite->constituent_count, 1, share_func,
J-Alves363f5722022-04-25 17:37:37 +01001865 memory_region->receivers, memory_region->receiver_count,
1866 &local_page_pool,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001867 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1868 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01001869 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01001870 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001871 goto out;
1872 }
1873
J-Alves8505a8a2022-06-15 18:10:18 +01001874 /* Forward memory send message on to other world. */
1875 ret = memory_send_other_world_forward(
Andrew Walbranca808b12020-05-15 17:22:28 +01001876 to_locked, from_locked.vm->id, share_func,
1877 memory_region, memory_share_length, fragment_length);
Andrew Walbran37c574e2020-06-03 11:45:46 +01001878
1879 if (ret.func != FFA_SUCCESS_32) {
1880 dlog_verbose(
J-Alves8505a8a2022-06-15 18:10:18 +01001881 "Other world didn't successfully complete "
1882 "memory send operation; returned %#x (%d). "
1883 "Rolling back.\n",
Andrew Walbran37c574e2020-06-03 11:45:46 +01001884 ret.func, ret.arg2);
1885
1886 /*
J-Alves8505a8a2022-06-15 18:10:18 +01001887 * The other world failed to complete the send
1888 * operation, so roll back the page table update for the
1889 * VM. This can't fail because it won't try to allocate
1890 * more memory than was freed into the `local_page_pool`
1891 * by `ffa_send_check_update` in the initial update.
Andrew Walbran37c574e2020-06-03 11:45:46 +01001892 */
1893 CHECK(ffa_region_group_identity_map(
1894 from_locked, &constituents,
1895 &composite->constituent_count, 1,
1896 orig_from_mode, &local_page_pool, true));
1897 }
1898
1899 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01001900 } else {
1901 struct share_states_locked share_states = share_states_lock();
1902 ffa_memory_handle_t handle;
1903
1904 /*
1905 * We need to wait for the rest of the fragments before we can
1906 * check whether the transaction is valid and unmap the memory.
J-Alves8505a8a2022-06-15 18:10:18 +01001907 * Call the other world so it can do its initial validation and
1908 * assign a handle, and allocate a share state to keep what we
1909 * have so far.
Andrew Walbranca808b12020-05-15 17:22:28 +01001910 */
J-Alves8505a8a2022-06-15 18:10:18 +01001911 ret = memory_send_other_world_forward(
Andrew Walbranca808b12020-05-15 17:22:28 +01001912 to_locked, from_locked.vm->id, share_func,
1913 memory_region, memory_share_length, fragment_length);
1914 if (ret.func == FFA_ERROR_32) {
1915 goto out_unlock;
1916 } else if (ret.func != FFA_MEM_FRAG_RX_32) {
1917 dlog_warning(
J-Alves8505a8a2022-06-15 18:10:18 +01001918 				"Got %#x from other world in response to %#x "
Olivier Deprez701e8bf2022-04-06 18:45:18 +02001920 				"for fragment with %d/%d, expected "
Andrew Walbranca808b12020-05-15 17:22:28 +01001921 "FFA_MEM_FRAG_RX.\n",
1922 ret.func, share_func, fragment_length,
1923 memory_share_length);
1924 ret = ffa_error(FFA_INVALID_PARAMETERS);
1925 goto out_unlock;
1926 }
1927 handle = ffa_frag_handle(ret);
1928 if (ret.arg3 != fragment_length) {
1929 dlog_warning(
1930 "Got unexpected fragment offset %d for "
J-Alves8505a8a2022-06-15 18:10:18 +01001931 "FFA_MEM_FRAG_RX from other world (expected "
1932 "%d).\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01001933 ret.arg3, fragment_length);
1934 ret = ffa_error(FFA_INVALID_PARAMETERS);
1935 goto out_unlock;
1936 }
1937 if (ffa_frag_sender(ret) != from_locked.vm->id) {
1938 dlog_warning(
1939 "Got unexpected sender ID %d for "
J-Alves8505a8a2022-06-15 18:10:18 +01001940 "FFA_MEM_FRAG_RX from other world (expected "
1941 "%d).\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01001942 ffa_frag_sender(ret), from_locked.vm->id);
1943 ret = ffa_error(FFA_INVALID_PARAMETERS);
1944 goto out_unlock;
1945 }
1946
1947 if (!allocate_share_state(share_states, share_func,
1948 memory_region, fragment_length,
1949 handle, NULL)) {
1950 dlog_verbose("Failed to allocate share state.\n");
1951 ret = ffa_error(FFA_NO_MEMORY);
1952 goto out_unlock;
1953 }
1954 /*
1955 * Don't free the memory region fragment, as it has been stored
1956 * in the share state.
1957 */
1958 memory_region = NULL;
1959 out_unlock:
1960 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001961 }
1962
Andrew Walbranca808b12020-05-15 17:22:28 +01001963out:
1964 if (memory_region != NULL) {
1965 mpool_free(page_pool, memory_region);
1966 }
1967 dump_share_states();
1968 return ret;
1969}
1970
1971/**
J-Alves8505a8a2022-06-15 18:10:18 +01001972 * Continues an operation to donate, lend or share memory to a VM from current
1973 * world. If this is the last fragment then checks that the transition is valid
1974 * for the type of memory sending operation and updates the stage-2 page tables
1975 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001976 *
1977 * Assumes that the caller has already found and locked the sender VM and copied
1978 * the memory region descriptor from the sender's TX buffer to a freshly
1979 * allocated page from Hafnium's internal pool.
1980 *
1981 * This function takes ownership of the `fragment` passed in; it must not be
1982 * freed by the caller.
1983 */
1984struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1985 void *fragment,
1986 uint32_t fragment_length,
1987 ffa_memory_handle_t handle,
1988 struct mpool *page_pool)
1989{
1990 struct share_states_locked share_states = share_states_lock();
1991 struct ffa_memory_share_state *share_state;
1992 struct ffa_value ret;
1993 struct ffa_memory_region *memory_region;
1994
1995 ret = ffa_memory_send_continue_validate(share_states, handle,
1996 &share_state,
1997 from_locked.vm->id, page_pool);
1998 if (ret.func != FFA_SUCCESS_32) {
1999 goto out_free_fragment;
2000 }
2001 memory_region = share_state->memory_region;
2002
J-Alves95df0ef2022-12-07 10:09:48 +00002003 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002004 dlog_error(
2005 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002006 "other world. This should never happen, and indicates "
2007 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01002008 "EL3 code.\n");
2009 ret = ffa_error(FFA_INVALID_PARAMETERS);
2010 goto out_free_fragment;
2011 }
2012
2013 /* Add this fragment. */
2014 share_state->fragments[share_state->fragment_count] = fragment;
2015 share_state->fragment_constituent_counts[share_state->fragment_count] =
2016 fragment_length / sizeof(struct ffa_memory_region_constituent);
2017 share_state->fragment_count++;
2018
2019 /* Check whether the memory send operation is now ready to complete. */
2020 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002021 ret = ffa_memory_send_complete(
2022 from_locked, share_states, share_state, page_pool,
2023 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002024 } else {
2025 ret = (struct ffa_value){
2026 .func = FFA_MEM_FRAG_RX_32,
2027 .arg1 = (uint32_t)handle,
2028 .arg2 = (uint32_t)(handle >> 32),
2029 .arg3 = share_state_next_fragment_offset(share_states,
2030 share_state)};
2031 }
2032 goto out;
2033
2034out_free_fragment:
2035 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002036
2037out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002038 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002039 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002040}
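
/*
 * Illustrative sketch (not part of the original file) of the fragment
 * bookkeeping above: a continuation fragment is a bare array of
 * constituents, so its count is recovered from the byte length alone.
 * With 16-byte constituents, a 4096-byte fragment carries 256 of them.
 */
#if 0
static uint32_t fragment_constituent_count(uint32_t fragment_length)
{
	return fragment_length /
	       sizeof(struct ffa_memory_region_constituent);
}
#endif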
2041
Andrew Walbranca808b12020-05-15 17:22:28 +01002042/**
J-Alves8505a8a2022-06-15 18:10:18 +01002043 * Continues an operation to donate, lend or share memory to the other world VM.
2044 * If this is the last fragment then checks that the transition is valid for the
2045 * type of memory sending operation and updates the stage-2 page tables of the
2046 * sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002047 *
2048 * Assumes that the caller has already found and locked the sender VM and copied
2049 * the memory region descriptor from the sender's TX buffer to a freshly
2050 * allocated page from Hafnium's internal pool.
2051 *
2052 * This function takes ownership of the `fragment` passed in and will free
2053 * it when necessary; it must not be freed by the caller.
2054 */
J-Alves8505a8a2022-06-15 18:10:18 +01002055struct ffa_value ffa_memory_other_world_send_continue(
2056 struct vm_locked from_locked, struct vm_locked to_locked,
2057 void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle,
2058 struct mpool *page_pool)
Andrew Walbranca808b12020-05-15 17:22:28 +01002059{
2060 struct share_states_locked share_states = share_states_lock();
2061 struct ffa_memory_share_state *share_state;
2062 struct ffa_value ret;
2063 struct ffa_memory_region *memory_region;
2064
2065 ret = ffa_memory_send_continue_validate(share_states, handle,
2066 &share_state,
2067 from_locked.vm->id, page_pool);
2068 if (ret.func != FFA_SUCCESS_32) {
2069 goto out_free_fragment;
2070 }
2071 memory_region = share_state->memory_region;
2072
J-Alves95df0ef2022-12-07 10:09:48 +00002073 if (!memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002074 dlog_error(
J-Alves8505a8a2022-06-15 18:10:18 +01002075 "Got SPM-allocated handle for memory send to non-other "
2076 "world VM. This should never happen, and indicates a "
2077 "bug.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01002078 ret = ffa_error(FFA_INVALID_PARAMETERS);
2079 goto out_free_fragment;
2080 }
2081
2082 if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
2083 to_locked.vm->mailbox.recv == NULL) {
2084 /*
J-Alves8505a8a2022-06-15 18:10:18 +01002085 * If the other world RX buffer is not available, tell the
2086 * sender to retry by returning the current offset again.
Andrew Walbranca808b12020-05-15 17:22:28 +01002087 */
2088 ret = (struct ffa_value){
2089 .func = FFA_MEM_FRAG_RX_32,
2090 .arg1 = (uint32_t)handle,
2091 .arg2 = (uint32_t)(handle >> 32),
2092 .arg3 = share_state_next_fragment_offset(share_states,
2093 share_state),
2094 };
2095 goto out_free_fragment;
2096 }
2097
2098 /* Add this fragment. */
2099 share_state->fragments[share_state->fragment_count] = fragment;
2100 share_state->fragment_constituent_counts[share_state->fragment_count] =
2101 fragment_length / sizeof(struct ffa_memory_region_constituent);
2102 share_state->fragment_count++;
2103
2104 /* Check whether the memory send operation is now ready to complete. */
2105 if (share_state_sending_complete(share_states, share_state)) {
Andrew Walbran37c574e2020-06-03 11:45:46 +01002106 struct mpool local_page_pool;
2107 uint32_t orig_from_mode;
2108
2109 /*
2110 * Use a local page pool so that we can roll back if necessary.
2111 */
2112 mpool_init_with_fallback(&local_page_pool, page_pool);
2113
Andrew Walbranca808b12020-05-15 17:22:28 +01002114 ret = ffa_memory_send_complete(from_locked, share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01002115 share_state, &local_page_pool,
2116 &orig_from_mode);
Andrew Walbranca808b12020-05-15 17:22:28 +01002117
2118 if (ret.func == FFA_SUCCESS_32) {
2119 /*
J-Alves8505a8a2022-06-15 18:10:18 +01002120 * Forward final fragment on to the other world so that
Andrew Walbranca808b12020-05-15 17:22:28 +01002121 * it can complete the memory sending operation.
2122 */
J-Alves8505a8a2022-06-15 18:10:18 +01002123 ret = memory_send_continue_other_world_forward(
Andrew Walbranca808b12020-05-15 17:22:28 +01002124 to_locked, from_locked.vm->id, fragment,
2125 fragment_length, handle);
2126
2127 if (ret.func != FFA_SUCCESS_32) {
2128 /*
2129 * The error will be passed on to the caller,
2130 * but log it here too.
2131 */
2132 dlog_verbose(
J-Alves8505a8a2022-06-15 18:10:18 +01002133 					"Other world didn't successfully complete "
Andrew Walbranca808b12020-05-15 17:22:28 +01002135 "memory send operation; returned %#x "
Andrew Walbran37c574e2020-06-03 11:45:46 +01002136 "(%d). Rolling back.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002137 ret.func, ret.arg2);
Andrew Walbran37c574e2020-06-03 11:45:46 +01002138
2139 /*
J-Alves8505a8a2022-06-15 18:10:18 +01002140 * The other world failed to complete the send
Andrew Walbran37c574e2020-06-03 11:45:46 +01002141 * operation, so roll back the page table update
2142 * for the VM. This can't fail because it won't
2143 * try to allocate more memory than was freed
2144 * into the `local_page_pool` by
2145 * `ffa_send_check_update` in the initial
2146 * update.
2147 */
2148 CHECK(ffa_region_group_identity_map(
2149 from_locked, share_state->fragments,
2150 share_state
2151 ->fragment_constituent_counts,
2152 share_state->fragment_count,
2153 orig_from_mode, &local_page_pool,
2154 true));
Andrew Walbranca808b12020-05-15 17:22:28 +01002155 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002156
Andrew Walbranca808b12020-05-15 17:22:28 +01002157 /* Free share state. */
2158 share_state_free(share_states, share_state, page_pool);
2159 } else {
J-Alves8505a8a2022-06-15 18:10:18 +01002160 /* Abort sending to other world. */
2161 struct ffa_value other_world_ret =
Olivier Deprez112d2b52020-09-30 07:39:23 +02002162 arch_other_world_call((struct ffa_value){
Andrew Walbranca808b12020-05-15 17:22:28 +01002163 .func = FFA_MEM_RECLAIM_32,
2164 .arg1 = (uint32_t)handle,
2165 .arg2 = (uint32_t)(handle >> 32)});
2166
J-Alves8505a8a2022-06-15 18:10:18 +01002167 if (other_world_ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002168 /*
J-Alves8505a8a2022-06-15 18:10:18 +01002169 * Nothing we can do if other world doesn't
2170 * abort properly, just log it.
Andrew Walbranca808b12020-05-15 17:22:28 +01002171 */
2172 dlog_verbose(
J-Alves8505a8a2022-06-15 18:10:18 +01002173 					"Other world didn't successfully abort failed "
Andrew Walbranca808b12020-05-15 17:22:28 +01002175 "memory send operation; returned %#x "
2176 "(%d).\n",
J-Alves8505a8a2022-06-15 18:10:18 +01002177 other_world_ret.func,
2178 other_world_ret.arg2);
Andrew Walbranca808b12020-05-15 17:22:28 +01002179 }
2180 /*
2181 * We don't need to free the share state in this case
2182 * because ffa_memory_send_complete does that already.
2183 */
2184 }
Andrew Walbran37c574e2020-06-03 11:45:46 +01002185
2186 mpool_fini(&local_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01002187 } else {
2188 uint32_t next_fragment_offset =
2189 share_state_next_fragment_offset(share_states,
2190 share_state);
2191
J-Alves8505a8a2022-06-15 18:10:18 +01002192 ret = memory_send_continue_other_world_forward(
Andrew Walbranca808b12020-05-15 17:22:28 +01002193 to_locked, from_locked.vm->id, fragment,
2194 fragment_length, handle);
2195
2196 if (ret.func != FFA_MEM_FRAG_RX_32 ||
2197 ffa_frag_handle(ret) != handle ||
2198 ret.arg3 != next_fragment_offset ||
2199 ffa_frag_sender(ret) != from_locked.vm->id) {
2200 dlog_verbose(
2201 "Got unexpected result from forwarding "
J-Alves8505a8a2022-06-15 18:10:18 +01002202 				"FFA_MEM_FRAG_TX to other world. %#x (handle %#x, "
Andrew Walbranca808b12020-05-15 17:22:28 +01002204 "offset %d, sender %d); expected "
2205 "FFA_MEM_FRAG_RX (handle %#x, offset %d, "
2206 "sender %d).\n",
2207 ret.func, ffa_frag_handle(ret), ret.arg3,
2208 ffa_frag_sender(ret), handle,
2209 next_fragment_offset, from_locked.vm->id);
2210 /* Free share state. */
2211 share_state_free(share_states, share_state, page_pool);
2212 ret = ffa_error(FFA_INVALID_PARAMETERS);
2213 goto out;
2214 }
2215
2216 ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
2217 .arg1 = (uint32_t)handle,
2218 .arg2 = (uint32_t)(handle >> 32),
2219 .arg3 = next_fragment_offset};
2220 }
2221 goto out;
2222
2223out_free_fragment:
2224 mpool_free(page_pool, fragment);
2225
2226out:
2227 share_states_unlock(&share_states);
2228 return ret;
2229}
2230
2231/** Clean up after the receiver has finished retrieving a memory region. */
2232static void ffa_memory_retrieve_complete(
2233 struct share_states_locked share_states,
2234 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2235{
2236 if (share_state->share_func == FFA_MEM_DONATE_32) {
2237 /*
2238 * Memory that has been donated can't be relinquished,
2239 * so no need to keep the share state around.
2240 */
2241 share_state_free(share_states, share_state, page_pool);
2242 dlog_verbose("Freed share state for donate.\n");
2243 }
2244}
2245
J-Alves96de29f2022-04-26 16:05:24 +01002246/*
2247 * Finds the given receiver's entry in the 'struct ffa_memory_region'
2248 * receivers array and returns its index. If the receiver's ID doesn't exist
2249 * in the array, returns the region's 'receiver_count'.
2250 */
2251static uint32_t ffa_memory_region_get_receiver(
2252 struct ffa_memory_region *memory_region, ffa_vm_id_t receiver)
2253{
2254 struct ffa_memory_access *receivers;
2255 uint32_t i;
2256
2257 assert(memory_region != NULL);
2258
2259 receivers = memory_region->receivers;
2260
2261 for (i = 0U; i < memory_region->receiver_count; i++) {
2262 if (receivers[i].receiver_permissions.receiver == receiver) {
2263 break;
2264 }
2265 }
2266
2267 return i;
2268}
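
/*
 * Illustrative sketch (not part of the original file): callers compare
 * the returned index against the region's receiver_count, which acts
 * as the "not found" sentinel.
 */
#if 0
static bool region_has_receiver(struct ffa_memory_region *region,
				ffa_vm_id_t receiver)
{
	return ffa_memory_region_get_receiver(region, receiver) !=
	       region->receiver_count;
}
#endif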
2269
2270/**
2271 * Validates the requested permissions against those specified by the lender
2272 * of the memory share operation. Optionally sets the permissions to be used
2273 * for the stage-2 mapping, through the `permissions` argument.
2274 * Returns true if permissions are valid, false otherwise.
2275 */
2276static bool ffa_memory_retrieve_is_memory_access_valid(
2277 enum ffa_data_access sent_data_access,
2278 enum ffa_data_access requested_data_access,
2279 enum ffa_instruction_access sent_instruction_access,
2280 enum ffa_instruction_access requested_instruction_access,
2281 ffa_memory_access_permissions_t *permissions)
2282{
2283 switch (sent_data_access) {
2284 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2285 case FFA_DATA_ACCESS_RW:
2286 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2287 requested_data_access == FFA_DATA_ACCESS_RW) {
2288 if (permissions != NULL) {
2289 ffa_set_data_access_attr(permissions,
2290 FFA_DATA_ACCESS_RW);
2291 }
2292 break;
2293 }
2294 /* Intentional fall-through. */
2295 case FFA_DATA_ACCESS_RO:
2296 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2297 requested_data_access == FFA_DATA_ACCESS_RO) {
2298 if (permissions != NULL) {
2299 ffa_set_data_access_attr(permissions,
2300 FFA_DATA_ACCESS_RO);
2301 }
2302 break;
2303 }
2304 dlog_verbose(
2305 "Invalid data access requested; sender specified "
2306 "permissions %#x but receiver requested %#x.\n",
2307 sent_data_access, requested_data_access);
2308 return false;
2309 case FFA_DATA_ACCESS_RESERVED:
2310 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2311 "checked before this point.");
2312 }
2313
2314 switch (sent_instruction_access) {
2315 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2316 case FFA_INSTRUCTION_ACCESS_X:
2317 if (requested_instruction_access ==
2318 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2319 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
2320 if (permissions != NULL) {
2321 ffa_set_instruction_access_attr(
2322 permissions, FFA_INSTRUCTION_ACCESS_X);
2323 }
2324 break;
2325 }
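		/* Intentional fall-through. */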
2326 case FFA_INSTRUCTION_ACCESS_NX:
2327 if (requested_instruction_access ==
2328 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2329 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2330 if (permissions != NULL) {
2331 ffa_set_instruction_access_attr(
2332 permissions, FFA_INSTRUCTION_ACCESS_NX);
2333 }
2334 break;
2335 }
2336 dlog_verbose(
2337 "Invalid instruction access requested; sender "
2338 "specified permissions %#x but receiver requested "
2339 "%#x.\n",
2340 sent_instruction_access, requested_instruction_access);
2341 return false;
2342 case FFA_INSTRUCTION_ACCESS_RESERVED:
2343 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2344 "be checked before this point.");
2345 }
2346
2347 return true;
2348}
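
/*
 * Worked example (not part of the original file) of the data-access
 * narrowing above; rows are the sender's permission, columns the
 * receiver's request, cells the granted mapping:
 *
 *                  NOT_SPECIFIED   RO       RW
 *   NOT_SPECIFIED  RW              RO       RW
 *   RW             RW              RO       RW
 *   RO             RO              RO       denied
 */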
2349
2350/**
2351 * Validate the receivers' permissions in the retrieve request against those
2352 * specified by the lender.
2353 * Returns, in the `permissions` argument, the permissions to set at stage-2
2354 * for the caller of FFA_MEM_RETRIEVE_REQ.
2355 * Returns FFA_SUCCESS if all specified permissions are valid.
2356 */
2357static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2358 struct ffa_memory_region *memory_region,
2359 struct ffa_memory_region *retrieve_request, ffa_vm_id_t to_vm_id,
2360 ffa_memory_access_permissions_t *permissions)
2361{
2362 uint32_t retrieve_receiver_index;
2363
2364 assert(permissions != NULL);
2365
2366 if (retrieve_request->receiver_count != memory_region->receiver_count) {
2367 dlog_verbose(
2368 "Retrieve request should contain same list of "
2369 "borrowers, as specified by the lender.\n");
2370 return ffa_error(FFA_INVALID_PARAMETERS);
2371 }
2372
2373 retrieve_receiver_index = retrieve_request->receiver_count;
2374
2375 /* Populated below with the permissions of the retriever. */
2376 *permissions = 0;
2377
2378 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2379 ffa_memory_access_permissions_t sent_permissions;
2380 struct ffa_memory_access *current_receiver =
2381 &retrieve_request->receivers[i];
2382 ffa_memory_access_permissions_t requested_permissions =
2383 current_receiver->receiver_permissions.permissions;
2384 ffa_vm_id_t current_receiver_id =
2385 current_receiver->receiver_permissions.receiver;
2386 bool found_to_id = current_receiver_id == to_vm_id;
2387
2388 /*
2389 * Find the current receiver in the transaction descriptor from
2390 * sender.
2391 */
2392 uint32_t mem_region_receiver_index =
2393 ffa_memory_region_get_receiver(memory_region,
2394 current_receiver_id);
2395
2396 if (mem_region_receiver_index ==
2397 memory_region->receiver_count) {
2398 dlog_verbose("%s: receiver %x not found\n", __func__,
2399 current_receiver_id);
2400 return ffa_error(FFA_DENIED);
2401 }
2402
2403 sent_permissions =
2404 memory_region->receivers[mem_region_receiver_index]
2405 .receiver_permissions.permissions;
2406
2407 /*
2408 * While traversing the list of receivers, save the index of the
2409 * caller; it must appear in the list.
2410 */
2411 if (found_to_id) {
2412 retrieve_receiver_index = i;
2413 }
2414
2416 if (current_receiver->composite_memory_region_offset != 0U) {
2417 dlog_verbose(
2418 "Retriever specified address ranges not "
2419 "supported (got offset %d).\n",
2420 current_receiver
2421 ->composite_memory_region_offset);
2422 return ffa_error(FFA_INVALID_PARAMETERS);
2423 }
2424
2425 /*
2426 * Check permissions from sender against permissions requested
2427 * by receiver.
2428 */
2429 if (!ffa_memory_retrieve_is_memory_access_valid(
2430 ffa_get_data_access_attr(sent_permissions),
2431 ffa_get_data_access_attr(requested_permissions),
2432 ffa_get_instruction_access_attr(sent_permissions),
2433 ffa_get_instruction_access_attr(
2434 requested_permissions),
2435 found_to_id ? permissions : NULL)) {
2436 return ffa_error(FFA_DENIED);
2437 }
2438
2439 /*
2440 * Can't request PM to clear memory if only provided with RO
2441 * permissions.
2442 */
2443 if (found_to_id &&
2444 (ffa_get_data_access_attr(*permissions) ==
2445 FFA_DATA_ACCESS_RO) &&
2446 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2447 0U) {
2448 dlog_verbose(
2449 "Receiver has RO permissions can not request "
2450 "clear.\n");
2451 return ffa_error(FFA_DENIED);
2452 }
2453 }
2454
2455 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2456 dlog_verbose(
2457 "Retrieve request does not contain caller's (%x) "
2458 "permissions\n",
2459 to_vm_id);
2460 return ffa_error(FFA_INVALID_PARAMETERS);
2461 }
2462
2463 return (struct ffa_value){.func = FFA_SUCCESS_32};
2464}
2465
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002466struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2467 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002468 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002469 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002470{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002471 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002472 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002473 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002474 sizeof(struct ffa_memory_access);
2475 ffa_memory_handle_t handle = retrieve_request->handle;
2476 ffa_memory_region_flags_t transaction_type =
Andrew Walbrana65a1322020-04-06 19:32:32 +01002477 retrieve_request->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002478 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
2479 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002480 ffa_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002481 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002482 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002483 struct ffa_memory_share_state *share_state;
2484 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002485 struct ffa_composite_memory_region *composite;
2486 uint32_t total_length;
2487 uint32_t fragment_length;
J-Alves96de29f2022-04-26 16:05:24 +01002488 uint32_t receiver_index;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002489
2490 dump_share_states();
2491
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002492 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002493 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002494 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002495 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002496 expected_retrieve_request_length,
2497 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002498 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002499 }
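
/*
 * For example (illustrative): a retrieve request naming two borrowers must
 * be exactly sizeof(struct ffa_memory_region) +
 * 2 * sizeof(struct ffa_memory_access) bytes, as the retriever supplies no
 * composite address ranges; that is enforced when the access list is
 * validated below.
 */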
2500
2501 share_states = share_states_lock();
2502 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002503 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002504 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002505 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002506 goto out;
2507 }
2508
J-Alves96de29f2022-04-26 16:05:24 +01002509 if (!share_state->sending_complete) {
2510 dlog_verbose(
2511 "Memory with handle %#x not fully sent, can't "
2512 "retrieve.\n",
2513 handle);
2514 ret = ffa_error(FFA_INVALID_PARAMETERS);
2515 goto out;
2516 }
2517
Andrew Walbrana65a1322020-04-06 19:32:32 +01002518 memory_region = share_state->memory_region;
2519 CHECK(memory_region != NULL);
2520
2521 /*
J-Alves96de29f2022-04-26 16:05:24 +01002522 * Find receiver index in the receivers list specified by the sender.
2523 */
2524 receiver_index =
2525 ffa_memory_region_get_receiver(memory_region, to_locked.vm->id);
2526
2527 if (receiver_index == memory_region->receiver_count) {
2528 dlog_verbose(
2529 "Incorrect receiver VM ID %x for FFA_MEM_RETRIEVE_REQ, "
2530 "for handle %#x.\n",
2531 to_locked.vm->id, handle);
2532 ret = ffa_error(FFA_INVALID_PARAMETERS);
2533 goto out;
2534 }
2535
2536 if (share_state->retrieved_fragment_count[receiver_index] != 0U) {
2537 dlog_verbose("Memory with handle %#x already retrieved.\n",
2538 handle);
2539 ret = ffa_error(FFA_DENIED);
2540 goto out;
2541 }
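
/*
 * Note: retrieved_fragment_count[receiver_index] doubles as the retrieval
 * state for that borrower: 0 means not (or no longer) retrieved, and the
 * relinquish path resets it to 0 so the region can be reclaimed or
 * retrieved again.
 */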
2542
2543 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002544 * Check that the transaction type expected by the receiver is correct,
2545 * if it has been specified.
2546 */
2547 if (transaction_type !=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002548 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
Andrew Walbrana65a1322020-04-06 19:32:32 +01002549 transaction_type != (memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002550 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002551 dlog_verbose(
2552 "Incorrect transaction type %#x for "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002553 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002554 transaction_type,
2555 memory_region->flags &
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002556 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002557 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002558 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002559 goto out;
2560 }
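
/*
 * Note: per the FF-A specification, the transaction type is encoded in
 * bits 4:3 of the flags (0b00 unspecified, 0b01 share, 0b10 lend, 0b11
 * donate), so the comparison above is against the type recorded when the
 * memory was sent.
 */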
2561
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002562 if (retrieve_request->sender != memory_region->sender) {
2563 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002564 "Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002565 "expected %d for handle %#x.\n",
2566 retrieve_request->sender, memory_region->sender,
2567 handle);
J-Alves040c4ef2022-05-13 14:42:49 +01002568 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002569 goto out;
2570 }
2571
2572 if (retrieve_request->tag != memory_region->tag) {
2573 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002574 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002575 "%d for handle %#x.\n",
2576 retrieve_request->tag, memory_region->tag, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002577 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002578 goto out;
2579 }
2580
Federico Recanati85090c42021-12-15 13:17:54 +01002581 if ((retrieve_request->flags &
J-Alves96de29f2022-04-26 16:05:24 +01002582 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
Federico Recanati85090c42021-12-15 13:17:54 +01002583 dlog_verbose(
2584 "Retriever specified 'address range alignment hint'"
2585 " not supported.\n");
2586 ret = ffa_error(FFA_INVALID_PARAMETERS);
2587 goto out;
2588 }
2589 if ((retrieve_request->flags &
2590 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2591 dlog_verbose(
2592 "Bits 8-5 must be zero in memory region's flags "
2593 "(address range alignment hint not supported).\n");
2594 ret = ffa_error(FFA_INVALID_PARAMETERS);
2595 goto out;
2596 }
2597
J-Alves84658fc2021-06-17 14:37:32 +01002598 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2599 dlog_verbose(
2600 "Bits 31-10 must be zero in memory region's flags.\n");
2601 ret = ffa_error(FFA_INVALID_PARAMETERS);
2602 goto out;
2603 }
2604
2605 if (share_state->share_func == FFA_MEM_SHARE_32 &&
2606 (retrieve_request->flags &
2607 (FFA_MEMORY_REGION_FLAG_CLEAR |
2608 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2609 dlog_verbose(
2610 "Memory Share operation can't clean after relinquish "
2611 "memory region.\n");
2612 ret = ffa_error(FFA_INVALID_PARAMETERS);
2613 goto out;
2614 }
2615
Andrew Walbrana65a1322020-04-06 19:32:32 +01002616 /*
J-Alves17c069c2021-12-07 16:00:38 +00002617 * If the borrower needs the memory to be cleared before mapping to its
2618 * address space, the sender should have set the flag when calling
2619 * FFA_MEM_LEND/FFA_MEM_DONATE, else return FFA_DENIED.
2620 */
2621 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
J-Alves96de29f2022-04-26 16:05:24 +01002622 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
J-Alves17c069c2021-12-07 16:00:38 +00002623 dlog_verbose(
2624 "Borrower needs memory cleared. Sender needs to set "
2625 "flag for clearing memory.\n");
2626 ret = ffa_error(FFA_DENIED);
2627 goto out;
2628 }
2629
J-Alves96de29f2022-04-26 16:05:24 +01002630 ret = ffa_memory_retrieve_validate_memory_access_list(
2631 memory_region, retrieve_request, to_locked.vm->id,
2632 &permissions);
2633 if (ret.func != FFA_SUCCESS_32) {
J-Alves84658fc2021-06-17 14:37:32 +01002634 goto out;
2635 }
2636
J-Alves614d9f42022-06-28 14:03:10 +01002637 if (ffa_get_memory_type_attr(retrieve_request->attributes) !=
2638 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2639 /*
2640 * Ensure receiver's attributes are compatible with how Hafnium
2641 * maps memory: Normal Memory, Inner shareable, Write-Back
2642 * Read-Allocate Write-Allocate Cacheable.
2643 */
2644 ret = ffa_memory_attributes_validate(
2645 retrieve_request->attributes);
2646 if (ret.func != FFA_SUCCESS_32) {
2647 goto out;
2648 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002649 }
2650
J-Alves7cd5eb32020-10-16 19:06:10 +01002651 memory_to_attributes = ffa_memory_permissions_to_mode(
2652 permissions, share_state->sender_orig_mode);
Andrew Walbran996d1d12020-05-27 14:08:43 +01002653 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00002654 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002655 share_state->fragment_constituent_counts,
2656 share_state->fragment_count, memory_to_attributes,
Andrew Walbran996d1d12020-05-27 14:08:43 +01002657 share_state->share_func, false, page_pool);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002658 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002659 goto out;
2660 }
2661
2662 /*
2663 * Copy response to RX buffer of caller and deliver the message. This
2664 * must be done before the share_state is (possibly) freed.
2665 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002666 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002667 composite = ffa_memory_region_get_composite(memory_region, 0);
2668 /*
2669 * Constituents which we received in the first fragment should always
2670 * fit in the first fragment we are sending, because the header is the
2671 * same size in both cases and we have a fixed message buffer size. So
2672 * `ffa_retrieved_memory_region_init` should never fail.
2673 */
2674 CHECK(ffa_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002675 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2676 memory_region->sender, memory_region->attributes,
2677 memory_region->flags, handle, to_locked.vm->id, permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +01002678 composite->page_count, composite->constituent_count,
2679 share_state->fragments[0],
2680 share_state->fragment_constituent_counts[0], &total_length,
2681 &fragment_length));
2682 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002683 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002684 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002685 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
2686
J-Alves96de29f2022-04-26 16:05:24 +01002687 share_state->retrieved_fragment_count[receiver_index] = 1;
2688 if (share_state->retrieved_fragment_count[receiver_index] ==
Andrew Walbranca808b12020-05-15 17:22:28 +01002689 share_state->fragment_count) {
2690 ffa_memory_retrieve_complete(share_states, share_state,
2691 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002692 }
2693
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002694 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002695 .arg1 = total_length,
2696 .arg2 = fragment_length};
2697
2698out:
2699 share_states_unlock(&share_states);
2700 dump_share_states();
2701 return ret;
2702}
2703
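/*
 * Illustrative borrower-side flow for a fragmented retrieve (a sketch, not
 * Hafnium code): the borrower sends FFA_MEM_RETRIEVE_REQ and receives the
 * first fragment via FFA_MEM_RETRIEVE_RESP(total_length, fragment_length);
 * while the accumulated offset is below total_length, it calls
 * FFA_MEM_FRAG_RX(handle, fragment_offset) and receives each further
 * fragment through FFA_MEM_FRAG_TX. The function below serves those
 * FFA_MEM_FRAG_RX calls.
 */
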
2704struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2705 ffa_memory_handle_t handle,
2706 uint32_t fragment_offset,
2707 struct mpool *page_pool)
2708{
2709 struct ffa_memory_region *memory_region;
2710 struct share_states_locked share_states;
2711 struct ffa_memory_share_state *share_state;
2712 struct ffa_value ret;
2713 uint32_t fragment_index;
2714 uint32_t retrieved_constituents_count;
2715 uint32_t i;
2716 uint32_t expected_fragment_offset;
2717 uint32_t remaining_constituent_count;
2718 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002719 uint32_t receiver_index;
Andrew Walbranca808b12020-05-15 17:22:28 +01002720
2721 dump_share_states();
2722
2723 share_states = share_states_lock();
2724 if (!get_share_state(share_states, handle, &share_state)) {
2725 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2726 handle);
2727 ret = ffa_error(FFA_INVALID_PARAMETERS);
2728 goto out;
2729 }
2730
2731 memory_region = share_state->memory_region;
2732 CHECK(memory_region != NULL);
2733
J-Alvesc7484f12022-05-13 12:41:14 +01002734 receiver_index =
2735 ffa_memory_region_get_receiver(memory_region, to_locked.vm->id);
2736
2737 if (receiver_index == memory_region->receiver_count) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002738 dlog_verbose(
2739 "Caller of FFA_MEM_FRAG_RX (%x) is not a borrower in "
2740 "memory sharing transaction %#x.\n",
2741 to_locked.vm->id, handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01002742 ret = ffa_error(FFA_INVALID_PARAMETERS);
2743 goto out;
2744 }
2745
2746 if (!share_state->sending_complete) {
2747 dlog_verbose(
2748 "Memory with handle %#x not fully sent, can't "
2749 "retrieve.\n",
2750 handle);
2751 ret = ffa_error(FFA_INVALID_PARAMETERS);
2752 goto out;
2753 }
2754
J-Alvesc7484f12022-05-13 12:41:14 +01002755 if (share_state->retrieved_fragment_count[receiver_index] == 0 ||
2756 share_state->retrieved_fragment_count[receiver_index] >=
Andrew Walbranca808b12020-05-15 17:22:28 +01002757 share_state->fragment_count) {
2758 dlog_verbose(
2759 "Retrieval of memory with handle %#x not yet started "
2760 "or already completed (%d/%d fragments retrieved).\n",
J-Alvesc7484f12022-05-13 12:41:14 +01002761 handle,
2762 share_state->retrieved_fragment_count[receiver_index],
Andrew Walbranca808b12020-05-15 17:22:28 +01002763 share_state->fragment_count);
2764 ret = ffa_error(FFA_INVALID_PARAMETERS);
2765 goto out;
2766 }
2767
J-Alvesc7484f12022-05-13 12:41:14 +01002768 fragment_index = share_state->retrieved_fragment_count[receiver_index];
Andrew Walbranca808b12020-05-15 17:22:28 +01002769
2770 /*
2771 * Check that the given fragment offset is correct by counting how many
2772 * constituents were in the fragments previously sent.
2773 */
2774 retrieved_constituents_count = 0;
2775 for (i = 0; i < fragment_index; ++i) {
2776 retrieved_constituents_count +=
2777 share_state->fragment_constituent_counts[i];
2778 }
J-Alvesc7484f12022-05-13 12:41:14 +01002779
2780 CHECK(memory_region->receiver_count > 0);
2781
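/*
 * The descriptor returned to a retriever carries only the caller's own
 * ffa_memory_access entry, so the constituent offset it sees is smaller
 * than in the sender's descriptor by (receiver_count - 1) access
 * structures; the subtraction below accounts for that.
 */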
Andrew Walbranca808b12020-05-15 17:22:28 +01002782 expected_fragment_offset =
J-Alvesc7484f12022-05-13 12:41:14 +01002783 ffa_composite_constituent_offset(memory_region,
2784 receiver_index) +
Andrew Walbranca808b12020-05-15 17:22:28 +01002785 retrieved_constituents_count *
J-Alvesc7484f12022-05-13 12:41:14 +01002786 sizeof(struct ffa_memory_region_constituent) -
2787 sizeof(struct ffa_memory_access) *
2788 (memory_region->receiver_count - 1);
Andrew Walbranca808b12020-05-15 17:22:28 +01002789 if (fragment_offset != expected_fragment_offset) {
2790 dlog_verbose("Fragment offset was %d but expected %d.\n",
2791 fragment_offset, expected_fragment_offset);
2792 ret = ffa_error(FFA_INVALID_PARAMETERS);
2793 goto out;
2794 }
2795
2796 remaining_constituent_count = ffa_memory_fragment_init(
2797 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2798 share_state->fragments[fragment_index],
2799 share_state->fragment_constituent_counts[fragment_index],
2800 &fragment_length);
2801 CHECK(remaining_constituent_count == 0);
2802 to_locked.vm->mailbox.recv_size = fragment_length;
2803 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2804 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
2805 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
J-Alvesc7484f12022-05-13 12:41:14 +01002806 share_state->retrieved_fragment_count[receiver_index]++;
2807 if (share_state->retrieved_fragment_count[receiver_index] ==
Andrew Walbranca808b12020-05-15 17:22:28 +01002808 share_state->fragment_count) {
2809 ffa_memory_retrieve_complete(share_states, share_state,
2810 page_pool);
2811 }
2812
2813 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2814 .arg1 = (uint32_t)handle,
2815 .arg2 = (uint32_t)(handle >> 32),
2816 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002817
2818out:
2819 share_states_unlock(&share_states);
2820 dump_share_states();
2821 return ret;
2822}
2823
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002824struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002825 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002826 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002827{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002828 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002829 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002830 struct ffa_memory_share_state *share_state;
2831 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002832 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002833 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01002834 uint32_t receiver_index;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002835
Andrew Walbrana65a1322020-04-06 19:32:32 +01002836 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002837 dlog_verbose(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002838 "Stream endpoints not supported (got %d endpoints on "
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002839 "FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002840 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002841 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002842 }
2843
Andrew Walbrana65a1322020-04-06 19:32:32 +01002844 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002845 dlog_verbose(
2846 "VM ID %d in relinquish message doesn't match calling "
2847 "VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002848 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002849 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002850 }
2851
2852 dump_share_states();
2853
2854 share_states = share_states_lock();
2855 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002856 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002857 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002858 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002859 goto out;
2860 }
2861
Andrew Walbranca808b12020-05-15 17:22:28 +01002862 if (!share_state->sending_complete) {
2863 dlog_verbose(
2864 "Memory with handle %#x not fully sent, can't "
2865 "relinquish.\n",
2866 handle);
2867 ret = ffa_error(FFA_INVALID_PARAMETERS);
2868 goto out;
2869 }
2870
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002871 memory_region = share_state->memory_region;
2872 CHECK(memory_region != NULL);
2873
J-Alves8eb19162022-04-28 10:56:48 +01002874 receiver_index = ffa_memory_region_get_receiver(memory_region,
2875 from_locked.vm->id);
2876
2877 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002878 dlog_verbose(
2879 "VM ID %d tried to relinquish memory region with "
2880 "handle %#x but is not a valid borrower.\n",
2881 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002882 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002883 goto out;
2884 }
2885
J-Alves8eb19162022-04-28 10:56:48 +01002886 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01002887 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002888 dlog_verbose(
J-Alves8eb19162022-04-28 10:56:48 +01002889 "Memory with handle %#x not yet fully retrieved, "
2890 "receiver %x can't relinquish.\n",
2891 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002892 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002893 goto out;
2894 }
2895
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002896 clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002897
2898 /*
2899 * Clear is not allowed for memory that was shared, as the original
2900 * sender still has access to the memory.
2901 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002902 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002903 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002904 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002905 goto out;
2906 }
2907
Andrew Walbranca808b12020-05-15 17:22:28 +01002908 ret = ffa_relinquish_check_update(
2909 from_locked, share_state->fragments,
2910 share_state->fragment_constituent_counts,
2911 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002912
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002913 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002914 /*
2915 * Mark memory handle as not retrieved, so it can be reclaimed
2916 * (or retrieved again).
2917 */
J-Alves8eb19162022-04-28 10:56:48 +01002918 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002919 }
2920
2921out:
2922 share_states_unlock(&share_states);
2923 dump_share_states();
2924 return ret;
2925}
2926
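/*
 * Illustrative relinquish descriptor for the checks in
 * ffa_memory_relinquish above (a sketch, not a mandated layout): a single
 * borrower, VM 1, giving back handle H would set
 *
 *	relinquish_request->handle = H;
 *	relinquish_request->flags = 0;
 *	relinquish_request->endpoint_count = 1;
 *	relinquish_request->endpoints[0] = 1;
 *
 * with FFA_MEMORY_REGION_FLAG_CLEAR added to flags only when the memory
 * was lent, since clearing memory that was shared is rejected.
 */
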
2927/**
2928 * Validates that the reclaim transition is allowed for the given handle,
2929 * updates the page table of the reclaiming VM, and frees the internal state
2930 * associated with the handle.
2931 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002932struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01002933 ffa_memory_handle_t handle,
2934 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002935 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002936{
2937 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002938 struct ffa_memory_share_state *share_state;
2939 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002940 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002941
2942 dump_share_states();
2943
2944 share_states = share_states_lock();
2945 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002946 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002947 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002948 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002949 goto out;
2950 }
2951
2952 memory_region = share_state->memory_region;
2953 CHECK(memory_region != NULL);
2954
2955 if (to_locked.vm->id != memory_region->sender) {
2956 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01002957 "VM %#x attempted to reclaim memory handle %#x "
2958 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002959 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002960 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002961 goto out;
2962 }
2963
Andrew Walbranca808b12020-05-15 17:22:28 +01002964 if (!share_state->sending_complete) {
2965 dlog_verbose(
2966 "Memory with handle %#x not fully sent, can't "
2967 "reclaim.\n",
2968 handle);
2969 ret = ffa_error(FFA_INVALID_PARAMETERS);
2970 goto out;
2971 }
2972
J-Alves752236c2022-04-28 11:07:47 +01002973 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2974 if (share_state->retrieved_fragment_count[i] != 0) {
2975 dlog_verbose(
2976 "Tried to reclaim memory handle %#x that has "
2977 "not been relinquished by all borrowers(%x).\n",
2978 handle,
2979 memory_region->receivers[i]
2980 .receiver_permissions.receiver);
2981 ret = ffa_error(FFA_DENIED);
2982 goto out;
2983 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002984 }
2985
Andrew Walbranca808b12020-05-15 17:22:28 +01002986 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00002987 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002988 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00002989 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01002990 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002991
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002992 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002993 share_state_free(share_states, share_state, page_pool);
2994 dlog_verbose("Freed share state after successful reclaim.\n");
2995 }
2996
2997out:
2998 share_states_unlock(&share_states);
2999 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003000}
Andrew Walbran290b0c92020-02-03 16:37:14 +00003001
3002/**
3003 * Validates that the reclaim transition is allowed for the memory region
3004 * with the given handle, which was previously shared with the other world;
3005 * tells the other world to mark it as reclaimed; and updates the page table
3006 * of the reclaiming VM.
3007 *
3008 * To do this, information about the memory region is first fetched from the
3009 * other world.
3010 */
J-Alves8505a8a2022-06-15 18:10:18 +01003011struct ffa_value ffa_memory_other_world_reclaim(struct vm_locked to_locked,
3012 struct vm_locked from_locked,
3013 ffa_memory_handle_t handle,
3014 ffa_memory_region_flags_t flags,
3015 struct mpool *page_pool)
Andrew Walbran290b0c92020-02-03 16:37:14 +00003016{
Andrew Walbranca808b12020-05-15 17:22:28 +01003017 uint32_t request_length = ffa_memory_lender_retrieve_request_init(
3018 from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
J-Alves8505a8a2022-06-15 18:10:18 +01003019 struct ffa_value other_world_ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003020 uint32_t length;
3021 uint32_t fragment_length;
3022 uint32_t fragment_offset;
3023 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003024 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01003025 uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
3026
3027 CHECK(request_length <= HF_MAILBOX_SIZE);
J-Alves8505a8a2022-06-15 18:10:18 +01003028 CHECK(from_locked.vm->id == HF_OTHER_WORLD_ID);
Andrew Walbranca808b12020-05-15 17:22:28 +01003029
J-Alves8505a8a2022-06-15 18:10:18 +01003030 /* Retrieve memory region information from the other world. */
3031 other_world_ret = arch_other_world_call(
Andrew Walbranca808b12020-05-15 17:22:28 +01003032 (struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
3033 .arg1 = request_length,
3034 .arg2 = request_length});
J-Alves8505a8a2022-06-15 18:10:18 +01003035 if (other_world_ret.func == FFA_ERROR_32) {
3036 dlog_verbose("Got error %d from EL3.\n", other_world_ret.arg2);
3037 return other_world_ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003038 }
J-Alves8505a8a2022-06-15 18:10:18 +01003039 if (other_world_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003040 dlog_verbose(
3041 "Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
J-Alves8505a8a2022-06-15 18:10:18 +01003042 other_world_ret.func);
Andrew Walbranca808b12020-05-15 17:22:28 +01003043 return ffa_error(FFA_INVALID_PARAMETERS);
3044 }
3045
J-Alves8505a8a2022-06-15 18:10:18 +01003046 length = other_world_ret.arg1;
3047 fragment_length = other_world_ret.arg2;
Andrew Walbranca808b12020-05-15 17:22:28 +01003048
3049 if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
J-Alves8505a8a2022-06-15 18:10:18 +01003050 length > sizeof(other_world_retrieve_buffer)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003051 dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
3052 fragment_length, length, HF_MAILBOX_SIZE,
J-Alves8505a8a2022-06-15 18:10:18 +01003053 sizeof(other_world_retrieve_buffer));
Andrew Walbranca808b12020-05-15 17:22:28 +01003054 return ffa_error(FFA_INVALID_PARAMETERS);
3055 }
3056
3057 /*
3058 * Copy the first fragment of the memory region descriptor to an
3059 * internal buffer.
3060 */
J-Alves8505a8a2022-06-15 18:10:18 +01003061 memcpy_s(other_world_retrieve_buffer,
3062 sizeof(other_world_retrieve_buffer),
Andrew Walbranca808b12020-05-15 17:22:28 +01003063 from_locked.vm->mailbox.send, fragment_length);
3064
3065 /* Fetch the remaining fragments into the same buffer. */
3066 fragment_offset = fragment_length;
3067 while (fragment_offset < length) {
J-Alves8505a8a2022-06-15 18:10:18 +01003068 other_world_ret = arch_other_world_call(
Andrew Walbranca808b12020-05-15 17:22:28 +01003069 (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
3070 .arg1 = (uint32_t)handle,
3071 .arg2 = (uint32_t)(handle >> 32),
3072 .arg3 = fragment_offset});
J-Alves8505a8a2022-06-15 18:10:18 +01003073 if (other_world_ret.func != FFA_MEM_FRAG_TX_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003074 dlog_verbose(
J-Alves8505a8a2022-06-15 18:10:18 +01003075 "Got %#x (%d) from other world in response to "
Andrew Walbranca808b12020-05-15 17:22:28 +01003076 "FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
J-Alves8505a8a2022-06-15 18:10:18 +01003077 other_world_ret.func, other_world_ret.arg2);
3078 return other_world_ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003079 }
J-Alves8505a8a2022-06-15 18:10:18 +01003080 if (ffa_frag_handle(other_world_ret) != handle) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003081 dlog_verbose(
3082 "Got FFA_MEM_FRAG_TX for unexpected handle %#x "
3083 "in response to FFA_MEM_FRAG_RX for handle "
3084 "%#x.\n",
J-Alves8505a8a2022-06-15 18:10:18 +01003085 ffa_frag_handle(other_world_ret), handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01003086 return ffa_error(FFA_INVALID_PARAMETERS);
3087 }
J-Alves8505a8a2022-06-15 18:10:18 +01003088 if (ffa_frag_sender(other_world_ret) != 0) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003089 dlog_verbose(
3090 "Got FFA_MEM_FRAG_TX with unexpected sender %d "
3091 "(expected 0).\n",
J-Alves8505a8a2022-06-15 18:10:18 +01003092 ffa_frag_sender(other_world_ret));
Andrew Walbranca808b12020-05-15 17:22:28 +01003093 return ffa_error(FFA_INVALID_PARAMETERS);
3094 }
J-Alves8505a8a2022-06-15 18:10:18 +01003095 fragment_length = other_world_ret.arg3;
Andrew Walbranca808b12020-05-15 17:22:28 +01003096 if (fragment_length > HF_MAILBOX_SIZE ||
3097 fragment_offset + fragment_length > length) {
3098 dlog_verbose(
3099 "Invalid fragment length %d at offset %d (max "
3100 "%d).\n",
3101 fragment_length, fragment_offset,
3102 HF_MAILBOX_SIZE);
3103 return ffa_error(FFA_INVALID_PARAMETERS);
3104 }
J-Alves8505a8a2022-06-15 18:10:18 +01003105 memcpy_s(other_world_retrieve_buffer + fragment_offset,
3106 sizeof(other_world_retrieve_buffer) - fragment_offset,
Andrew Walbranca808b12020-05-15 17:22:28 +01003107 from_locked.vm->mailbox.send, fragment_length);
3108
3109 fragment_offset += fragment_length;
3110 }
3111
J-Alves8505a8a2022-06-15 18:10:18 +01003112 memory_region = (struct ffa_memory_region *)other_world_retrieve_buffer;
Andrew Walbran290b0c92020-02-03 16:37:14 +00003113
3114 if (memory_region->receiver_count != 1) {
3115 /* Only one receiver supported by Hafnium for now. */
3116 dlog_verbose(
3117 "Multiple recipients not supported (got %d, expected "
3118 "1).\n",
3119 memory_region->receiver_count);
Andrew Walbranca808b12020-05-15 17:22:28 +01003120 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran290b0c92020-02-03 16:37:14 +00003121 }
3122
3123 if (memory_region->handle != handle) {
3124 dlog_verbose(
3125 "Got memory region handle %#x from other world but "
3126 "requested handle %#x.\n",
3128 memory_region->handle, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003129 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran290b0c92020-02-03 16:37:14 +00003130 }
3131
3132 /* The original sender must match the caller. */
3133 if (to_locked.vm->id != memory_region->sender) {
3134 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003135 "VM %#x attempted to reclaim memory handle %#x "
3136 "originally sent by VM %#x.\n",
Andrew Walbran290b0c92020-02-03 16:37:14 +00003137 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003138 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran290b0c92020-02-03 16:37:14 +00003139 }
3140
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003141 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbran290b0c92020-02-03 16:37:14 +00003142
3143 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01003144 * Validate that the reclaim transition is allowed for the given memory
J-Alves8505a8a2022-06-15 18:10:18 +01003145 * region, forward the request to the other world and then map the
3146 * memory back into the caller's stage-2 page table.
Andrew Walbran290b0c92020-02-03 16:37:14 +00003147 */
J-Alves8505a8a2022-06-15 18:10:18 +01003148 return ffa_other_world_reclaim_check_update(
Andrew Walbran996d1d12020-05-27 14:08:43 +01003149 to_locked, handle, composite->constituents,
Andrew Walbranca808b12020-05-15 17:22:28 +01003150 composite->constituent_count, memory_to_attributes,
3151 flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran290b0c92020-02-03 16:37:14 +00003152}
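
/*
 * End-to-end sketch of the other-world reclaim path above (illustrative):
 * the lender's FFA_MEM_RECLAIM prompts Hafnium to send a lender retrieve
 * request (FFA_MEM_RETRIEVE_REQ) to the other world, pull any remaining
 * fragments with FFA_MEM_FRAG_RX, validate the returned descriptor, and
 * finally forward the reclaim and restore the region in the lender's
 * stage-2 page table.
 */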