/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
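
/*
 * Illustrative sketch (not part of the build, hypothetical index value):
 * a handle produced by `plat_ffa_memory_handle_make(5)` sets the allocator
 * bit(s) covered by `FFA_MEMORY_HANDLE_ALLOCATOR_MASK`, so masking them off
 * recovers the share state index:
 *
 *   ffa_memory_handle_get_index(plat_ffa_memory_handle_make(5)) == 5
 */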

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
bool allocate_share_state(struct share_states_locked share_states,
			  uint32_t share_func,
			  struct ffa_memory_region *memory_region,
			  uint32_t fragment_length, ffa_memory_handle_t handle,
			  struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}
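
/*
 * Worked example for the constituent count above (hypothetical numbers): if
 * the first fragment is 256 bytes long and the composite constituents start
 * at offset 64 within the memory region, the fragment carries
 * (256 - 64) / sizeof(struct ffa_memory_region_constituent) constituents,
 * i.e. 192 / 16 = 12 on a build where each constituent is a 16-byte
 * (address, page_count, reserved) record.
 */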

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
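
/*
 * Typical usage sketch for the pair above (illustrative only):
 *
 *   struct share_states_locked share_states = share_states_lock();
 *
 *   ... operate on share_states.share_states[...] ...
 *
 *   share_states_unlock(&share_states);
 *
 * Unlocking NULLs the wrapped pointer, so reusing a stale wrapper trips the
 * assertions in the helpers below.
 */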

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
bool get_share_state(struct share_states_locked share_states,
		     ffa_memory_handle_t handle,
		     struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint64_t index;

	assert(share_states.share_states != NULL);
	assert(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		index = ffa_memory_handle_get_index(handle);
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}
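
/*
 * Example (hypothetical numbers): for a composite of 20 constituents sent as
 * two fragments of 12 and 8 constituents, the totals match (12 + 8 == 20) and
 * the share state is reported as fully sent.
 */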

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
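
/*
 * Worked example (hypothetical numbers): with constituents starting at
 * offset 64 and fragments of 12 and 8 constituents already received, the
 * next fragment is expected at 64 + (12 + 8) * 16 = 384 bytes into the full
 * transaction descriptor, assuming 16-byte constituents.
 */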

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}
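
/*
 * Example of the mapping above: RW data access with NX instruction access
 * yields MM_MODE_R | MM_MODE_W, while the NOT_SPECIFIED values inherit the
 * corresponding bits of `default_mode`, e.g. the sender's original mode.
 */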

/**
 * Gets the current mode in the stage-2 page table of the given VM for all the
 * pages in the given constituents, if they all have the same mode, or returns
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode, taking the very first constituent as the
			 * reference.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *   1) FFA_DENIED if a state transition was not found;
 *   2) FFA_DENIED if the pages being shared do not have the same mode within
 *      the <from> VM;
 *   3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *      aligned;
 *   4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
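
/*
 * Summary of the resulting sender modes (derived from the switch above):
 * DONATE leaves the pages invalid and unowned for the sender, LEND leaves
 * them owned but invalid, and SHARE keeps them mapped but marked shared.
 */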

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *   1) FFA_DENIED if a state transition was not found;
 *   2) FFA_DENIED if the pages being shared do not have the same mode within
 *      the <to> VM;
 *   3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *      aligned;
 *   4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, `orig_to_mode` is expected to overlap with
		 * `state_mask`, as a result of `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is at the virtual FF-A instance, ensure it
		 * has the expected state. We don't care about the
		 * MM_MODE_SHARED bit; either with or without it set are both
		 * valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
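
/*
 * Summary of the resulting retriever modes (from the switch above): a
 * retrieved SHARE maps the pages unowned and shared, a LEND maps them
 * unowned, and DONATE or RECLAIM apply `memory_to_attributes` unchanged.
 */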

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls into system's PA
			 * range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
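
/*
 * Usage sketch of the two-phase pattern documented above (illustrative only;
 * variable names are placeholders): reserve first without committing, then
 * commit, which cannot fail.
 *
 *   if (ffa_region_group_identity_map(vm, frags, counts, n, mode, ppool,
 *                                     false)) {
 *           CHECK(ffa_region_group_identity_map(vm, frags, counts, n, mode,
 *                                               ppool, true));
 *   }
 */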

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a set of memory region constituents by overwriting them with zeros.
 * The data is flushed from the cache so the memory has been cleared across the
 * system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}
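
/*
 * The local-pool pattern used above, in a nutshell (illustrative only):
 *
 *   struct mpool local_page_pool;
 *
 *   mpool_init_with_fallback(&local_page_pool, page_pool);
 *   ... map/unmap against &local_page_pool ...
 *   mpool_fini(&local_page_pool);  <- returns pages to page_pool
 *
 * Pages freed into the local pool stay reserved for this operation until
 * mpool_fini(), so a concurrent operation cannot consume memory that a
 * rollback might need.
 */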

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request;
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, struct ffa_memory_access *receivers,
	uint32_t receivers_count, struct mpool *page_pool, bool clear,
	uint32_t *orig_from_mode_ret)
{
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, receivers,
					receivers_count, &orig_from_mode,
					fragments, fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked, ffa_vm_id_t from_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Fragment not properly aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		dlog_verbose("Couldn't clear constituents.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(to_locked, page_pool);

	return ret;
}

static struct ffa_value ffa_relinquish_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	ret = ffa_relinquish_check_transition(
		from_locked, &orig_from_mode, fragments,
		fragment_constituent_counts, fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for relinquish.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}
1097
1098/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001099 * Complete a memory sending operation by checking that it is valid, updating
1100 * the sender page table, and then either marking the share state as having
1101 * completed sending (on success) or freeing it (on failure).
1102 *
1103 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1104 */
J-Alvesfdd29272022-07-19 13:16:31 +01001105struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001106 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001107 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1108 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001109{
1110 struct ffa_memory_region *memory_region = share_state->memory_region;
1111 struct ffa_value ret;
1112
1113 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001114 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001115
1116 /* Check that state is valid in sender page table and update. */
1117 ret = ffa_send_check_update(
1118 from_locked, share_state->fragments,
1119 share_state->fragment_constituent_counts,
1120 share_state->fragment_count, share_state->share_func,
J-Alves363f5722022-04-25 17:37:37 +01001121 memory_region->receivers, memory_region->receiver_count,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001122 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1123 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001124 if (ret.func != FFA_SUCCESS_32) {
1125 /*
1126 * Free share state, it failed to send so it can't be retrieved.
1127 */
1128 dlog_verbose("Complete failed, freeing share state.\n");
1129 share_state_free(share_states, share_state, page_pool);
1130 return ret;
1131 }
1132
1133 share_state->sending_complete = true;
1134 dlog_verbose("Marked sending complete.\n");
1135
J-Alvesee68c542020-10-29 17:48:20 +00001136 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001137}
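
/*
 * Illustrative sketch (guarded out of the build): the FFA_SUCCESS value
 * returned above carries the 64-bit memory handle split across two 32-bit
 * arguments, mirroring how FFA_MEM_FRAG_RX packs handles later in this file.
 * The shape of ffa_mem_success() is an assumption here; its definition lives
 * elsewhere.
 */
#if 0
static struct ffa_value example_mem_success(ffa_memory_handle_t handle)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = (uint32_t)handle,
				  .arg3 = (uint32_t)(handle >> 32)};
}
#endif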
1138
1139/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001140 * Check that the memory attributes match Hafnium expectations:
1141 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1142 * Write-Allocate Cacheable.
1143 */
1144static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001145 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001146{
1147 enum ffa_memory_type memory_type;
1148 enum ffa_memory_cacheability cacheability;
1149 enum ffa_memory_shareability shareability;
1150
1151 memory_type = ffa_get_memory_type_attr(attributes);
1152 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1153 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1154 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001155 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001156 }
1157
1158 cacheability = ffa_get_memory_cacheability_attr(attributes);
1159 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1160 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1161 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001162 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001163 }
1164
1165 shareability = ffa_get_memory_shareability_attr(attributes);
1166 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1167 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1168 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001169 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001170 }
1171
1172 return (struct ffa_value){.func = FFA_SUCCESS_32};
1173}
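
/*
 * Illustrative sketch (guarded out of the build): attributes that would pass
 * ffa_memory_attributes_validate() above. The ffa_set_memory_*_attr setters
 * are assumed counterparts of the ffa_get_memory_*_attr getters used in this
 * file.
 */
#if 0
static ffa_memory_attributes_t example_valid_attributes(void)
{
	ffa_memory_attributes_t attributes = 0;

	ffa_set_memory_type_attr(&attributes, FFA_MEMORY_NORMAL_MEM);
	ffa_set_memory_cacheability_attr(&attributes,
					 FFA_MEMORY_CACHE_WRITE_BACK);
	ffa_set_memory_shareability_attr(&attributes,
					 FFA_MEMORY_INNER_SHAREABLE);

	return attributes;
}
#endif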
1174
1175/**
 * Check that the given `memory_region` represents a valid memory send request
 * of the given `share_func` type, and update the stored receiver permissions
 * if necessary (e.g. setting the NX bit for memory shares).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001179 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001180 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001181 * not.
1182 */
J-Alves66652252022-07-06 09:49:51 +01001183struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001184 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1185 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001186 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001187{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001188 struct ffa_composite_memory_region *composite;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001189 uint32_t receivers_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001190 uint32_t composite_memory_region_offset;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001191 uint32_t constituents_offset;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001192 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001193 enum ffa_data_access data_access;
1194 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001195 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001196 struct ffa_value ret;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001197
J-Alves0b6653d2022-04-22 13:17:38 +01001198 assert(memory_region->receivers_offset ==
1199 offsetof(struct ffa_memory_region, receivers));
1200 assert(memory_region->memory_access_desc_size ==
1201 sizeof(struct ffa_memory_access));
1202
J-Alves95df0ef2022-12-07 10:09:48 +00001203 /* The sender must match the caller. */
1204 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1205 vm_id_is_current_world(memory_region->sender)) ||
1206 (vm_id_is_current_world(from_locked.vm->id) &&
1207 memory_region->sender != from_locked.vm->id)) {
1208 dlog_verbose("Invalid memory sender ID.\n");
1209 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001210 }
1211
Andrew Walbrana65a1322020-04-06 19:32:32 +01001212 /*
1213 * Ensure that the composite header is within the memory bounds and
1214 * doesn't overlap the first part of the message.
1215 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001216 receivers_length = sizeof(struct ffa_memory_access) *
1217 memory_region->receiver_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001218 constituents_offset =
1219 ffa_composite_constituent_offset(memory_region, 0);
Federico Recanati872cd692022-01-05 13:10:10 +01001220 composite_memory_region_offset =
1221 memory_region->receivers[0].composite_memory_region_offset;
1222 if ((composite_memory_region_offset == 0) ||
1223 (composite_memory_region_offset <
1224 sizeof(struct ffa_memory_region) + receivers_length) ||
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001225 constituents_offset > fragment_length) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001226 dlog_verbose(
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001227 "Invalid composite memory region descriptor offset "
1228 "%d.\n",
1229 memory_region->receivers[0]
1230 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001231 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001232 }
1233
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001234 composite = ffa_memory_region_get_composite(memory_region, 0);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001235
1236 /*
	 * Ensure the number of constituents is within the memory bounds.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001238 */
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001239 constituents_length = sizeof(struct ffa_memory_region_constituent) *
1240 composite->constituent_count;
Andrew Walbran352aa3d2020-05-01 17:51:33 +01001241 if (memory_share_length != constituents_offset + constituents_length) {
1242 dlog_verbose("Invalid length %d or composite offset %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001243 memory_share_length,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001244 memory_region->receivers[0]
1245 .composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001246 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001247 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001248 if (fragment_length < memory_share_length &&
1249 fragment_length < HF_MAILBOX_SIZE) {
1250 dlog_warning(
1251 "Initial fragment length %d smaller than mailbox "
1252 "size.\n",
1253 fragment_length);
1254 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001255
Andrew Walbrana65a1322020-04-06 19:32:32 +01001256 /*
1257 * Clear is not allowed for memory sharing, as the sender still has
1258 * access to the memory.
1259 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001260 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1261 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001262 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001263 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001264 }
1265
1266 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001267 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001268 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001269 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001270 }
1271
J-Alves363f5722022-04-25 17:37:37 +01001272 /* Check that the permissions are valid, for each specified receiver. */
1273 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1274 ffa_memory_access_permissions_t permissions =
1275 memory_region->receivers[i]
1276 .receiver_permissions.permissions;
1277 ffa_vm_id_t receiver_id =
1278 memory_region->receivers[i]
1279 .receiver_permissions.receiver;
1280
1281 if (memory_region->sender == receiver_id) {
1282 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001283 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001284 }
Federico Recanati85090c42021-12-15 13:17:54 +01001285
J-Alves363f5722022-04-25 17:37:37 +01001286 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1287 j++) {
1288 if (receiver_id ==
1289 memory_region->receivers[j]
1290 .receiver_permissions.receiver) {
1291 dlog_verbose(
1292 "Repeated receiver(%x) in memory send "
1293 "operation.\n",
1294 memory_region->receivers[j]
1295 .receiver_permissions.receiver);
1296 return ffa_error(FFA_INVALID_PARAMETERS);
1297 }
1298 }
1299
1300 if (composite_memory_region_offset !=
1301 memory_region->receivers[i]
1302 .composite_memory_region_offset) {
1303 dlog_verbose(
1304 "All ffa_memory_access should point to the "
1305 "same composite memory region offset.\n");
1306 return ffa_error(FFA_INVALID_PARAMETERS);
1307 }
1308
1309 data_access = ffa_get_data_access_attr(permissions);
1310 instruction_access =
1311 ffa_get_instruction_access_attr(permissions);
1312 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1313 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1314 dlog_verbose(
1315 "Reserved value for receiver permissions "
1316 "%#x.\n",
1317 permissions);
1318 return ffa_error(FFA_INVALID_PARAMETERS);
1319 }
1320 if (instruction_access !=
1321 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1322 dlog_verbose(
1323 "Invalid instruction access permissions %#x "
1324 "for sending memory.\n",
1325 permissions);
1326 return ffa_error(FFA_INVALID_PARAMETERS);
1327 }
1328 if (share_func == FFA_MEM_SHARE_32) {
1329 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1330 dlog_verbose(
1331 "Invalid data access permissions %#x "
1332 "for sharing memory.\n",
1333 permissions);
1334 return ffa_error(FFA_INVALID_PARAMETERS);
1335 }
1336 /*
1337 * According to section 10.10.3 of the FF-A v1.1 EAC0
1338 * spec, NX is required for share operations (but must
1339 * not be specified by the sender) so set it in the
1340 * copy that we store, ready to be returned to the
1341 * retriever.
1342 */
J-Alvesb19731a2022-06-20 17:30:33 +01001343 if (vm_id_is_current_world(receiver_id)) {
1344 ffa_set_instruction_access_attr(
1345 &permissions,
1346 FFA_INSTRUCTION_ACCESS_NX);
1347 memory_region->receivers[i]
1348 .receiver_permissions.permissions =
1349 permissions;
1350 }
J-Alves363f5722022-04-25 17:37:37 +01001351 }
1352 if (share_func == FFA_MEM_LEND_32 &&
1353 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1354 dlog_verbose(
1355 "Invalid data access permissions %#x for "
1356 "lending memory.\n",
1357 permissions);
1358 return ffa_error(FFA_INVALID_PARAMETERS);
1359 }
1360
1361 if (share_func == FFA_MEM_DONATE_32 &&
1362 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1363 dlog_verbose(
1364 "Invalid data access permissions %#x for "
1365 "donating memory.\n",
1366 permissions);
1367 return ffa_error(FFA_INVALID_PARAMETERS);
1368 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001369 }
1370
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001371 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1372 security_state =
1373 ffa_get_memory_security_attr(memory_region->attributes);
1374 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1375 dlog_verbose(
1376 "Invalid security state for memory share operation.\n");
1377 return ffa_error(FFA_INVALID_PARAMETERS);
1378 }
1379
Federico Recanatid937f5e2021-12-20 17:38:23 +01001380 /*
	 * For a memory donate operation, or a lend with a single borrower,
	 * the memory type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001383 */
J-Alves807794e2022-06-16 13:42:47 +01001384 if (share_func == FFA_MEM_DONATE_32 ||
1385 (share_func == FFA_MEM_LEND_32 &&
1386 memory_region->receiver_count == 1)) {
1387 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1388 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1389 dlog_verbose(
1390 "Memory type shall not be specified by "
1391 "sender.\n");
1392 return ffa_error(FFA_INVALID_PARAMETERS);
1393 }
1394 } else {
1395 /*
1396 * Check that sender's memory attributes match Hafnium
1397 * expectations: Normal Memory, Inner shareable, Write-Back
1398 * Read-Allocate Write-Allocate Cacheable.
1399 */
1400 ret = ffa_memory_attributes_validate(memory_region->attributes);
1401 if (ret.func != FFA_SUCCESS_32) {
1402 return ret;
1403 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001404 }
1405
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001406 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001407}
1408
1409/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001410 * Gets the share state for continuing an operation to donate, lend or share
1411 * memory, and checks that it is a valid request.
1412 *
1413 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1414 * not.
1415 */
J-Alvesfdd29272022-07-19 13:16:31 +01001416struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001417 struct share_states_locked share_states, ffa_memory_handle_t handle,
1418 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1419 struct mpool *page_pool)
1420{
1421 struct ffa_memory_share_state *share_state;
1422 struct ffa_memory_region *memory_region;
1423
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001424 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001425
1426 /*
1427 * Look up the share state by handle and make sure that the VM ID
1428 * matches.
1429 */
1430 if (!get_share_state(share_states, handle, &share_state)) {
1431 dlog_verbose(
1432 "Invalid handle %#x for memory send continuation.\n",
1433 handle);
1434 return ffa_error(FFA_INVALID_PARAMETERS);
1435 }
1436 memory_region = share_state->memory_region;
1437
J-Alvesfdd29272022-07-19 13:16:31 +01001438 if (vm_id_is_current_world(from_vm_id) &&
1439 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001440 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1441 return ffa_error(FFA_INVALID_PARAMETERS);
1442 }
1443
1444 if (share_state->sending_complete) {
1445 dlog_verbose(
1446 "Sending of memory handle %#x is already complete.\n",
1447 handle);
1448 return ffa_error(FFA_INVALID_PARAMETERS);
1449 }
1450
1451 if (share_state->fragment_count == MAX_FRAGMENTS) {
1452 /*
1453 * Log a warning as this is a sign that MAX_FRAGMENTS should
1454 * probably be increased.
1455 */
1456 dlog_warning(
1457 "Too many fragments for memory share with handle %#x; "
1458 "only %d supported.\n",
1459 handle, MAX_FRAGMENTS);
1460 /* Free share state, as it's not possible to complete it. */
1461 share_state_free(share_states, share_state, page_pool);
1462 return ffa_error(FFA_NO_MEMORY);
1463 }
1464
1465 *share_state_ret = share_state;
1466
1467 return (struct ffa_value){.func = FFA_SUCCESS_32};
1468}
1469
1470/**
J-Alves95df0ef2022-12-07 10:09:48 +00001471 * Checks if there is at least one receiver from the other world.
1472 */
J-Alvesfdd29272022-07-19 13:16:31 +01001473bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001474 struct ffa_memory_region *memory_region)
1475{
1476 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
1477 ffa_vm_id_t receiver = memory_region->receivers[i]
1478 .receiver_permissions.receiver;
1479 if (!vm_id_is_current_world(receiver)) {
1480 return true;
1481 }
1482 }
1483 return false;
1484}
1485
1486/**
 * Validates a call to donate, lend or share memory to a VM in the current
 * world and then updates the stage-2 page tables. Specifically, checks that
 * the message length and number of memory region constituents match, and that
 * the transition is valid for the type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001491 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001492 * Assumes that the caller has already found and locked the sender VM and copied
1493 * the memory region descriptor from the sender's TX buffer to a freshly
1494 * allocated page from Hafnium's internal pool. The caller must have also
1495 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001496 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001497 * This function takes ownership of the `memory_region` passed in and will free
1498 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001499 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001500struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001501 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001502 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001503 uint32_t fragment_length, uint32_t share_func,
1504 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001505{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001506 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001507 struct share_states_locked share_states;
1508 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001509
1510 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001511 * If there is an error validating the `memory_region` then we need to
1512 * free it because we own it but we won't be storing it in a share state
1513 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001514 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001515 ret = ffa_memory_send_validate(from_locked, memory_region,
1516 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001517 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001518 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001519 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001520 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001521 }
1522
Andrew Walbrana65a1322020-04-06 19:32:32 +01001523 /* Set flag for share function, ready to be retrieved later. */
1524 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001525 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001526 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001527 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001528 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001529 case FFA_MEM_LEND_32:
1530 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001531 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001532 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001533 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001534 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001535 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001536 }
1537
Andrew Walbranca808b12020-05-15 17:22:28 +01001538 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001539 /*
1540 * Allocate a share state before updating the page table. Otherwise if
1541 * updating the page table succeeded but allocating the share state
1542 * failed then it would leave the memory in a state where nobody could
1543 * get it back.
1544 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001545 if (!allocate_share_state(share_states, share_func, memory_region,
1546 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1547 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001548 dlog_verbose("Failed to allocate share state.\n");
1549 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001550 ret = ffa_error(FFA_NO_MEMORY);
1551 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001552 }
1553
Andrew Walbranca808b12020-05-15 17:22:28 +01001554 if (fragment_length == memory_share_length) {
1555 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001556 ret = ffa_memory_send_complete(
1557 from_locked, share_states, share_state, page_pool,
1558 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001559 } else {
		/*
		 * Return the sender ID from `memory_region`, which has been
		 * validated at this point. It must be zero when returned at
		 * a virtual FF-A instance.
		 */
1565 ffa_vm_id_t sender_to_ret =
1566 (from_locked.vm->id == HF_OTHER_WORLD_ID)
1567 ? memory_region->sender
1568 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01001569 ret = (struct ffa_value){
1570 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001571 .arg1 = (uint32_t)memory_region->handle,
1572 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01001573 .arg3 = fragment_length,
1574 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01001575 }
1576
1577out:
1578 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001579 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001580 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001581}
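
/*
 * Illustrative sketch (guarded out of the build): how a sender drives the
 * fragmented send protocol implemented above. ffa_mem_frag_tx() and
 * copy_next_fragment_to_tx_buffer() are assumed helpers, not part of this
 * file; the handle and next fragment offset come from the FFA_MEM_FRAG_RX
 * value returned by ffa_memory_send()/ffa_memory_send_continue().
 */
#if 0
ffa_memory_handle_t handle =
	(ffa_memory_handle_t)ret.arg1 | ((ffa_memory_handle_t)ret.arg2 << 32);
uint32_t fragment_length;

while (ret.func == FFA_MEM_FRAG_RX_32) {
	/* ret.arg3 is the offset at which the next fragment must start. */
	copy_next_fragment_to_tx_buffer(ret.arg3, &fragment_length);
	ret = ffa_mem_frag_tx(handle, fragment_length);
}
#endif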
1582
1583/**
 * Continues an operation to donate, lend or share memory to a VM in the
 * current world. If this is the last fragment then checks that the transition
 * is valid for the type of memory sending operation and updates the stage-2
 * page tables of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001588 *
1589 * Assumes that the caller has already found and locked the sender VM and copied
1590 * the memory region descriptor from the sender's TX buffer to a freshly
1591 * allocated page from Hafnium's internal pool.
1592 *
1593 * This function takes ownership of the `fragment` passed in; it must not be
1594 * freed by the caller.
1595 */
1596struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1597 void *fragment,
1598 uint32_t fragment_length,
1599 ffa_memory_handle_t handle,
1600 struct mpool *page_pool)
1601{
1602 struct share_states_locked share_states = share_states_lock();
1603 struct ffa_memory_share_state *share_state;
1604 struct ffa_value ret;
1605 struct ffa_memory_region *memory_region;
1606
1607 ret = ffa_memory_send_continue_validate(share_states, handle,
1608 &share_state,
1609 from_locked.vm->id, page_pool);
1610 if (ret.func != FFA_SUCCESS_32) {
1611 goto out_free_fragment;
1612 }
1613 memory_region = share_state->memory_region;
1614
J-Alves95df0ef2022-12-07 10:09:48 +00001615 if (memory_region_receivers_from_other_world(memory_region)) {
		dlog_error(
			"Got hypervisor-allocated handle for memory send to "
			"other world. This should never happen, and indicates "
			"a bug in EL3 code.\n");
1621 ret = ffa_error(FFA_INVALID_PARAMETERS);
1622 goto out_free_fragment;
1623 }
1624
1625 /* Add this fragment. */
1626 share_state->fragments[share_state->fragment_count] = fragment;
1627 share_state->fragment_constituent_counts[share_state->fragment_count] =
1628 fragment_length / sizeof(struct ffa_memory_region_constituent);
1629 share_state->fragment_count++;
1630
1631 /* Check whether the memory send operation is now ready to complete. */
1632 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001633 ret = ffa_memory_send_complete(
1634 from_locked, share_states, share_state, page_pool,
1635 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001636 } else {
1637 ret = (struct ffa_value){
1638 .func = FFA_MEM_FRAG_RX_32,
1639 .arg1 = (uint32_t)handle,
1640 .arg2 = (uint32_t)(handle >> 32),
1641 .arg3 = share_state_next_fragment_offset(share_states,
1642 share_state)};
1643 }
1644 goto out;
1645
1646out_free_fragment:
1647 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001648
1649out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001650 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001651 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001652}
1653
Andrew Walbranca808b12020-05-15 17:22:28 +01001654/** Clean up after the receiver has finished retrieving a memory region. */
1655static void ffa_memory_retrieve_complete(
1656 struct share_states_locked share_states,
1657 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1658{
1659 if (share_state->share_func == FFA_MEM_DONATE_32) {
1660 /*
1661 * Memory that has been donated can't be relinquished,
1662 * so no need to keep the share state around.
1663 */
1664 share_state_free(share_states, share_state, page_pool);
1665 dlog_verbose("Freed share state for donate.\n");
1666 }
1667}
1668
J-Alves96de29f2022-04-26 16:05:24 +01001669/*
 * Gets the index of the given receiver in the memory region's receivers
 * array. If the receiver's ID doesn't exist in the array, returns the
 * region's 'receiver_count'.
1673 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001674uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
1675 ffa_vm_id_t receiver)
J-Alves96de29f2022-04-26 16:05:24 +01001676{
1677 struct ffa_memory_access *receivers;
1678 uint32_t i;
1679
1680 assert(memory_region != NULL);
1681
1682 receivers = memory_region->receivers;
1683
1684 for (i = 0U; i < memory_region->receiver_count; i++) {
1685 if (receivers[i].receiver_permissions.receiver == receiver) {
1686 break;
1687 }
1688 }
1689
1690 return i;
1691}
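
/*
 * Usage sketch (guarded out of the build), mirroring the callers in this
 * file: a return value equal to the region's `receiver_count` means the ID
 * is not a borrower in the transaction.
 */
#if 0
uint32_t index = ffa_memory_region_get_receiver(memory_region, receiver_id);

if (index == memory_region->receiver_count) {
	/* `receiver_id` is not part of this memory sharing transaction. */
	return ffa_error(FFA_INVALID_PARAMETERS);
}
#endif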
1692
1693/**
1694 * Validates the retrieved permissions against those specified by the lender
 * of the memory share operation. Optionally returns the permissions to be
 * used for the S2 mapping via the `permissions` argument.
1697 * Returns true if permissions are valid, false otherwise.
1698 */
1699static bool ffa_memory_retrieve_is_memory_access_valid(
1700 enum ffa_data_access sent_data_access,
1701 enum ffa_data_access requested_data_access,
1702 enum ffa_instruction_access sent_instruction_access,
1703 enum ffa_instruction_access requested_instruction_access,
1704 ffa_memory_access_permissions_t *permissions)
1705{
1706 switch (sent_data_access) {
1707 case FFA_DATA_ACCESS_NOT_SPECIFIED:
1708 case FFA_DATA_ACCESS_RW:
1709 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1710 requested_data_access == FFA_DATA_ACCESS_RW) {
1711 if (permissions != NULL) {
1712 ffa_set_data_access_attr(permissions,
1713 FFA_DATA_ACCESS_RW);
1714 }
1715 break;
1716 }
1717 /* Intentional fall-through. */
1718 case FFA_DATA_ACCESS_RO:
1719 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1720 requested_data_access == FFA_DATA_ACCESS_RO) {
1721 if (permissions != NULL) {
1722 ffa_set_data_access_attr(permissions,
1723 FFA_DATA_ACCESS_RO);
1724 }
1725 break;
1726 }
1727 dlog_verbose(
1728 "Invalid data access requested; sender specified "
1729 "permissions %#x but receiver requested %#x.\n",
1730 sent_data_access, requested_data_access);
1731 return false;
1732 case FFA_DATA_ACCESS_RESERVED:
1733 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
1734 "checked before this point.");
1735 }
1736
1737 switch (sent_instruction_access) {
1738 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
1739 case FFA_INSTRUCTION_ACCESS_X:
1740 if (requested_instruction_access ==
1741 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1742 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
1743 if (permissions != NULL) {
1744 ffa_set_instruction_access_attr(
1745 permissions, FFA_INSTRUCTION_ACCESS_X);
1746 }
1747 break;
1748 }
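		/* Intentional fall-through. */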
1749 case FFA_INSTRUCTION_ACCESS_NX:
1750 if (requested_instruction_access ==
1751 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1752 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
1753 if (permissions != NULL) {
1754 ffa_set_instruction_access_attr(
1755 permissions, FFA_INSTRUCTION_ACCESS_NX);
1756 }
1757 break;
1758 }
1759 dlog_verbose(
1760 "Invalid instruction access requested; sender "
1761 "specified permissions %#x but receiver requested "
1762 "%#x.\n",
1763 sent_instruction_access, requested_instruction_access);
1764 return false;
1765 case FFA_INSTRUCTION_ACCESS_RESERVED:
1766 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
1767 "be checked before this point.");
1768 }
1769
1770 return true;
1771}
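
/*
 * Summary of the data-access matrix implemented above (sent vs. requested):
 * - sent NOT_SPECIFIED or RW: requests of NOT_SPECIFIED or RW map as RW;
 *   a request of RO falls through and maps as RO.
 * - sent RO: requests of NOT_SPECIFIED or RO map as RO; a request of RW is
 *   denied.
 * The instruction-access matrix is analogous, with X and NX.
 */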
1772
1773/**
1774 * Validate the receivers' permissions in the retrieve request against those
1775 * specified by the lender.
1776 * In the `permissions` argument returns the permissions to set at S2 for the
1777 * caller to the FFA_MEMORY_RETRIEVE_REQ.
1778 * Returns FFA_SUCCESS if all specified permissions are valid.
1779 */
1780static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
1781 struct ffa_memory_region *memory_region,
1782 struct ffa_memory_region *retrieve_request, ffa_vm_id_t to_vm_id,
1783 ffa_memory_access_permissions_t *permissions)
1784{
1785 uint32_t retrieve_receiver_index;
1786
1787 assert(permissions != NULL);
1788
1789 if (retrieve_request->receiver_count != memory_region->receiver_count) {
1790 dlog_verbose(
1791 "Retrieve request should contain same list of "
1792 "borrowers, as specified by the lender.\n");
1793 return ffa_error(FFA_INVALID_PARAMETERS);
1794 }
1795
1796 retrieve_receiver_index = retrieve_request->receiver_count;
1797
1798 /* Should be populated with the permissions of the retriever. */
1799 *permissions = 0;
1800
1801 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
1802 ffa_memory_access_permissions_t sent_permissions;
1803 struct ffa_memory_access *current_receiver =
1804 &retrieve_request->receivers[i];
1805 ffa_memory_access_permissions_t requested_permissions =
1806 current_receiver->receiver_permissions.permissions;
1807 ffa_vm_id_t current_receiver_id =
1808 current_receiver->receiver_permissions.receiver;
1809 bool found_to_id = current_receiver_id == to_vm_id;
1810
1811 /*
1812 * Find the current receiver in the transaction descriptor from
1813 * sender.
1814 */
1815 uint32_t mem_region_receiver_index =
1816 ffa_memory_region_get_receiver(memory_region,
1817 current_receiver_id);
1818
1819 if (mem_region_receiver_index ==
1820 memory_region->receiver_count) {
1821 dlog_verbose("%s: receiver %x not found\n", __func__,
1822 current_receiver_id);
1823 return ffa_error(FFA_DENIED);
1824 }
1825
1826 sent_permissions =
1827 memory_region->receivers[mem_region_receiver_index]
1828 .receiver_permissions.permissions;
1829
		/*
		 * While traversing the list of receivers, save the index of
		 * the caller; its presence in the list is checked after the
		 * loop.
		 */
		if (found_to_id) {
			retrieve_receiver_index = i;
		}

1839 if (current_receiver->composite_memory_region_offset != 0U) {
1840 dlog_verbose(
1841 "Retriever specified address ranges not "
1842 "supported (got offset %d).\n",
1843 current_receiver
1844 ->composite_memory_region_offset);
1845 return ffa_error(FFA_INVALID_PARAMETERS);
1846 }
1847
1848 /*
1849 * Check permissions from sender against permissions requested
1850 * by receiver.
1851 */
1852 if (!ffa_memory_retrieve_is_memory_access_valid(
1853 ffa_get_data_access_attr(sent_permissions),
1854 ffa_get_data_access_attr(requested_permissions),
1855 ffa_get_instruction_access_attr(sent_permissions),
1856 ffa_get_instruction_access_attr(
1857 requested_permissions),
1858 found_to_id ? permissions : NULL)) {
1859 return ffa_error(FFA_DENIED);
1860 }
1861
1862 /*
1863 * Can't request PM to clear memory if only provided with RO
1864 * permissions.
1865 */
1866 if (found_to_id &&
1867 (ffa_get_data_access_attr(*permissions) ==
1868 FFA_DATA_ACCESS_RO) &&
1869 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
1870 0U) {
1871 dlog_verbose(
1872 "Receiver has RO permissions can not request "
1873 "clear.\n");
1874 return ffa_error(FFA_DENIED);
1875 }
1876 }
1877
1878 if (retrieve_receiver_index == retrieve_request->receiver_count) {
1879 dlog_verbose(
1880 "Retrieve request does not contain caller's (%x) "
1881 "permissions\n",
1882 to_vm_id);
1883 return ffa_error(FFA_INVALID_PARAMETERS);
1884 }
1885
1886 return (struct ffa_value){.func = FFA_SUCCESS_32};
1887}
1888
J-Alvesa9cd7e32022-07-01 13:49:33 +01001889/*
1890 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
1891 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
1892 * of a pending memory sharing operation whose allocator is the SPM, for
1893 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
1894 * the memory region descriptor of the retrieve request must be zeroed with the
1895 * exception of the sender ID and handle.
1896 */
bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request,
					struct vm_locked to_locked)
1899{
1900 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
1901 request->attributes == 0U && request->flags == 0U &&
1902 request->tag == 0U && request->receiver_count == 0U &&
1903 plat_ffa_memory_handle_allocated_by_current_world(
1904 request->handle);
1905}
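
/*
 * Illustrative sketch (guarded out of the build): the shape of a hypervisor
 * retrieve request accepted by the predicate above, zeroed except for the
 * sender ID and handle. `sender_id` and `handle` are placeholders.
 */
#if 0
struct ffa_memory_region request = {
	.sender = sender_id,
	.handle = handle,
	/* attributes, flags, tag and receiver_count left as zero. */
};
#endif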
1906
1907/*
 * Helper to reset the hypervisor's retrieved-fragment count once it has
 * retrieved all fragments.
1909 */
1910static void ffa_memory_retrieve_complete_from_hyp(
1911 struct ffa_memory_share_state *share_state)
1912{
1913 if (share_state->hypervisor_fragment_count ==
1914 share_state->fragment_count) {
1915 share_state->hypervisor_fragment_count = 0;
1916 }
1917}
1918
J-Alves089004f2022-07-13 14:25:44 +01001919/**
1920 * Validate that the memory region descriptor provided by the borrower on
1921 * FFA_MEM_RETRIEVE_REQ, against saved memory region provided by lender at the
1922 * memory sharing call.
1923 */
1924static struct ffa_value ffa_memory_retrieve_validate(
1925 ffa_vm_id_t receiver_id, struct ffa_memory_region *retrieve_request,
1926 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
1927 uint32_t share_func)
1928{
1929 ffa_memory_region_flags_t transaction_type =
1930 retrieve_request->flags &
1931 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001932 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01001933
1934 assert(retrieve_request != NULL);
1935 assert(memory_region != NULL);
1936 assert(receiver_index != NULL);
1937 assert(retrieve_request->sender == memory_region->sender);
1938
1939 /*
1940 * Check that the transaction type expected by the receiver is
1941 * correct, if it has been specified.
1942 */
1943 if (transaction_type !=
1944 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
1945 transaction_type != (memory_region->flags &
1946 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
1947 dlog_verbose(
1948 "Incorrect transaction type %#x for "
1949 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
1950 transaction_type,
1951 memory_region->flags &
1952 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
1953 retrieve_request->handle);
1954 return ffa_error(FFA_INVALID_PARAMETERS);
1955 }
1956
1957 if (retrieve_request->tag != memory_region->tag) {
1958 dlog_verbose(
1959 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
1960 "%d for handle %#x.\n",
1961 retrieve_request->tag, memory_region->tag,
1962 retrieve_request->handle);
1963 return ffa_error(FFA_INVALID_PARAMETERS);
1964 }
1965
1966 *receiver_index =
1967 ffa_memory_region_get_receiver(memory_region, receiver_id);
1968
1969 if (*receiver_index == memory_region->receiver_count) {
1970 dlog_verbose(
1971 "Incorrect receiver VM ID %d for "
1972 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01001973 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01001974 return ffa_error(FFA_INVALID_PARAMETERS);
1975 }
1976
1977 if ((retrieve_request->flags &
1978 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
1979 dlog_verbose(
1980 "Retriever specified 'address range alignment 'hint' "
1981 "not supported.\n");
1982 return ffa_error(FFA_INVALID_PARAMETERS);
1983 }
1984 if ((retrieve_request->flags &
1985 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
1986 dlog_verbose(
1987 "Bits 8-5 must be zero in memory region's flags "
1988 "(address range alignment hint not supported).\n");
1989 return ffa_error(FFA_INVALID_PARAMETERS);
1990 }
1991
	if ((retrieve_request->flags & ~0x3FFU) != 0U) {
		dlog_verbose(
			"Bits 31-10 must be zero in memory region's flags.\n");
1995 return ffa_error(FFA_INVALID_PARAMETERS);
1996 }
1997
1998 if (share_func == FFA_MEM_SHARE_32 &&
1999 (retrieve_request->flags &
2000 (FFA_MEMORY_REGION_FLAG_CLEAR |
2001 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2002 dlog_verbose(
2003 "Memory Share operation can't clean after relinquish "
2004 "memory region.\n");
2005 return ffa_error(FFA_INVALID_PARAMETERS);
2006 }
2007
2008 /*
2009 * If the borrower needs the memory to be cleared before mapping
2010 * to its address space, the sender should have set the flag
2011 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2012 * FFA_DENIED.
2013 */
2014 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2015 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2016 dlog_verbose(
2017 "Borrower needs memory cleared. Sender needs to set "
2018 "flag for clearing memory.\n");
2019 return ffa_error(FFA_DENIED);
2020 }
2021
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002022 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2023 security_state =
2024 ffa_get_memory_security_attr(retrieve_request->attributes);
2025 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2026 dlog_verbose(
2027 "Invalid security state for memory retrieve request "
2028 "operation.\n");
2029 return ffa_error(FFA_INVALID_PARAMETERS);
2030 }
2031
J-Alves089004f2022-07-13 14:25:44 +01002032 /*
2033 * If memory type is not specified, bypass validation of memory
2034 * attributes in the retrieve request. The retriever is expecting to
2035 * obtain this information from the SPMC.
2036 */
2037 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2038 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2039 return (struct ffa_value){.func = FFA_SUCCESS_32};
2040 }
2041
2042 /*
2043 * Ensure receiver's attributes are compatible with how
2044 * Hafnium maps memory: Normal Memory, Inner shareable,
2045 * Write-Back Read-Allocate Write-Allocate Cacheable.
2046 */
2047 return ffa_memory_attributes_validate(retrieve_request->attributes);
2048}
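
/*
 * Reference for the retrieve request flag checks above, assuming the FF-A
 * v1.1 transaction flags layout: bit 0 requests the memory to be cleared
 * before retrieval, bit 2 requests clearing after relinquish, bits 4-3
 * encode the transaction type, bits 8-5 carry the address range alignment
 * hint with bit 9 as its valid bit, and bits 31-10 must be zero.
 */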
2049
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002050struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2051 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002052 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002053 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002054{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002055 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002056 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002057 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002058 sizeof(struct ffa_memory_access);
2059 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002060 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002061 ffa_memory_access_permissions_t permissions = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002062 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002063 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002064 struct ffa_memory_share_state *share_state;
2065 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002066 struct ffa_composite_memory_region *composite;
2067 uint32_t total_length;
2068 uint32_t fragment_length;
J-Alves089004f2022-07-13 14:25:44 +01002069 ffa_vm_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002070 bool is_send_complete = false;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002071
2072 dump_share_states();
2073
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002074 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002075 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002076 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002077 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002078 expected_retrieve_request_length,
2079 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002080 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002081 }
2082
2083 share_states = share_states_lock();
2084 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002085 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002086 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002087 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002088 goto out;
2089 }
2090
J-Alves96de29f2022-04-26 16:05:24 +01002091 if (!share_state->sending_complete) {
2092 dlog_verbose(
2093 "Memory with handle %#x not fully sent, can't "
2094 "retrieve.\n",
2095 handle);
2096 ret = ffa_error(FFA_INVALID_PARAMETERS);
2097 goto out;
2098 }
2099
Andrew Walbrana65a1322020-04-06 19:32:32 +01002100 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002101
Andrew Walbrana65a1322020-04-06 19:32:32 +01002102 CHECK(memory_region != NULL);
2103
J-Alves089004f2022-07-13 14:25:44 +01002104 if (retrieve_request->sender != memory_region->sender) {
		dlog_verbose(
			"Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, "
			"expected %d for handle %#x.\n",
			retrieve_request->sender, memory_region->sender,
			handle);
2109 ret = ffa_error(FFA_INVALID_PARAMETERS);
2110 goto out;
2111 }
J-Alves96de29f2022-04-26 16:05:24 +01002112
	if (!is_ffa_hypervisor_retrieve_request(retrieve_request,
						to_locked)) {
2115 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002116
		/*
		 * The SPMC can only process retrieve requests for memory
		 * share operations with one borrower from the other world. It
		 * can't determine the ID of the NWd VM that invoked the
		 * retrieve request interface call, so it relies on the
		 * hypervisor to validate the caller's ID against that
		 * provided in the `receivers` list of the retrieve response.
		 * If there is exactly one borrower from the NWd in the
		 * transaction descriptor, record it in `receiver_id` for
		 * later use when validating the retrieve request.
		 */
		if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
			uint32_t other_world_count = 0;

			for (uint32_t i = 0; i < memory_region->receiver_count;
			     i++) {
				ffa_vm_id_t id =
					memory_region->receivers[i]
						.receiver_permissions.receiver;

				if (!vm_id_is_current_world(id)) {
					receiver_id = id;
					other_world_count++;
				}
			}
			if (other_world_count > 1) {
				dlog_verbose(
					"Only one receiver from the other "
					"world is supported.\n");
				ret = ffa_error(FFA_NOT_SUPPORTED);
				goto out;
			}
		}
2147
2148 /*
2149 * Validate retrieve request, according to what was sent by the
2150 * sender. Function will output the `receiver_index` from the
2151 * provided memory region, and will output `permissions` from
2152 * the validated requested permissions.
2153 */
J-Alves089004f2022-07-13 14:25:44 +01002154 ret = ffa_memory_retrieve_validate(
2155 receiver_id, retrieve_request, memory_region,
2156 &receiver_index, share_state->share_func);
2157 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002158 goto out;
2159 }
2160
2161 if (share_state->retrieved_fragment_count[receiver_index] !=
2162 0U) {
2163 dlog_verbose(
2164 "Memory with handle %#x already retrieved.\n",
2165 handle);
2166 ret = ffa_error(FFA_DENIED);
2167 goto out;
2168 }
2169
J-Alvesa9cd7e32022-07-01 13:49:33 +01002170 ret = ffa_memory_retrieve_validate_memory_access_list(
2171 memory_region, retrieve_request, receiver_id,
2172 &permissions);
J-Alves614d9f42022-06-28 14:03:10 +01002173 if (ret.func != FFA_SUCCESS_32) {
2174 goto out;
2175 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002176
J-Alvesa9cd7e32022-07-01 13:49:33 +01002177 memory_to_attributes = ffa_memory_permissions_to_mode(
2178 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002179
2180 if (to_locked.vm->el0_partition) {
2181 /*
2182 * Get extra mapping attributes for the given VM ID.
			 * If the memory is shared by a VM executing in the
			 * non-secure world, the MM_MODE_NS attribute has to
			 * be set when mapping it into an SP executing in the
			 * secure world.
2186 */
2187 memory_to_attributes |=
2188 arch_mm_extra_attributes_from_vm(
2189 retrieve_request->sender);
2190 }
2191
J-Alvesa9cd7e32022-07-01 13:49:33 +01002192 ret = ffa_retrieve_check_update(
2193 to_locked, memory_region->sender,
2194 share_state->fragments,
2195 share_state->fragment_constituent_counts,
2196 share_state->fragment_count, memory_to_attributes,
2197 share_state->share_func, false, page_pool);
2198
2199 if (ret.func != FFA_SUCCESS_32) {
2200 goto out;
2201 }
2202
2203 share_state->retrieved_fragment_count[receiver_index] = 1;
2204 is_send_complete =
2205 share_state->retrieved_fragment_count[receiver_index] ==
2206 share_state->fragment_count;
2207 } else {
2208 if (share_state->hypervisor_fragment_count != 0U) {
2209 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002210 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002211 "the hypervisor.\n",
2212 handle);
2213 ret = ffa_error(FFA_DENIED);
2214 goto out;
2215 }
2216
2217 share_state->hypervisor_fragment_count = 1;
2218
2219 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002220 }
2221
J-Alvesb5084cf2022-07-06 14:20:12 +01002222 /* VMs acquire the RX buffer from SPMC. */
2223 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2224
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002225 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002226 * Copy response to RX buffer of caller and deliver the message.
2227 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002228 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002229 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002230 composite = ffa_memory_region_get_composite(memory_region, 0);
2231 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002232 * Constituents which we received in the first fragment should
2233 * always fit in the first fragment we are sending, because the
2234 * header is the same size in both cases and we have a fixed
2235 * message buffer size. So `ffa_retrieved_memory_region_init`
2236 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002237 */
2238 CHECK(ffa_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01002239 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2240 memory_region->sender, memory_region->attributes,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002241 memory_region->flags, handle, receiver_id, permissions,
Andrew Walbranca808b12020-05-15 17:22:28 +01002242 composite->page_count, composite->constituent_count,
2243 share_state->fragments[0],
2244 share_state->fragment_constituent_counts[0], &total_length,
2245 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01002246
Andrew Walbranca808b12020-05-15 17:22:28 +01002247 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002248 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002249 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002250 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002251
J-Alvesa9cd7e32022-07-01 13:49:33 +01002252 if (is_send_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002253 ffa_memory_retrieve_complete(share_states, share_state,
2254 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002255 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002256 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002257 .arg1 = total_length,
2258 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002259out:
2260 share_states_unlock(&share_states);
2261 dump_share_states();
2262 return ret;
2263}
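
/*
 * Illustrative sketch (guarded out of the build): how a borrower consumes the
 * FFA_MEM_RETRIEVE_RESP produced above when the description spans several
 * fragments. ffa_mem_frag_rx() is an assumed wrapper around the
 * FFA_MEM_FRAG_RX ABI, which ffa_memory_retrieve_continue() below handles.
 */
#if 0
uint32_t total_length = ret.arg1;
uint32_t fragment_offset = ret.arg2;

while (fragment_offset < total_length) {
	struct ffa_value frag_ret = ffa_mem_frag_rx(handle, fragment_offset);

	/* frag_ret.arg3 holds the length of the fragment just received. */
	fragment_offset += frag_ret.arg3;
}
#endif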
2264
2265struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2266 ffa_memory_handle_t handle,
2267 uint32_t fragment_offset,
J-Alves59ed0042022-07-28 18:26:41 +01002268 ffa_vm_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002269 struct mpool *page_pool)
2270{
2271 struct ffa_memory_region *memory_region;
2272 struct share_states_locked share_states;
2273 struct ffa_memory_share_state *share_state;
2274 struct ffa_value ret;
2275 uint32_t fragment_index;
2276 uint32_t retrieved_constituents_count;
2277 uint32_t i;
2278 uint32_t expected_fragment_offset;
2279 uint32_t remaining_constituent_count;
2280 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002281 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01002282 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01002283
2284 dump_share_states();
2285
2286 share_states = share_states_lock();
2287 if (!get_share_state(share_states, handle, &share_state)) {
2288 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2289 handle);
2290 ret = ffa_error(FFA_INVALID_PARAMETERS);
2291 goto out;
2292 }
2293
2294 memory_region = share_state->memory_region;
2295 CHECK(memory_region != NULL);
2296
Andrew Walbranca808b12020-05-15 17:22:28 +01002297 if (!share_state->sending_complete) {
2298 dlog_verbose(
2299 "Memory with handle %#x not fully sent, can't "
2300 "retrieve.\n",
2301 handle);
2302 ret = ffa_error(FFA_INVALID_PARAMETERS);
2303 goto out;
2304 }
2305
J-Alves59ed0042022-07-28 18:26:41 +01002306 /*
2307 * If retrieve request from the hypervisor has been initiated in the
2308 * given share_state, continue it, else assume it is a continuation of
2309 * retrieve request from a NWd VM.
2310 */
2311 continue_ffa_hyp_mem_retrieve_req =
2312 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
2313 (share_state->hypervisor_fragment_count != 0U) &&
2314 plat_ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01002315
J-Alves59ed0042022-07-28 18:26:41 +01002316 if (!continue_ffa_hyp_mem_retrieve_req) {
2317 receiver_index = ffa_memory_region_get_receiver(
2318 memory_region, to_locked.vm->id);
2319
2320 if (receiver_index == memory_region->receiver_count) {
2321 dlog_verbose(
2322 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
2323 "borrower to memory sharing transaction (%x)\n",
2324 to_locked.vm->id, handle);
2325 ret = ffa_error(FFA_INVALID_PARAMETERS);
2326 goto out;
2327 }
2328
2329 if (share_state->retrieved_fragment_count[receiver_index] ==
2330 0 ||
2331 share_state->retrieved_fragment_count[receiver_index] >=
2332 share_state->fragment_count) {
2333 dlog_verbose(
2334 "Retrieval of memory with handle %#x not yet "
2335 "started or already completed (%d/%d fragments "
2336 "retrieved).\n",
2337 handle,
2338 share_state->retrieved_fragment_count
2339 [receiver_index],
2340 share_state->fragment_count);
2341 ret = ffa_error(FFA_INVALID_PARAMETERS);
2342 goto out;
2343 }
2344
2345 fragment_index =
2346 share_state->retrieved_fragment_count[receiver_index];
2347 } else {
2348 if (share_state->hypervisor_fragment_count == 0 ||
2349 share_state->hypervisor_fragment_count >=
2350 share_state->fragment_count) {
2351 dlog_verbose(
2352 "Retrieve of memory with handle %x not "
2353 "started from hypervisor.\n",
2354 handle);
2355 ret = ffa_error(FFA_INVALID_PARAMETERS);
2356 goto out;
2357 }
2358
2359 if (memory_region->sender != sender_vm_id) {
2360 dlog_verbose(
2361 "Sender ID (%x) is not as expected for memory "
2362 "handle %x\n",
2363 sender_vm_id, handle);
2364 ret = ffa_error(FFA_INVALID_PARAMETERS);
2365 goto out;
2366 }
2367
2368 fragment_index = share_state->hypervisor_fragment_count;
2369
2370 receiver_index = 0;
2371 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002372
2373 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002374 * Check that the given fragment offset is correct by counting
2375 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01002376 */
2377 retrieved_constituents_count = 0;
2378 for (i = 0; i < fragment_index; ++i) {
2379 retrieved_constituents_count +=
2380 share_state->fragment_constituent_counts[i];
2381 }
J-Alvesc7484f12022-05-13 12:41:14 +01002382
2383 CHECK(memory_region->receiver_count > 0);
2384
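	/*
	 * Offsets given by the retriever are relative to the retrieve
	 * response, which carries a single ffa_memory_access entry, whereas
	 * `memory_region` holds `receiver_count` of them; the subtraction
	 * below compensates for that difference.
	 */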
Andrew Walbranca808b12020-05-15 17:22:28 +01002385 expected_fragment_offset =
J-Alvesc7484f12022-05-13 12:41:14 +01002386 ffa_composite_constituent_offset(memory_region,
2387 receiver_index) +
Andrew Walbranca808b12020-05-15 17:22:28 +01002388 retrieved_constituents_count *
J-Alvesc7484f12022-05-13 12:41:14 +01002389 sizeof(struct ffa_memory_region_constituent) -
2390 sizeof(struct ffa_memory_access) *
2391 (memory_region->receiver_count - 1);
Andrew Walbranca808b12020-05-15 17:22:28 +01002392 if (fragment_offset != expected_fragment_offset) {
2393 dlog_verbose("Fragment offset was %d but expected %d.\n",
2394 fragment_offset, expected_fragment_offset);
2395 ret = ffa_error(FFA_INVALID_PARAMETERS);
2396 goto out;
2397 }
2398
J-Alves59ed0042022-07-28 18:26:41 +01002399 /* VMs acquire the RX buffer from SPMC. */
2400 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2401
Andrew Walbranca808b12020-05-15 17:22:28 +01002402 remaining_constituent_count = ffa_memory_fragment_init(
2403 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2404 share_state->fragments[fragment_index],
2405 share_state->fragment_constituent_counts[fragment_index],
2406 &fragment_length);
2407 CHECK(remaining_constituent_count == 0);
2408 to_locked.vm->mailbox.recv_size = fragment_length;
2409 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2410 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002411 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01002412
J-Alves59ed0042022-07-28 18:26:41 +01002413 if (!continue_ffa_hyp_mem_retrieve_req) {
2414 share_state->retrieved_fragment_count[receiver_index]++;
2415 if (share_state->retrieved_fragment_count[receiver_index] ==
2416 share_state->fragment_count) {
2417 ffa_memory_retrieve_complete(share_states, share_state,
2418 page_pool);
2419 }
2420 } else {
2421 share_state->hypervisor_fragment_count++;
2422
2423 ffa_memory_retrieve_complete_from_hyp(share_state);
2424 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002425 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2426 .arg1 = (uint32_t)handle,
2427 .arg2 = (uint32_t)(handle >> 32),
2428 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002429
2430out:
2431 share_states_unlock(&share_states);
2432 dump_share_states();
2433 return ret;
2434}
2435
struct ffa_value ffa_memory_relinquish(
	struct vm_locked from_locked,
	struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	ffa_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	bool clear;
	struct ffa_value ret;
	uint32_t receiver_index;

	if (relinquish_request->endpoint_count != 1) {
		dlog_verbose(
			"Stream endpoints not supported (got %d endpoints "
			"on FFA_MEM_RELINQUISH, expected 1).\n",
			relinquish_request->endpoint_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match "
			"calling VM ID %d.\n",
			relinquish_request->endpoints[0], from_locked.vm->id);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"relinquish.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	receiver_index = ffa_memory_region_get_receiver(memory_region,
							from_locked.vm->id);

	if (receiver_index == memory_region->receiver_count) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but is not a valid borrower.\n",
			from_locked.vm->id, handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

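	/*
	 * Relinquish is only valid once the borrower has retrieved every
	 * fragment of the region; until then it does not hold the full
	 * mapping that would be undone here.
	 */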
	if (share_state->retrieved_fragment_count[receiver_index] !=
	    share_state->fragment_count) {
		dlog_verbose(
			"Memory with handle %#x not yet fully retrieved, "
			"receiver %x can't relinquish.\n",
			handle, from_locked.vm->id);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	ret = ffa_relinquish_check_update(
		from_locked, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, page_pool, clear);

	if (ret.func == FFA_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be
		 * reclaimed (or retrieved again).
		 */
		share_state->retrieved_fragment_count[receiver_index] = 0;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal
 * state associated with the handle.
 */
struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
				    ffa_memory_handle_t handle,
				    ffa_memory_region_flags_t flags,
				    struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct ffa_memory_share_state *share_state;
	struct ffa_memory_region *memory_region;
	struct ffa_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (get_share_state(share_states, handle, &share_state)) {
		memory_region = share_state->memory_region;
	} else {
		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
			     handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	CHECK(memory_region != NULL);

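	/*
	 * Only the original sender may reclaim. The ID check is limited to
	 * callers from the current world; for an other-world caller the
	 * sender's identity is assumed to have been validated on that
	 * world's side before the call was forwarded here.
	 */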
	if (vm_id_is_current_world(to_locked.vm->id) &&
	    to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %#x attempted to reclaim memory handle %#x "
			"originally sent by VM %#x.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->sending_complete) {
		dlog_verbose(
			"Memory with handle %#x not fully sent, can't "
			"reclaim.\n",
			handle);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

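	/*
	 * The sender can only reclaim once every borrower has relinquished
	 * the region: any non-zero retrieved fragment count means the
	 * handle is still in use.
	 */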
	for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
		if (share_state->retrieved_fragment_count[i] != 0) {
			dlog_verbose(
				"Tried to reclaim memory handle %#x that "
				"has not been relinquished by all borrowers "
				"(%x).\n",
				handle,
				memory_region->receivers[i]
					.receiver_permissions.receiver);
			ret = ffa_error(FFA_DENIED);
			goto out;
		}
	}

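	/*
	 * Map the region back into the sender's page table with its
	 * original memory attributes, clearing the pages first if
	 * FFA_MEM_RECLAIM_CLEAR was requested; the retrieve path is reused
	 * for this update.
	 */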
	ret = ffa_retrieve_check_update(
		to_locked, memory_region->sender, share_state->fragments,
		share_state->fragment_constituent_counts,
		share_state->fragment_count, share_state->sender_orig_mode,
		FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);

	if (ret.func == FFA_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}