/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
	struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
	struct ffa_memory_access *receiver;
	uint32_t composite_offset;

	CHECK(receiver_index < memory_region->receiver_count);

	receiver =
		ffa_memory_region_get_receiver(memory_region, receiver_index);
	CHECK(receiver != NULL);

	composite_offset = receiver->composite_memory_region_offset;

	CHECK(composite_offset != 0);

	return composite_offset + sizeof(struct ffa_composite_memory_region);
}
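
/*
 * Illustrative note (not part of the upstream sources): assuming a single
 * receiver whose composite_memory_region_offset is, say, 48 bytes, the helper
 * above computes
 *
 *	first_constituent_offset =
 *		48 + sizeof(struct ffa_composite_memory_region);
 *
 * i.e. the constituent array starts immediately after the composite header,
 * and each entry that follows is one
 * `struct ffa_memory_region_constituent` in size.
 */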

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
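
/*
 * Example (hypothetical values, for illustration only): assuming the
 * allocator bit(s) live in FFA_MEMORY_HANDLE_ALLOCATOR_MASK, a handle of
 * (FFA_MEMORY_HANDLE_ALLOCATOR_MASK | 5) maps back to share_states[5]:
 *
 *	uint64_t index = ffa_memory_handle_get_index(handle);	// == 5
 */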

/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle)
{
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
			     ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			return allocated_state;
		}
	}

	return NULL;
}
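
/*
 * Usage sketch (hypothetical caller, not part of the original file): the
 * share states lock must be held while allocating, e.g.
 *
 *	struct share_states_locked states = share_states_lock();
 *	struct ffa_memory_share_state *state = allocate_share_state(
 *		states, FFA_MEM_SHARE_32, memory_region, fragment_length,
 *		FFA_MEMORY_HANDLE_INVALID);
 *	if (state == NULL) {
 *		... no free slot in share_states[] ...
 *	}
 *	share_states_unlock(&states);
 */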

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_get_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}
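
/*
 * Lookup sketch (hypothetical, shown only to illustrate the locking
 * discipline of the helpers above):
 *
 *	struct share_states_locked states = share_states_lock();
 *	struct ffa_memory_share_state *state = get_share_state(states, handle);
 *	if (state != NULL) {
 *		... operate on the share state while the lock is held ...
 *	}
 *	share_states_unlock(&states);
 */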

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
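
/*
 * Worked example (illustrative assumption): if two fragments holding 16 and 8
 * constituents have already been received, the next expected fragment offset
 * is
 *
 *	ffa_composite_constituent_offset(memory_region, 0) +
 *		(16 + 8) * sizeof(struct ffa_memory_region_constituent)
 *
 * i.e. the offset of the first constituent plus the bytes received so far.
 */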

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x "
	     "tag %u, memory access descriptor size %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->memory_access_desc_size,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		if (i != 0) {
			dlog(", ");
		}
		dlog("Receiver %#x: %#x (offset %u)",
		     receiver->receiver_permissions.receiver,
		     receiver->receiver_permissions.permissions,
		     receiver->composite_memory_region_offset);
		/* The impdef field is only present from v1.2 onwards. */
		if (ffa_version_from_memory_access_desc_size(
			    memory_region->memory_access_desc_size) >=
		    MAKE_FFA_VERSION(1, 2)) {
			dlog(", impdef: %#x %#x", receiver->impdef.val[0],
			     receiver->impdef.val[1]);
		}
	}
	dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}
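
/*
 * Summary of the mapping implemented above (derived from the two switch
 * statements; the NOT_SPECIFIED cases instead fall back to the given default
 * mode, and the security state bit is OR'd in on top):
 *
 *	RO + NX -> MM_MODE_R
 *	RO + X  -> MM_MODE_R | MM_MODE_X
 *	RW + NX -> MM_MODE_R | MM_MODE_W
 *	RW + X  -> MM_MODE_R | MM_MODE_W | MM_MODE_X
 */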

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		dlog_verbose("%s: no constituents\n", __func__);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				dlog_verbose("%s: addresses not page-aligned\n",
					     __func__);
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				dlog_verbose(
					"%s: constituent memory range %#x..%#x "
					"not mapped with the same mode\n",
					__func__, begin, end);
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode.
			 */
			if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"%s: expected mode %#x but was %#x for "
					"%d pages at %#x.\n",
					__func__, *orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

uint32_t ffa_version_from_memory_access_desc_size(
	uint32_t memory_access_desc_size)
{
	switch (memory_access_desc_size) {
	/*
	 * The v1.0 and v1.1 memory access descriptors are the same size;
	 * however, v1.1 is the first version to include the memory access
	 * descriptor size field, so return v1.1.
	 */
	case sizeof(struct ffa_memory_access_v1_0):
		return MAKE_FFA_VERSION(1, 1);
	case sizeof(struct ffa_memory_access):
		return MAKE_FFA_VERSION(1, 2);
	}
	return 0;
}
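
/*
 * Example (illustrative): a descriptor size of
 * sizeof(struct ffa_memory_access_v1_0) is reported as FF-A v1.1, the current
 * sizeof(struct ffa_memory_access) as v1.2, and any other size yields 0 so
 * that the callers below can reject it.
 */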

/**
 * Check that the receiver size and offset given are valid for the sender's
 * FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
	uint32_t receivers_size, uint32_t receivers_offset,
	uint32_t ffa_version)
{
	/*
	 * Check that the version that the memory access descriptor size
	 * belongs to is compatible with the FF-A version we believe the
	 * sender to be using.
	 */
	uint32_t expected_ffa_version =
		ffa_version_from_memory_access_desc_size(receivers_size);
	if (!FFA_VERSIONS_ARE_COMPATIBLE(expected_ffa_version, ffa_version)) {
		return false;
	}

	/*
	 * Check the receivers_offset matches the version we found from the
	 * memory access descriptor size.
	 */
	switch (expected_ffa_version) {
	case MAKE_FFA_VERSION(1, 1):
	case MAKE_FFA_VERSION(1, 2):
		return receivers_offset == sizeof(struct ffa_memory_region);
	default:
		return false;
	}
}

/**
 * Check that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
				    uint32_t ffa_version,
				    uint32_t fragment_length,
				    bool send_transaction)
{
	uint32_t receiver_count;
	struct ffa_memory_access *receiver;
	uint32_t composite_offset_0;

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct ffa_memory_region_v1_0 *memory_region_v1_0 =
			(struct ffa_memory_region_v1_0 *)memory_region;
		/* Check the reserved fields are 0. */
		if (memory_region_v1_0->reserved_0 != 0 ||
		    memory_region_v1_0->reserved_1 != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		receiver_count = memory_region_v1_0->receiver_count;
	} else {
		uint32_t receivers_size =
			memory_region->memory_access_desc_size;
		uint32_t receivers_offset = memory_region->receivers_offset;

		/* Check the reserved fields are 0. */
		if (memory_region->reserved[0] != 0 ||
		    memory_region->reserved[1] != 0 ||
		    memory_region->reserved[2] != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		/*
		 * Check memory_access_desc_size matches the size of the struct
		 * for the sender's FF-A version.
		 */
		if (!receiver_size_and_offset_valid_for_version(
			    receivers_size, receivers_offset, ffa_version)) {
			dlog_verbose(
				"Invalid memory access descriptor size %d "
				"or receiver offset %d "
				"for FF-A version %#x\n",
				receivers_size, receivers_offset, ffa_version);
			return false;
		}

		receiver_count = memory_region->receiver_count;
	}

	/* Check the receiver count is not too large. */
	if (receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
		dlog_verbose(
			"Max number of recipients supported is %u, "
			"specified %u\n",
			MAX_MEM_SHARE_RECIPIENTS, receiver_count);
		return false;
	}

	/* Check values in the memory access descriptors. */
	/*
	 * The composite offset values must be the same for all receivers, so
	 * check that the first one is valid and then that they are all the
	 * same.
	 */
	receiver = ffa_version == MAKE_FFA_VERSION(1, 0)
			   ? (struct ffa_memory_access *)&(
				     (struct ffa_memory_region_v1_0 *)
					     memory_region)
				      ->receivers[0]
			   : ffa_memory_region_get_receiver(memory_region, 0);
	assert(receiver != NULL);
	composite_offset_0 = receiver->composite_memory_region_offset;

	if (!send_transaction) {
		if (composite_offset_0 != 0) {
			dlog_verbose(
				"Composite memory region descriptor offset "
				"must be 0 for retrieve requests. "
				"Currently %d",
				composite_offset_0);
			return false;
		}
	} else {
		bool comp_offset_is_zero = composite_offset_0 == 0U;
		bool comp_offset_lt_transaction_descriptor_size =
			composite_offset_0 <
			(sizeof(struct ffa_memory_region) +
			 (uint32_t)(memory_region->memory_access_desc_size *
				    memory_region->receiver_count));
		bool comp_offset_with_comp_gt_fragment_length =
			composite_offset_0 +
				sizeof(struct ffa_composite_memory_region) >
			fragment_length;
		if (comp_offset_is_zero ||
		    comp_offset_lt_transaction_descriptor_size ||
		    comp_offset_with_comp_gt_fragment_length) {
			dlog_verbose(
				"Invalid composite memory region descriptor "
				"offset for send transaction %u\n",
				composite_offset_0);
			return false;
		}
	}

	for (int i = 0; i < memory_region->receiver_count; i++) {
		uint32_t composite_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			struct ffa_memory_region_v1_0 *memory_region_v1_0 =
				(struct ffa_memory_region_v1_0 *)memory_region;

			struct ffa_memory_access_v1_0 *receiver_v1_0 =
				&memory_region_v1_0->receivers[i];
			/* Check reserved fields are 0. */
			if (receiver_v1_0->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver_v1_0->reserved_0);
				return false;
			}
			/*
			 * We can cast to the current version receiver as the
			 * remaining fields we are checking have the same
			 * offsets for all versions since memory access
			 * descriptors are forwards compatible.
			 */
			receiver = (struct ffa_memory_access *)receiver_v1_0;
		} else {
			receiver = ffa_memory_region_get_receiver(memory_region,
								  i);
			assert(receiver != NULL);

			if (receiver->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver->reserved_0);
				return false;
			}
		}

		/* Check composite offset values are equal for all receivers. */
		composite_offset = receiver->composite_memory_region_offset;
		if (composite_offset != composite_offset_0) {
			dlog_verbose(
				"Composite offset %x differs from %x in index "
				"%u\n",
				composite_offset, composite_offset_0, i);
			return false;
		}
	}
	return true;
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if ((*orig_from_mode & MM_MODE_D) != 0U) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		/*
		 * The assumption is that, at this point, an operation from an
		 * SP to a receiver VM should already have returned an
		 * FFA_ERROR.
		 */
		if (!ffa_is_vm_id(from.vm->id)) {
			assert(!ffa_is_vm_id(
				receivers[i].receiver_permissions.receiver));
		}

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, orig_to_mode is expected to overlap with
		 * `state_mask`, as a result of `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from the virtual FF-A instance:
		 * ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls within the
			 * system's PA range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
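
/*
 * Two-phase usage sketch (hypothetical caller, mirroring the contract
 * described in the comment above): reserve page-table memory with
 * commit=false first, then apply the change with commit=true so the update
 * cannot fail halfway through.
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments,
 *					   fragment_constituent_counts,
 *					   fragment_count, mode, ppool,
 *					   false)) {
 *		return ffa_error(FFA_NO_MEMORY);
 *	}
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments,
 *					    fragment_constituent_counts,
 *					    fragment_count, mode, ppool,
 *					    true));
 */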

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
				   ipaddr_t in_begin, ipaddr_t in_end)
{
	return (ipa_addr(begin) >= ipa_addr(in_begin) &&
		ipa_addr(begin) < ipa_addr(in_end)) ||
	       (ipa_addr(end) <= ipa_addr(in_end) &&
		ipa_addr(end) > ipa_addr(in_begin));
}
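
/*
 * Worked example (illustrative, using the inclusive end addresses computed by
 * ffa_memory_check_overlap() below): with in_begin = 0x1000 and
 * in_end = 0x2fff, a range beginning at 0x2000 is reported as "within"
 * because its begin address falls inside the given range, while a range
 * [0x3000, 0x4fff] is not. The caller applies the test in both directions so
 * that neither range can contain the other without the overlap being
 * detected.
 */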
1060
1061/**
1062 * Receives a memory range and looks for overlaps with the remainder
1063 * constituents of the memory share/lend/donate operation. Assumes they are
1064 * passed in order to avoid having to loop over all the elements at each call.
1065 * The function only compares the received memory ranges with those that follow
1066 * within the same fragment, and subsequent fragments from the same operation.
1067 */
1068static bool ffa_memory_check_overlap(
1069 struct ffa_memory_region_constituent **fragments,
1070 const uint32_t *fragment_constituent_counts,
1071 const uint32_t fragment_count, const uint32_t current_fragment,
1072 const uint32_t current_constituent)
1073{
1074 uint32_t i = current_fragment;
1075 uint32_t j = current_constituent;
1076 ipaddr_t current_begin = ipa_init(fragments[i][j].address);
1077 const uint32_t current_page_count = fragments[i][j].page_count;
1078 size_t current_size = current_page_count * PAGE_SIZE;
1079 ipaddr_t current_end = ipa_add(current_begin, current_size - 1);
1080
1081 if (current_size == 0 ||
1082 current_size > UINT64_MAX - ipa_addr(current_begin)) {
1083 dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
1084 current_begin, current_page_count);
1085 return false;
1086 }
1087
1088 for (; i < fragment_count; i++) {
1089 j = (i == current_fragment) ? j + 1 : 0;
1090
1091 for (; j < fragment_constituent_counts[i]; j++) {
1092 ipaddr_t begin = ipa_init(fragments[i][j].address);
1093 const uint32_t page_count = fragments[i][j].page_count;
1094 size_t size = page_count * PAGE_SIZE;
1095 ipaddr_t end = ipa_add(begin, size - 1);
1096
1097 if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
1098 dlog_verbose(
1099 "Invalid page count. Addr: %x "
1100 "page_count: %x\n",
1101 begin, page_count);
1102 return false;
1103 }
1104
1105 /*
1106 * Check if current ranges is within begin and end, as
1107 * well as the reverse. This should help optimize the
1108 * loop, and reduce the number of iterations.
1109 */
1110 if (is_memory_range_within(begin, end, current_begin,
1111 current_end) ||
1112 is_memory_range_within(current_begin, current_end,
1113 begin, end)) {
1114 dlog_verbose(
1115 "Overlapping memory ranges: %#x - %#x "
1116 "with %#x - %#x\n",
1117 ipa_addr(begin), ipa_addr(end),
1118 ipa_addr(current_begin),
1119 ipa_addr(current_end));
1120 return true;
1121 }
1122 }
1123 }
1124
1125 return false;
1126}
1127
Jose Marinho09b1db82019-08-08 09:16:59 +01001128/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001129 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001130 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001131 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001132 *
1133 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001134 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001135 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001136 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001137 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1138 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001139 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001140 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001141 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001142 */
J-Alves66652252022-07-06 09:49:51 +01001143struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001144 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001145 struct ffa_memory_region_constituent **fragments,
1146 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001147 uint32_t composite_total_page_count, uint32_t share_func,
1148 struct ffa_memory_access *receivers, uint32_t receivers_count,
1149 struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
Jose Marinho09b1db82019-08-08 09:16:59 +01001150{
Andrew Walbranca808b12020-05-15 17:22:28 +01001151 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001152 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001153 uint32_t orig_from_mode;
1154 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001155 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001156 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001157 uint32_t constituents_total_page_count = 0;
Jose Marinho09b1db82019-08-08 09:16:59 +01001158
1159 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001160 * Make sure constituents are properly aligned to a 64-bit boundary. If
1161 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001162 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001163 for (i = 0; i < fragment_count; ++i) {
1164 if (!is_aligned(fragments[i], 8)) {
1165 dlog_verbose("Constituents not aligned.\n");
1166 return ffa_error(FFA_INVALID_PARAMETERS);
1167 }
J-Alves8f11cde2022-12-21 16:18:22 +00001168 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1169 constituents_total_page_count +=
1170 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001171 if (ffa_memory_check_overlap(
1172 fragments, fragment_constituent_counts,
1173 fragment_count, i, j)) {
1174 return ffa_error(FFA_INVALID_PARAMETERS);
1175 }
J-Alves8f11cde2022-12-21 16:18:22 +00001176 }
1177 }
1178
1179 if (constituents_total_page_count != composite_total_page_count) {
1180 dlog_verbose(
1181 "Composite page count differs from calculated page "
1182 "count from constituents.\n");
1183 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001184 }
1185
1186 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001187 * Check if the state transition is lawful for the sender, ensure that
1188 * all constituents of a memory region being shared are at the same
1189 * state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001190 */
J-Alves363f5722022-04-25 17:37:37 +01001191 ret = ffa_send_check_transition(from_locked, share_func, receivers,
1192 receivers_count, &orig_from_mode,
1193 fragments, fragment_constituent_counts,
Andrew Walbranca808b12020-05-15 17:22:28 +01001194 fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001195 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001196 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001197 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001198 }
1199
Andrew Walbran37c574e2020-06-03 11:45:46 +01001200 if (orig_from_mode_ret != NULL) {
1201 *orig_from_mode_ret = orig_from_mode;
1202 }
1203
Jose Marinho09b1db82019-08-08 09:16:59 +01001204 /*
1205 * Create a local pool so any freed memory can't be used by another
1206 * thread. This is to ensure the original mapping can be restored if the
1207 * clear fails.
1208 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001209 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001210
1211 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001212 * First reserve all required memory for the new page table entries
1213 * without committing, to make sure the entire operation will succeed
1214 * without exhausting the page pool.
Jose Marinho09b1db82019-08-08 09:16:59 +01001215 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001216 if (!ffa_region_group_identity_map(
1217 from_locked, fragments, fragment_constituent_counts,
1218 fragment_count, from_mode, page_pool, false)) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001219 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001220 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001221 goto out;
1222 }
1223
1224 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001225 * Update the mapping for the sender. This won't allocate because the
1226 * transaction was already prepared above, but may free pages in the
1227 * case that a whole block is being unmapped that was previously
1228 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001229 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001230 CHECK(ffa_region_group_identity_map(
1231 from_locked, fragments, fragment_constituent_counts,
1232 fragment_count, from_mode, &local_page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +01001233
1234 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001235 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001236 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1237 fragment_constituent_counts,
1238 fragment_count, page_pool)) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001239 /*
1240 * On failure, roll back by returning memory to the sender. This
1241 * may allocate pages which were previously freed into
1242 * `local_page_pool` by the call above, but will never allocate
1243 * more pages than that so can never fail.
1244 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001245 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001246 from_locked, fragments, fragment_constituent_counts,
1247 fragment_count, orig_from_mode, &local_page_pool,
1248 true));
Jose Marinho09b1db82019-08-08 09:16:59 +01001249
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001250 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001251 goto out;
1252 }
1253
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001254 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001255
1256out:
1257 mpool_fini(&local_page_pool);
1258
1259 /*
1260 * Tidy up the page table by reclaiming failed mappings (if there was an
1261 * error) or merging entries into blocks where possible (on success).
1262 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001263 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001264
1265 return ret;
1266}
1267
1268/**
1269 * Validates and maps memory shared from one VM to another.
1270 *
1271 * This function requires the calling context to hold the <to> lock.
1272 *
1273 * Returns:
1274 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001275 *  1) FFA_INVALID_PARAMETERS - The parameters provided by the endpoint were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001276 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001277 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001278 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001279 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001280 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001281struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001282 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001283 struct ffa_memory_region_constituent **fragments,
1284 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001285 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
Andrew Walbranca808b12020-05-15 17:22:28 +01001286 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001287{
Andrew Walbranca808b12020-05-15 17:22:28 +01001288 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001289 uint32_t to_mode;
1290 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001291 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001292
1293 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001294 * Make sure constituents are properly aligned to a 64-bit boundary. If
1295	 * not, we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001296 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001297 for (i = 0; i < fragment_count; ++i) {
1298 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001299 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001300 return ffa_error(FFA_INVALID_PARAMETERS);
1301 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001302 }
1303
1304 /*
1305 * Check if the state transition is lawful for the recipient, and ensure
1306 * that all constituents of the memory region being retrieved are at the
1307 * same state.
1308 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001309 ret = ffa_retrieve_check_transition(
1310 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alves26483382023-04-20 12:01:49 +01001311 fragment_count, sender_orig_mode, &to_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001312 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001313 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001314 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001315 }
1316
1317 /*
1318 * Create a local pool so any freed memory can't be used by another
1319 * thread. This is to ensure the original mapping can be restored if the
1320 * clear fails.
1321 */
1322 mpool_init_with_fallback(&local_page_pool, page_pool);
1323
1324 /*
1325 * First reserve all required memory for the new page table entries in
1326 * the recipient page tables without committing, to make sure the entire
1327 * operation will succeed without exhausting the page pool.
1328 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001329 if (!ffa_region_group_identity_map(
1330 to_locked, fragments, fragment_constituent_counts,
1331 fragment_count, to_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001332 /* TODO: partial defrag of failed range. */
1333 dlog_verbose(
1334 "Insufficient memory to update recipient page "
1335 "table.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001336 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001337 goto out;
1338 }
1339
1340 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001341 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001342 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1343 fragment_constituent_counts,
1344 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001345 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001346 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001347 goto out;
1348 }
1349
Jose Marinho09b1db82019-08-08 09:16:59 +01001350 /*
1351 * Complete the transfer by mapping the memory into the recipient. This
1352 * won't allocate because the transaction was already prepared above, so
1353 * it doesn't need to use the `local_page_pool`.
1354 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001355 CHECK(ffa_region_group_identity_map(
1356 to_locked, fragments, fragment_constituent_counts,
1357 fragment_count, to_mode, page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +01001358
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001359 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001360
1361out:
1362 mpool_fini(&local_page_pool);
1363
1364 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001365 * Tidy up the page table by reclaiming failed mappings (if there was an
1366 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001367 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001368 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001369
1370 return ret;
1371}
1372
Andrew Walbran996d1d12020-05-27 14:08:43 +01001373static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001374 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001375 struct ffa_memory_region_constituent **fragments,
1376 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1377 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001378{
1379 uint32_t orig_from_mode;
1380 uint32_t from_mode;
1381 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001382 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001383
Andrew Walbranca808b12020-05-15 17:22:28 +01001384 ret = ffa_relinquish_check_transition(
1385 from_locked, &orig_from_mode, fragments,
1386 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001387 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001388 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001389 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001390 }
1391
1392 /*
1393 * Create a local pool so any freed memory can't be used by another
1394 * thread. This is to ensure the original mapping can be restored if the
1395 * clear fails.
1396 */
1397 mpool_init_with_fallback(&local_page_pool, page_pool);
1398
1399 /*
1400 * First reserve all required memory for the new page table entries
1401 * without committing, to make sure the entire operation will succeed
1402 * without exhausting the page pool.
1403 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001404 if (!ffa_region_group_identity_map(
1405 from_locked, fragments, fragment_constituent_counts,
1406 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001407 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001408 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001409 goto out;
1410 }
1411
1412 /*
1413 * Update the mapping for the sender. This won't allocate because the
1414 * transaction was already prepared above, but may free pages in the
1415 * case that a whole block is being unmapped that was previously
1416 * partially mapped.
1417 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001418 CHECK(ffa_region_group_identity_map(
1419 from_locked, fragments, fragment_constituent_counts,
1420 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001421
1422 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001423 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001424 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1425 fragment_constituent_counts,
1426 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001427 /*
1428 * On failure, roll back by returning memory to the sender. This
1429 * may allocate pages which were previously freed into
1430 * `local_page_pool` by the call above, but will never allocate
1431 * more pages than that so can never fail.
1432 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001433 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001434 from_locked, fragments, fragment_constituent_counts,
1435 fragment_count, orig_from_mode, &local_page_pool,
1436 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001437
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001438 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001439 goto out;
1440 }
1441
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001442 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001443
1444out:
1445 mpool_fini(&local_page_pool);
1446
1447 /*
1448 * Tidy up the page table by reclaiming failed mappings (if there was an
1449 * error) or merging entries into blocks where possible (on success).
1450 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001451 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001452
1453 return ret;
1454}
1455
1456/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001457 * Complete a memory sending operation by checking that it is valid, updating
1458 * the sender page table, and then either marking the share state as having
1459 * completed sending (on success) or freeing it (on failure).
1460 *
1461 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1462 */
J-Alvesfdd29272022-07-19 13:16:31 +01001463struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001464 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001465 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1466 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001467{
1468 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001469 struct ffa_composite_memory_region *composite;
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001470 struct ffa_memory_access *receiver;
Andrew Walbranca808b12020-05-15 17:22:28 +01001471 struct ffa_value ret;
1472
1473 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001474 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001475 assert(memory_region != NULL);
1476 composite = ffa_memory_region_get_composite(memory_region, 0);
1477 assert(composite != NULL);
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001478 receiver = ffa_memory_region_get_receiver(memory_region, 0);
1479 assert(receiver != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001480
1481 /* Check that state is valid in sender page table and update. */
1482 ret = ffa_send_check_update(
1483 from_locked, share_state->fragments,
1484 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001485 share_state->fragment_count, composite->page_count,
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001486 share_state->share_func, receiver,
J-Alves8f11cde2022-12-21 16:18:22 +00001487 memory_region->receiver_count, page_pool,
1488 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001489 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001490 if (ret.func != FFA_SUCCESS_32) {
1491 /*
1492 * Free share state, it failed to send so it can't be retrieved.
1493 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001494 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1495 __func__, ffa_func_name(ret.func),
1496 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001497 share_state_free(share_states, share_state, page_pool);
1498 return ret;
1499 }
1500
1501 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001502 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001503
J-Alvesee68c542020-10-29 17:48:20 +00001504 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001505}
1506
1507/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001508 * Check that the memory attributes match Hafnium expectations:
1509 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1510 * Write-Allocate Cacheable.
1511 */
1512static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001513 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001514{
1515 enum ffa_memory_type memory_type;
1516 enum ffa_memory_cacheability cacheability;
1517 enum ffa_memory_shareability shareability;
1518
1519 memory_type = ffa_get_memory_type_attr(attributes);
1520 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1521 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1522 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001523 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001524 }
1525
1526 cacheability = ffa_get_memory_cacheability_attr(attributes);
1527 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1528 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1529 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001530 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001531 }
1532
1533 shareability = ffa_get_memory_shareability_attr(attributes);
1534 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1535 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1536 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001537 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001538 }
1539
1540 return (struct ffa_value){.func = FFA_SUCCESS_32};
1541}
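
/*
 * Illustrative sketch (not part of the original sources): the only attribute
 * combination accepted by ffa_memory_attributes_validate() above, expressed
 * with the getters used in this file. `attributes` stands for a value taken
 * from a sender's or retriever's descriptor.
 *
 *	bool attributes_ok =
 *		ffa_get_memory_type_attr(attributes) == FFA_MEMORY_NORMAL_MEM &&
 *		ffa_get_memory_cacheability_attr(attributes) ==
 *			FFA_MEMORY_CACHE_WRITE_BACK &&
 *		ffa_get_memory_shareability_attr(attributes) ==
 *			FFA_MEMORY_INNER_SHAREABLE;
 */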
1542
1543/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001544 * Check that the given `memory_region` represents a valid memory send request
1545 * of the given `share_func` type: the message layout, the receiver list, the
1546 * access permissions and the memory attributes specified by the sender.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001547 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001548 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001549 * not.
1550 */
J-Alves66652252022-07-06 09:49:51 +01001551struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001552 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1553 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001554 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001555{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001556 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001557 struct ffa_memory_access *receiver =
1558 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001559 uint64_t receivers_end;
1560 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001561 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001562 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001563 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001564 enum ffa_data_access data_access;
1565 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001566 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001567 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001568 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001569 memory_region->receivers_offset +
1570 memory_region->memory_access_desc_size +
1571 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001572
1573 if (fragment_length < minimum_first_fragment_length) {
1574 dlog_verbose("Fragment length %u too short (min %u).\n",
1575 (size_t)fragment_length,
1576 minimum_first_fragment_length);
1577 return ffa_error(FFA_INVALID_PARAMETERS);
1578 }
1579
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001580 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1581 "struct ffa_memory_region_constituent must be 16 bytes");
1582 if (!is_aligned(fragment_length,
1583 sizeof(struct ffa_memory_region_constituent)) ||
1584 !is_aligned(memory_share_length,
1585 sizeof(struct ffa_memory_region_constituent))) {
1586 dlog_verbose(
1587 "Fragment length %u or total length %u"
1588 " is not 16-byte aligned.\n",
1589 fragment_length, memory_share_length);
1590 return ffa_error(FFA_INVALID_PARAMETERS);
1591 }
1592
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001593 if (fragment_length > memory_share_length) {
1594 dlog_verbose(
1595 "Fragment length %u greater than total length %u.\n",
1596 (size_t)fragment_length, (size_t)memory_share_length);
1597 return ffa_error(FFA_INVALID_PARAMETERS);
1598 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001599
J-Alves95df0ef2022-12-07 10:09:48 +00001600 /* The sender must match the caller. */
1601 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1602 vm_id_is_current_world(memory_region->sender)) ||
1603 (vm_id_is_current_world(from_locked.vm->id) &&
1604 memory_region->sender != from_locked.vm->id)) {
1605 dlog_verbose("Invalid memory sender ID.\n");
1606 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001607 }
1608
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001609	if (memory_region->receiver_count == 0U) {
1610 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001611 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001612 }
1613
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001614 /*
1615 * Ensure that the composite header is within the memory bounds and
1616 * doesn't overlap the first part of the message. Cast to uint64_t
1617 * to prevent overflow.
1618 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001619 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001620 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001621 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001622 min_length = receivers_end +
1623 sizeof(struct ffa_composite_memory_region) +
1624 sizeof(struct ffa_memory_region_constituent);
1625 if (min_length > memory_share_length) {
1626 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1627 (size_t)memory_share_length, (size_t)min_length);
1628 return ffa_error(FFA_INVALID_PARAMETERS);
1629 }
1630
1631 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001632 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001633
1634 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001635 * Check that the composite memory region descriptor is after the access
1636 * descriptors, is at least 16-byte aligned, and fits in the first
1637 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001638 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001639 if ((composite_memory_region_offset < receivers_end) ||
1640 (composite_memory_region_offset % 16 != 0) ||
1641 (composite_memory_region_offset >
1642 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1643 dlog_verbose(
1644 "Invalid composite memory region descriptor offset "
1645 "%u.\n",
1646 (size_t)composite_memory_region_offset);
1647 return ffa_error(FFA_INVALID_PARAMETERS);
1648 }
1649
1650 /*
1651 * Compute the start of the constituent regions. Already checked
1652 * to be not more than fragment_length and thus not more than
1653 * memory_share_length.
1654 */
1655 constituents_start = composite_memory_region_offset +
1656 sizeof(struct ffa_composite_memory_region);
1657 constituents_length = memory_share_length - constituents_start;
1658
1659 /*
1660 * Check that the number of constituents is consistent with the length
1661 * of the constituent region.
1662 */
1663 composite = ffa_memory_region_get_composite(memory_region, 0);
1664 if ((constituents_length %
1665 sizeof(struct ffa_memory_region_constituent) !=
1666 0) ||
1667 ((constituents_length /
1668 sizeof(struct ffa_memory_region_constituent)) !=
1669 composite->constituent_count)) {
1670 dlog_verbose("Invalid length %u or composite offset %u.\n",
1671 (size_t)memory_share_length,
1672 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001673 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001674 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001675 if (fragment_length < memory_share_length &&
1676 fragment_length < HF_MAILBOX_SIZE) {
1677 dlog_warning(
1678 "Initial fragment length %d smaller than mailbox "
1679 "size.\n",
1680 fragment_length);
1681 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001682
Andrew Walbrana65a1322020-04-06 19:32:32 +01001683 /*
1684 * Clear is not allowed for memory sharing, as the sender still has
1685 * access to the memory.
1686 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001687 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1688 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001689 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001690 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001691 }
1692
1693 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001694 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001695 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001696 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001697 }
1698
J-Alves363f5722022-04-25 17:37:37 +01001699	/* Check that the permissions are valid for each specified receiver. */
1700 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001701 struct ffa_memory_region_attributes receiver_permissions;
1702
1703 receiver = ffa_memory_region_get_receiver(memory_region, i);
1704 assert(receiver != NULL);
1705 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01001706 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001707 receiver_permissions.permissions;
1708 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01001709
1710 if (memory_region->sender == receiver_id) {
1711 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001712 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001713 }
Federico Recanati85090c42021-12-15 13:17:54 +01001714
J-Alves363f5722022-04-25 17:37:37 +01001715 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1716 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001717 struct ffa_memory_access *other_receiver =
1718 ffa_memory_region_get_receiver(memory_region,
1719 j);
1720 assert(other_receiver != NULL);
1721
J-Alves363f5722022-04-25 17:37:37 +01001722 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001723 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01001724 dlog_verbose(
1725 "Repeated receiver(%x) in memory send "
1726 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001727 other_receiver->receiver_permissions
1728 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01001729 return ffa_error(FFA_INVALID_PARAMETERS);
1730 }
1731 }
1732
1733 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001734 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01001735 dlog_verbose(
1736 "All ffa_memory_access should point to the "
1737 "same composite memory region offset.\n");
1738 return ffa_error(FFA_INVALID_PARAMETERS);
1739 }
1740
1741 data_access = ffa_get_data_access_attr(permissions);
1742 instruction_access =
1743 ffa_get_instruction_access_attr(permissions);
1744 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1745 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1746 dlog_verbose(
1747 "Reserved value for receiver permissions "
1748 "%#x.\n",
1749 permissions);
1750 return ffa_error(FFA_INVALID_PARAMETERS);
1751 }
1752 if (instruction_access !=
1753 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1754 dlog_verbose(
1755 "Invalid instruction access permissions %#x "
1756 "for sending memory.\n",
1757 permissions);
1758 return ffa_error(FFA_INVALID_PARAMETERS);
1759 }
1760 if (share_func == FFA_MEM_SHARE_32) {
1761 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1762 dlog_verbose(
1763 "Invalid data access permissions %#x "
1764 "for sharing memory.\n",
1765 permissions);
1766 return ffa_error(FFA_INVALID_PARAMETERS);
1767 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001768 /*
1769 * According to section 10.10.3 of the FF-A v1.1 EAC0
1770 * spec, NX is required for share operations (but must
1771 * not be specified by the sender) so set it in the
1772 * copy that we store, ready to be returned to the
1773 * retriever.
1774 */
1775 if (vm_id_is_current_world(receiver_id)) {
1776 ffa_set_instruction_access_attr(
1777 &permissions,
1778 FFA_INSTRUCTION_ACCESS_NX);
1779				receiver->receiver_permissions.permissions = permissions;
1780 }
J-Alves363f5722022-04-25 17:37:37 +01001781 }
1782 if (share_func == FFA_MEM_LEND_32 &&
1783 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1784 dlog_verbose(
1785 "Invalid data access permissions %#x for "
1786 "lending memory.\n",
1787 permissions);
1788 return ffa_error(FFA_INVALID_PARAMETERS);
1789 }
1790
1791 if (share_func == FFA_MEM_DONATE_32 &&
1792 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1793 dlog_verbose(
1794 "Invalid data access permissions %#x for "
1795 "donating memory.\n",
1796 permissions);
1797 return ffa_error(FFA_INVALID_PARAMETERS);
1798 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001799 }
1800
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001801 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1802 security_state =
1803 ffa_get_memory_security_attr(memory_region->attributes);
1804 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1805 dlog_verbose(
1806 "Invalid security state for memory share operation.\n");
1807 return ffa_error(FFA_INVALID_PARAMETERS);
1808 }
1809
Federico Recanatid937f5e2021-12-20 17:38:23 +01001810 /*
J-Alves807794e2022-06-16 13:42:47 +01001811	 * For a memory donate, or a lend with a single borrower, the memory type
1812 * shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001813 */
J-Alves807794e2022-06-16 13:42:47 +01001814 if (share_func == FFA_MEM_DONATE_32 ||
1815 (share_func == FFA_MEM_LEND_32 &&
1816 memory_region->receiver_count == 1)) {
1817 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1818 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1819 dlog_verbose(
1820 "Memory type shall not be specified by "
1821 "sender.\n");
1822 return ffa_error(FFA_INVALID_PARAMETERS);
1823 }
1824 } else {
1825 /*
1826 * Check that sender's memory attributes match Hafnium
1827 * expectations: Normal Memory, Inner shareable, Write-Back
1828 * Read-Allocate Write-Allocate Cacheable.
1829 */
1830 ret = ffa_memory_attributes_validate(memory_region->attributes);
1831 if (ret.func != FFA_SUCCESS_32) {
1832 return ret;
1833 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001834 }
1835
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001836 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001837}
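
/*
 * Illustrative sketch (not part of the original sources): the layout checks
 * performed by ffa_memory_send_validate() above, written out as arithmetic.
 * The names are the fields used in the function; the relations must all hold
 * for the first fragment of a well-formed send request.
 *
 *	receivers_end = memory_region->receivers_offset +
 *			memory_region->receiver_count *
 *				memory_region->memory_access_desc_size;
 *	composite_offset = receiver->composite_memory_region_offset;
 *	// composite_offset >= receivers_end, composite_offset % 16 == 0, and
 *	// the composite header must fit inside fragment_length.
 *	constituents_start = composite_offset +
 *			     sizeof(struct ffa_composite_memory_region);
 *	// (memory_share_length - constituents_start) must be an exact multiple
 *	// of sizeof(struct ffa_memory_region_constituent) (16 bytes) and match
 *	// composite->constituent_count entries.
 */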
1838
1839/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001840 * Gets the share state for continuing an operation to donate, lend or share
1841 * memory, and checks that it is a valid request.
1842 *
1843 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1844 * not.
1845 */
J-Alvesfdd29272022-07-19 13:16:31 +01001846struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001847 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001848 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001849 struct mpool *page_pool)
1850{
1851 struct ffa_memory_share_state *share_state;
1852 struct ffa_memory_region *memory_region;
1853
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001854 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001855
1856 /*
1857 * Look up the share state by handle and make sure that the VM ID
1858 * matches.
1859 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01001860 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00001861 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001862 dlog_verbose(
1863 "Invalid handle %#x for memory send continuation.\n",
1864 handle);
1865 return ffa_error(FFA_INVALID_PARAMETERS);
1866 }
1867 memory_region = share_state->memory_region;
1868
J-Alvesfdd29272022-07-19 13:16:31 +01001869 if (vm_id_is_current_world(from_vm_id) &&
1870 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001871 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1872 return ffa_error(FFA_INVALID_PARAMETERS);
1873 }
1874
1875 if (share_state->sending_complete) {
1876 dlog_verbose(
1877 "Sending of memory handle %#x is already complete.\n",
1878 handle);
1879 return ffa_error(FFA_INVALID_PARAMETERS);
1880 }
1881
1882 if (share_state->fragment_count == MAX_FRAGMENTS) {
1883 /*
1884 * Log a warning as this is a sign that MAX_FRAGMENTS should
1885 * probably be increased.
1886 */
1887 dlog_warning(
1888 "Too many fragments for memory share with handle %#x; "
1889 "only %d supported.\n",
1890 handle, MAX_FRAGMENTS);
1891 /* Free share state, as it's not possible to complete it. */
1892 share_state_free(share_states, share_state, page_pool);
1893 return ffa_error(FFA_NO_MEMORY);
1894 }
1895
1896 *share_state_ret = share_state;
1897
1898 return (struct ffa_value){.func = FFA_SUCCESS_32};
1899}
1900
1901/**
J-Alves95df0ef2022-12-07 10:09:48 +00001902 * Checks if there is at least one receiver from the other world.
1903 */
J-Alvesfdd29272022-07-19 13:16:31 +01001904bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001905 struct ffa_memory_region *memory_region)
1906{
1907 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001908 struct ffa_memory_access *receiver =
1909 ffa_memory_region_get_receiver(memory_region, i);
1910 assert(receiver != NULL);
1911 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
1912
1913 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00001914 return true;
1915 }
1916 }
1917 return false;
1918}
1919
1920/**
J-Alves9da280b2022-12-21 14:55:39 +00001921 * Validates a call to donate, lend or share memory in which Hafnium is the
1922 * designated allocator of the memory handle. In practice, this also means
1923 * Hafnium is responsible for managing the state structures for the transaction.
1924 * If Hafnium is the SPMC, it should allocate the memory handle when either the
1925 * sender is an SP or there is at least one borrower that is an SP.
1926 * If Hafnium is the hypervisor, it should allocate the memory handle when
1927 * the operation involves only NWd VMs.
1928 *
1929 * If validation goes well, Hafnium updates the stage-2 page tables of the
1930 * sender. Validation consists of checking if the message length and number of
1931 * memory region constituents match, and if the transition is valid for the
1932 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001933 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001934 * Assumes that the caller has already found and locked the sender VM and copied
1935 * the memory region descriptor from the sender's TX buffer to a freshly
1936 * allocated page from Hafnium's internal pool. The caller must have also
1937 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001938 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001939 * This function takes ownership of the `memory_region` passed in and will free
1940 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001941 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001942struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001943 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001944 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001945 uint32_t fragment_length, uint32_t share_func,
1946 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001947{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001948 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001949 struct share_states_locked share_states;
1950 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001951
1952 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001953 * If there is an error validating the `memory_region` then we need to
1954 * free it because we own it but we won't be storing it in a share state
1955 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001956 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001957 ret = ffa_memory_send_validate(from_locked, memory_region,
1958 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001959 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001960 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001961 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001962 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001963 }
1964
Andrew Walbrana65a1322020-04-06 19:32:32 +01001965 /* Set flag for share function, ready to be retrieved later. */
1966 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001967 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001968 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001969 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001970 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001971 case FFA_MEM_LEND_32:
1972 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001973 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001974 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001975 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001976 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001977 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001978 }
1979
Andrew Walbranca808b12020-05-15 17:22:28 +01001980 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001981 /*
1982 * Allocate a share state before updating the page table. Otherwise if
1983 * updating the page table succeeded but allocating the share state
1984 * failed then it would leave the memory in a state where nobody could
1985 * get it back.
1986 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01001987 share_state = allocate_share_state(share_states, share_func,
1988 memory_region, fragment_length,
1989 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00001990 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001991 dlog_verbose("Failed to allocate share state.\n");
1992 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001993 ret = ffa_error(FFA_NO_MEMORY);
1994 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001995 }
1996
Andrew Walbranca808b12020-05-15 17:22:28 +01001997 if (fragment_length == memory_share_length) {
1998 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001999 ret = ffa_memory_send_complete(
2000 from_locked, share_states, share_state, page_pool,
2001 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002002 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002003 /*
2004 * Use sender ID from 'memory_region' assuming
2005 * that at this point it has been validated:
2006 * - MBZ at virtual FF-A instance.
2007 */
J-Alves19e20cf2023-08-02 12:48:55 +01002008 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002009 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2010 ? memory_region->sender
2011 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01002012 ret = (struct ffa_value){
2013 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002014 .arg1 = (uint32_t)memory_region->handle,
2015 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002016 .arg3 = fragment_length,
2017 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002018 }
2019
2020out:
2021 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002022 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002023 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002024}
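
/*
 * Illustrative sketch (not part of the original sources): how the sender can
 * rebuild the 64-bit handle from the FFA_MEM_FRAG_RX value constructed above,
 * where arg1 carries the low 32 bits and arg2 the high 32 bits. `ret` is a
 * hypothetical struct ffa_value received by the sender.
 *
 *	ffa_memory_handle_t handle = (ffa_memory_handle_t)ret.arg1 |
 *				     ((ffa_memory_handle_t)ret.arg2 << 32);
 */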
2025
2026/**
J-Alves8505a8a2022-06-15 18:10:18 +01002027 * Continues an operation to donate, lend or share memory to a VM from the
2028 * current world. If this is the last fragment, it checks that the transition is valid
2029 * for the type of memory sending operation and updates the stage-2 page tables
2030 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002031 *
2032 * Assumes that the caller has already found and locked the sender VM and copied
2033 * the memory region descriptor from the sender's TX buffer to a freshly
2034 * allocated page from Hafnium's internal pool.
2035 *
2036 * This function takes ownership of the `fragment` passed in; it must not be
2037 * freed by the caller.
2038 */
2039struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2040 void *fragment,
2041 uint32_t fragment_length,
2042 ffa_memory_handle_t handle,
2043 struct mpool *page_pool)
2044{
2045 struct share_states_locked share_states = share_states_lock();
2046 struct ffa_memory_share_state *share_state;
2047 struct ffa_value ret;
2048 struct ffa_memory_region *memory_region;
2049
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002050 CHECK(is_aligned(fragment,
2051 alignof(struct ffa_memory_region_constituent)));
2052 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2053 0) {
2054 dlog_verbose("Fragment length %u misaligned.\n",
2055 fragment_length);
2056 ret = ffa_error(FFA_INVALID_PARAMETERS);
2057 goto out_free_fragment;
2058 }
2059
Andrew Walbranca808b12020-05-15 17:22:28 +01002060 ret = ffa_memory_send_continue_validate(share_states, handle,
2061 &share_state,
2062 from_locked.vm->id, page_pool);
2063 if (ret.func != FFA_SUCCESS_32) {
2064 goto out_free_fragment;
2065 }
2066 memory_region = share_state->memory_region;
2067
J-Alves95df0ef2022-12-07 10:09:48 +00002068 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002069 dlog_error(
2070 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002071 "other world. This should never happen, and indicates "
2072 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01002073 "EL3 code.\n");
2074 ret = ffa_error(FFA_INVALID_PARAMETERS);
2075 goto out_free_fragment;
2076 }
2077
2078 /* Add this fragment. */
2079 share_state->fragments[share_state->fragment_count] = fragment;
2080 share_state->fragment_constituent_counts[share_state->fragment_count] =
2081 fragment_length / sizeof(struct ffa_memory_region_constituent);
2082 share_state->fragment_count++;
2083
2084 /* Check whether the memory send operation is now ready to complete. */
2085 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002086 ret = ffa_memory_send_complete(
2087 from_locked, share_states, share_state, page_pool,
2088 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002089 } else {
2090 ret = (struct ffa_value){
2091 .func = FFA_MEM_FRAG_RX_32,
2092 .arg1 = (uint32_t)handle,
2093 .arg2 = (uint32_t)(handle >> 32),
2094 .arg3 = share_state_next_fragment_offset(share_states,
2095 share_state)};
2096 }
2097 goto out;
2098
2099out_free_fragment:
2100 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002101
2102out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002103 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002104 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002105}
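
/*
 * Illustrative sketch (not part of the original sources): the relation between
 * a continuation fragment's length and the number of constituents recorded
 * above. With 16-byte constituent entries, a 16 KiB fragment can carry at most
 * 1024 constituents.
 *
 *	uint32_t constituent_count =
 *		fragment_length / sizeof(struct ffa_memory_region_constituent);
 */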
2106
Andrew Walbranca808b12020-05-15 17:22:28 +01002107/** Clean up after the receiver has finished retrieving a memory region. */
2108static void ffa_memory_retrieve_complete(
2109 struct share_states_locked share_states,
2110 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2111{
2112 if (share_state->share_func == FFA_MEM_DONATE_32) {
2113 /*
2114 * Memory that has been donated can't be relinquished,
2115 * so no need to keep the share state around.
2116 */
2117 share_state_free(share_states, share_state, page_pool);
2118 dlog_verbose("Freed share state for donate.\n");
2119 }
2120}
2121
J-Alves2d8457f2022-10-05 11:06:41 +01002122/**
2123 * Initialises the given memory region descriptor to be used for an
2124 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2125 * fragment.
2126 * The memory region descriptor is initialized according to the retriever's
2127 * FF-A version.
2128 *
2129 * Returns true on success, or false if the given constituents won't all fit in
2130 * the first fragment.
2131 */
2132static bool ffa_retrieved_memory_region_init(
2133 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002134 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002135 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002136 ffa_id_t receiver_id, uint32_t memory_access_desc_size,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002137 ffa_memory_access_permissions_t permissions,
2138 struct ffa_memory_access_impdef receiver_impdef_val,
2139 uint32_t page_count, uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002140 const struct ffa_memory_region_constituent constituents[],
2141 uint32_t fragment_constituent_count, uint32_t *total_length,
2142 uint32_t *fragment_length)
2143{
2144 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002145 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002146 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002147 uint32_t constituents_offset;
2148 uint32_t receiver_count;
2149
2150 assert(response != NULL);
2151
2152 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2153 struct ffa_memory_region_v1_0 *retrieve_response =
2154 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002155 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002156
J-Alves5da37d92022-10-24 16:33:48 +01002157 ffa_memory_region_init_header_v1_0(
2158 retrieve_response, sender, attributes, flags, handle, 0,
2159 RECEIVERS_COUNT_IN_RETRIEVE_RESP);
J-Alves2d8457f2022-10-05 11:06:41 +01002160
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002161 receiver = (struct ffa_memory_access_v1_0 *)
2162 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002163 receiver_count = retrieve_response->receiver_count;
2164
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002165 /*
2166		 * Initialized here because memory retrieve responses currently
2167		 * expect exactly one borrower to be specified.
2168 */
Daniel Boulby59ffee92023-11-02 18:26:26 +00002169 ffa_memory_access_init_v1_0(
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002170 receiver, receiver_id,
2171 ffa_get_data_access_attr(permissions),
2172 ffa_get_instruction_access_attr(permissions), flags);
2173
2174 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002175 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002176 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2177 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002178
2179 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2180 retrieve_response, 0);
2181 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002182 struct ffa_memory_region *retrieve_response =
2183 (struct ffa_memory_region *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002184 struct ffa_memory_access *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002185
2186 ffa_memory_region_init_header(retrieve_response, sender,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002187 attributes, flags, handle, 0, 1,
2188 memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002189
J-Alves2d8457f2022-10-05 11:06:41 +01002190 receiver_count = retrieve_response->receiver_count;
2191
2192 /*
2193		 * Note that `sizeof(struct ffa_memory_region)` and
2194		 * `sizeof(struct ffa_memory_access)` must both be multiples of
2195		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2196 * guaranteed that the offset we calculate here is aligned to a
2197 * 64-bit boundary and so 64-bit values can be copied without
2198 * alignment faults.
2199 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002200 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002201 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002202 (uint32_t)(receiver_count *
2203 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002204
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002205 receiver = ffa_memory_region_get_receiver(retrieve_response, 0);
2206 assert(receiver != NULL);
2207
2208 /*
2209		 * Initialized here because memory retrieve responses currently
2210		 * expect exactly one borrower to be specified.
2211 */
Daniel Boulby59ffee92023-11-02 18:26:26 +00002212 ffa_memory_access_init(
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002213 receiver, receiver_id,
2214 ffa_get_data_access_attr(permissions),
Daniel Boulby59ffee92023-11-02 18:26:26 +00002215 ffa_get_instruction_access_attr(permissions), flags,
2216 &receiver_impdef_val);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002217 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002218 composite_memory_region =
2219 ffa_memory_region_get_composite(retrieve_response, 0);
2220 }
2221
J-Alves2d8457f2022-10-05 11:06:41 +01002222 assert(composite_memory_region != NULL);
2223
J-Alves2d8457f2022-10-05 11:06:41 +01002224 composite_memory_region->page_count = page_count;
2225 composite_memory_region->constituent_count = total_constituent_count;
2226 composite_memory_region->reserved_0 = 0;
2227
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002228 constituents_offset =
2229 composite_offset + sizeof(struct ffa_composite_memory_region);
J-Alves2d8457f2022-10-05 11:06:41 +01002230 if (constituents_offset +
2231 fragment_constituent_count *
2232 sizeof(struct ffa_memory_region_constituent) >
2233 response_max_size) {
2234 return false;
2235 }
2236
2237 for (i = 0; i < fragment_constituent_count; ++i) {
2238 composite_memory_region->constituents[i] = constituents[i];
2239 }
2240
2241 if (total_length != NULL) {
2242 *total_length =
2243 constituents_offset +
2244 composite_memory_region->constituent_count *
2245 sizeof(struct ffa_memory_region_constituent);
2246 }
2247 if (fragment_length != NULL) {
2248 *fragment_length =
2249 constituents_offset +
2250 fragment_constituent_count *
2251 sizeof(struct ffa_memory_region_constituent);
2252 }
2253
2254 return true;
2255}
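
/*
 * Illustrative sketch (not part of the original sources): how the two lengths
 * reported by ffa_retrieved_memory_region_init() relate when the constituents
 * do not all fit in the first fragment. `constituents_offset` is the offset
 * computed in the function; the remaining constituents are delivered through
 * FFA_MEM_FRAG_RX/FFA_MEM_FRAG_TX.
 *
 *	total_length = constituents_offset +
 *		       total_constituent_count *
 *			       sizeof(struct ffa_memory_region_constituent);
 *	fragment_length = constituents_offset +
 *			  fragment_constituent_count *
 *				  sizeof(struct ffa_memory_region_constituent);
 *	// fragment_length <= total_length
 */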
2256
J-Alves96de29f2022-04-26 16:05:24 +01002257/**
2258 * Validates the retrieved permissions against those specified by the lender
2259 * of the memory share operation. Optionally, sets the permissions to be used
2260 * for the S2 mapping through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01002261 * Returns FFA_SUCCESS if all the fields are valid, or FFA_ERROR with error code:
2262 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2263 * specification for each ABI.
2264 * - FFA_DENIED -> if the permissions specified by the retriever are not
2265 * less permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002266 */
J-Alvesdcad8992023-09-15 14:10:35 +01002267static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2268 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002269 enum ffa_data_access requested_data_access,
2270 enum ffa_instruction_access sent_instruction_access,
2271 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002272 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002273{
2274 switch (sent_data_access) {
2275 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2276 case FFA_DATA_ACCESS_RW:
2277 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2278 requested_data_access == FFA_DATA_ACCESS_RW) {
2279 if (permissions != NULL) {
2280 ffa_set_data_access_attr(permissions,
2281 FFA_DATA_ACCESS_RW);
2282 }
2283 break;
2284 }
2285 /* Intentional fall-through. */
2286 case FFA_DATA_ACCESS_RO:
2287 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2288 requested_data_access == FFA_DATA_ACCESS_RO) {
2289 if (permissions != NULL) {
2290 ffa_set_data_access_attr(permissions,
2291 FFA_DATA_ACCESS_RO);
2292 }
2293 break;
2294 }
2295 dlog_verbose(
2296 "Invalid data access requested; sender specified "
2297 "permissions %#x but receiver requested %#x.\n",
2298 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002299 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002300 case FFA_DATA_ACCESS_RESERVED:
2301 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2302 "checked before this point.");
2303 }
2304
J-Alvesdcad8992023-09-15 14:10:35 +01002305 /*
2306	 * For operations with a single borrower, if it is an FFA_MEM_LEND
2307	 * or FFA_MEM_DONATE, the retriever should have specified the
2308 * instruction permissions it wishes to receive.
2309 */
2310 switch (share_func) {
2311 case FFA_MEM_SHARE_32:
2312 if (requested_instruction_access !=
2313 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2314 dlog_verbose(
2315 "%s: for share instruction permissions must "
2316 "NOT be specified.\n",
2317 __func__);
2318 return ffa_error(FFA_INVALID_PARAMETERS);
2319 }
2320 break;
2321 case FFA_MEM_LEND_32:
2322 /*
2323 * For operations with multiple borrowers only permit XN
2324 * permissions, and both Sender and borrower should have used
2325 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2326 */
2327 if (multiple_borrowers) {
2328 if (requested_instruction_access !=
2329 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2330 dlog_verbose(
2331 "%s: lend/share/donate with multiple "
2332 "borrowers "
2333 "instruction permissions must NOT be "
2334 "specified.\n",
2335 __func__);
2336 return ffa_error(FFA_INVALID_PARAMETERS);
2337 }
2338 break;
2339 }
2340 /* Fall through if the operation targets a single borrower. */
2341 case FFA_MEM_DONATE_32:
2342 if (!multiple_borrowers &&
2343 requested_instruction_access ==
2344 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2345 dlog_verbose(
2346 "%s: for lend/donate with single borrower "
2347 "instruction permissions must be speficified "
2348 "by borrower\n",
2349 __func__);
2350 return ffa_error(FFA_INVALID_PARAMETERS);
2351 }
2352 break;
2353 default:
2354 panic("%s: Wrong func id provided.\n", __func__);
2355 }
2356
J-Alves96de29f2022-04-26 16:05:24 +01002357 switch (sent_instruction_access) {
2358 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2359 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002360 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002361 if (permissions != NULL) {
2362 ffa_set_instruction_access_attr(
2363 permissions, FFA_INSTRUCTION_ACCESS_X);
2364 }
2365 break;
2366 }
J-Alvesdcad8992023-09-15 14:10:35 +01002367 /*
2368 * Fall through if requested permissions are less
2369 * permissive than those provided by the sender.
2370 */
J-Alves96de29f2022-04-26 16:05:24 +01002371 case FFA_INSTRUCTION_ACCESS_NX:
2372 if (requested_instruction_access ==
2373 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2374 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2375 if (permissions != NULL) {
2376 ffa_set_instruction_access_attr(
2377 permissions, FFA_INSTRUCTION_ACCESS_NX);
2378 }
2379 break;
2380 }
2381 dlog_verbose(
2382 "Invalid instruction access requested; sender "
2383 "specified permissions %#x but receiver requested "
2384 "%#x.\n",
2385 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002386 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002387 case FFA_INSTRUCTION_ACCESS_RESERVED:
2388 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2389 "be checked before this point.");
2390 }
2391
J-Alvesdcad8992023-09-15 14:10:35 +01002392 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002393}
2394
2395/**
2396 * Validate the receivers' permissions in the retrieve request against those
2397 * specified by the lender.
2398 * Returns, in the `permissions` argument, the permissions to set at S2 for
2399 * the caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002400 * The function checks the flag to bypass the multiple borrower checks:
2401 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2402 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2403 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid; any other
2404 * permissions provided are ignored.
J-Alves96de29f2022-04-26 16:05:24 +01002405 */
2406static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2407 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002408 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002409 ffa_memory_access_permissions_t *permissions, uint32_t func_id,
2410 struct ffa_memory_access_impdef *to_impdef_val)
J-Alves96de29f2022-04-26 16:05:24 +01002411{
2412 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002413 bool bypass_multi_receiver_check =
2414 (retrieve_request->flags &
2415 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002416 const uint32_t region_receiver_count = memory_region->receiver_count;
2417 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002418
2419 assert(permissions != NULL);
2420
J-Alves3456e032023-07-20 12:20:05 +01002421 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002422 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002423 dlog_verbose(
2424				"Retrieve request should contain the same list "
2425				"of borrowers as specified by the lender.\n");
2426 return ffa_error(FFA_INVALID_PARAMETERS);
2427 }
2428 } else {
2429 if (retrieve_request->receiver_count != 1) {
2430 dlog_verbose(
2431				"With the bypass multiple borrowers check flag "
2432				"set, the receiver list must have size 1 (%x).\n",
2433				retrieve_request->receiver_count);
2434 return ffa_error(FFA_INVALID_PARAMETERS);
2435 }
J-Alves96de29f2022-04-26 16:05:24 +01002436 }
2437
2438 retrieve_receiver_index = retrieve_request->receiver_count;
2439
2440 /* Should be populated with the permissions of the retriever. */
2441 *permissions = 0;
2442
2443 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2444 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002445 struct ffa_memory_access *retrieve_request_receiver =
2446 ffa_memory_region_get_receiver(retrieve_request, i);
2447 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002448 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002449 retrieve_request_receiver->receiver_permissions
2450 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002451 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002452 retrieve_request_receiver->receiver_permissions
2453 .receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002454 bool found_to_id = current_receiver_id == to_vm_id;
2455
J-Alves3456e032023-07-20 12:20:05 +01002456 if (bypass_multi_receiver_check && !found_to_id) {
2457 dlog_verbose(
2458 "Bypass multiple borrower check for id %x.\n",
2459 current_receiver_id);
2460 continue;
2461 }
2462
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002463 if (retrieve_request_receiver->composite_memory_region_offset !=
2464 0U) {
2465 dlog_verbose(
2466				"Retriever-specified address ranges are not "
2467				"supported (got offset %d).\n",
2468 retrieve_request_receiver
2469 ->composite_memory_region_offset);
2470 return ffa_error(FFA_INVALID_PARAMETERS);
2471 }
2472
J-Alves96de29f2022-04-26 16:05:24 +01002473 /*
2474 * Find the current receiver in the transaction descriptor from
2475 * sender.
2476 */
2477 uint32_t mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002478 ffa_memory_region_get_receiver_index(
2479 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002480
2481 if (mem_region_receiver_index ==
2482 memory_region->receiver_count) {
2483 dlog_verbose("%s: receiver %x not found\n", __func__,
2484 current_receiver_id);
2485 return ffa_error(FFA_DENIED);
2486 }
2487
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002488 struct ffa_memory_access *receiver =
2489 ffa_memory_region_get_receiver(
2490 memory_region, mem_region_receiver_index);
2491 assert(receiver != NULL);
2492
2493 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002494
2495 if (found_to_id) {
2496 retrieve_receiver_index = i;
2497 }
2498
2499 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002500 * Check if retrieve request memory access list is valid:
2501 * - The retrieve request complies with the specification.
2502 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002503 */
J-Alvesdcad8992023-09-15 14:10:35 +01002504 ret = ffa_memory_retrieve_is_memory_access_valid(
2505 func_id, ffa_get_data_access_attr(sent_permissions),
2506 ffa_get_data_access_attr(requested_permissions),
2507 ffa_get_instruction_access_attr(sent_permissions),
2508 ffa_get_instruction_access_attr(requested_permissions),
2509 found_to_id ? permissions : NULL,
2510 region_receiver_count > 1);
2511 if (ret.func != FFA_SUCCESS_32) {
2512 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002513 }
2514
2515 /*
2516		 * Can't request the partition manager to clear memory if only
2517		 * provided with RO permissions.
2518 */
2519 if (found_to_id &&
2520 (ffa_get_data_access_attr(*permissions) ==
2521 FFA_DATA_ACCESS_RO) &&
2522 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2523 0U) {
2524 dlog_verbose(
2525				"Receiver with RO permissions can not request "
2526				"the memory to be cleared.\n");
2527 return ffa_error(FFA_DENIED);
2528 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002529
2530 /*
2531		 * Check that the impdef value in the retrieve_request matches
2532		 * the value in the original memory send.
2533 */
2534 if (ffa_version_from_memory_access_desc_size(
2535 memory_region->memory_access_desc_size) >=
2536 MAKE_FFA_VERSION(1, 2) &&
2537 ffa_version_from_memory_access_desc_size(
2538 retrieve_request->memory_access_desc_size) >=
2539 MAKE_FFA_VERSION(1, 2)) {
2540 if (found_to_id) {
2541 *to_impdef_val =
2542 retrieve_request_receiver->impdef;
2543 }
2544 if (receiver->impdef.val[0] !=
2545 retrieve_request_receiver->impdef.val[0] ||
2546 receiver->impdef.val[1] !=
2547 retrieve_request_receiver->impdef.val[1]) {
2548 dlog_verbose(
2549 "Impdef value in memory send does not "
2550					"match retrieve request value: "
2551					"send value %#x %#x, retrieve request "
2552					"value %#x %#x.\n",
2553 receiver->impdef.val[0],
2554 receiver->impdef.val[1],
2555 retrieve_request_receiver->impdef
2556 .val[0],
2557 retrieve_request_receiver->impdef
2558 .val[1]);
2559 return ffa_error(FFA_INVALID_PARAMETERS);
2560 }
2561 }
J-Alves96de29f2022-04-26 16:05:24 +01002562 }
2563
2564 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2565 dlog_verbose(
2566 "Retrieve request does not contain caller's (%x) "
2567 "permissions\n",
2568 to_vm_id);
2569 return ffa_error(FFA_INVALID_PARAMETERS);
2570 }
2571
2572 return (struct ffa_value){.func = FFA_SUCCESS_32};
2573}
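
/*
 * Illustrative sketch only (not part of the upstream code): a borrower
 * filling in its own entry of the retrieve request receiver list so that it
 * passes the checks above. `request`, `own_id` and `requested_permissions`
 * are placeholder names.
 *
 *	struct ffa_memory_access *entry =
 *		ffa_memory_region_get_receiver(request, 0);
 *
 *	entry->receiver_permissions.receiver = own_id;
 *	entry->receiver_permissions.permissions = requested_permissions;
 *	// The relayer chooses the address ranges, so the composite memory
 *	// region offset must be left as zero.
 *	entry->composite_memory_region_offset = 0;
 */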
2574
J-Alvesa9cd7e32022-07-01 13:49:33 +01002575/*
2576 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2577 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2578 * of a pending memory sharing operation whose allocator is the SPM, for
2579 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2580 * the memory region descriptor of the retrieve request must be zeroed with the
2581 * exception of the sender ID and handle.
2582 */
2583bool is_ffa_memory_retrieve_borrower_request(struct ffa_memory_region *request,
2584 struct vm_locked to_locked)
2585{
2586 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2587 request->attributes == 0U && request->flags == 0U &&
2588 request->tag == 0U && request->receiver_count == 0U &&
2589 plat_ffa_memory_handle_allocated_by_current_world(
2590 request->handle);
2591}
2592
2593/*
2594 * Helper to reset the count of fragments retrieved by the hypervisor, once
 * it has retrieved them all.
2595 */
2596static void ffa_memory_retrieve_complete_from_hyp(
2597 struct ffa_memory_share_state *share_state)
2598{
2599 if (share_state->hypervisor_fragment_count ==
2600 share_state->fragment_count) {
2601 share_state->hypervisor_fragment_count = 0;
2602 }
2603}
2604
J-Alves089004f2022-07-13 14:25:44 +01002605/**
2606 * Validate the memory region descriptor provided by the borrower in
2607 * FFA_MEM_RETRIEVE_REQ against the memory region saved from the lender's
2608 * memory sharing call.
2609 */
2610static struct ffa_value ffa_memory_retrieve_validate(
J-Alves19e20cf2023-08-02 12:48:55 +01002611 ffa_id_t receiver_id, struct ffa_memory_region *retrieve_request,
J-Alves089004f2022-07-13 14:25:44 +01002612 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2613 uint32_t share_func)
2614{
2615 ffa_memory_region_flags_t transaction_type =
2616 retrieve_request->flags &
2617 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002618 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01002619
2620 assert(retrieve_request != NULL);
2621 assert(memory_region != NULL);
2622 assert(receiver_index != NULL);
2623 assert(retrieve_request->sender == memory_region->sender);
2624
2625 /*
2626 * Check that the transaction type expected by the receiver is
2627 * correct, if it has been specified.
2628 */
2629 if (transaction_type !=
2630 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2631 transaction_type != (memory_region->flags &
2632 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2633 dlog_verbose(
2634 "Incorrect transaction type %#x for "
2635 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2636 transaction_type,
2637 memory_region->flags &
2638 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2639 retrieve_request->handle);
2640 return ffa_error(FFA_INVALID_PARAMETERS);
2641 }
2642
2643 if (retrieve_request->tag != memory_region->tag) {
2644 dlog_verbose(
2645 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2646 "%d for handle %#x.\n",
2647 retrieve_request->tag, memory_region->tag,
2648 retrieve_request->handle);
2649 return ffa_error(FFA_INVALID_PARAMETERS);
2650 }
2651
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002652 *receiver_index = ffa_memory_region_get_receiver_index(memory_region,
2653 receiver_id);
J-Alves089004f2022-07-13 14:25:44 +01002654
2655 if (*receiver_index == memory_region->receiver_count) {
2656 dlog_verbose(
2657 "Incorrect receiver VM ID %d for "
2658 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01002659 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002660 return ffa_error(FFA_INVALID_PARAMETERS);
2661 }
2662
2663 if ((retrieve_request->flags &
2664 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2665 dlog_verbose(
2666			"Retriever specified 'address range alignment "
2667			"hint', which is not supported.\n");
2668 return ffa_error(FFA_INVALID_PARAMETERS);
2669 }
2670 if ((retrieve_request->flags &
2671 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2672 dlog_verbose(
2673 "Bits 8-5 must be zero in memory region's flags "
2674 "(address range alignment hint not supported).\n");
2675 return ffa_error(FFA_INVALID_PARAMETERS);
2676 }
2677
2678 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2679 dlog_verbose(
2680			"Bits 31-11 must be zero in memory region's flags.\n");
2681 return ffa_error(FFA_INVALID_PARAMETERS);
2682 }
2683
2684 if (share_func == FFA_MEM_SHARE_32 &&
2685 (retrieve_request->flags &
2686 (FFA_MEMORY_REGION_FLAG_CLEAR |
2687 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2688 dlog_verbose(
2689			"Memory share operations can't request the memory "
2690			"region to be cleared.\n");
2691 return ffa_error(FFA_INVALID_PARAMETERS);
2692 }
2693
2694 /*
2695 * If the borrower needs the memory to be cleared before mapping
2696 * to its address space, the sender should have set the flag
2697 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2698 * FFA_DENIED.
2699 */
2700 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2701 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2702 dlog_verbose(
2703 "Borrower needs memory cleared. Sender needs to set "
2704 "flag for clearing memory.\n");
2705 return ffa_error(FFA_DENIED);
2706 }
2707
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002708 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2709 security_state =
2710 ffa_get_memory_security_attr(retrieve_request->attributes);
2711 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2712 dlog_verbose(
2713 "Invalid security state for memory retrieve request "
2714 "operation.\n");
2715 return ffa_error(FFA_INVALID_PARAMETERS);
2716 }
2717
J-Alves089004f2022-07-13 14:25:44 +01002718 /*
2719 * If memory type is not specified, bypass validation of memory
2720 * attributes in the retrieve request. The retriever is expecting to
2721 * obtain this information from the SPMC.
2722 */
2723 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2724 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2725 return (struct ffa_value){.func = FFA_SUCCESS_32};
2726 }
2727
2728 /*
2729 * Ensure receiver's attributes are compatible with how
2730 * Hafnium maps memory: Normal Memory, Inner shareable,
2731 * Write-Back Read-Allocate Write-Allocate Cacheable.
2732 */
2733 return ffa_memory_attributes_validate(retrieve_request->attributes);
2734}
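
/*
 * Illustrative sketch only: header fields of a retrieve request that would
 * pass the checks above for a lent region (placeholder names; the exact
 * flag/attribute encodings are those defined by the FF-A specification):
 *
 *	request->sender = lender_id;		// must match the saved region
 *	request->handle = handle;
 *	request->tag = tag_used_on_lend;	// must match the lender's tag
 *	request->flags = 0;			// transaction type unspecified,
 *						// no clear, no alignment hint
 *	request->attributes = 0;		// memory type not specified, so
 *						// the relayer picks attributes
 */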
2735
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002736struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2737 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002738 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002739 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002740{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002741 uint32_t expected_retrieve_request_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002742 retrieve_request->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002743 (uint32_t)(retrieve_request->receiver_count *
2744 retrieve_request->memory_access_desc_size);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002745 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002746 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002747 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002748 uint32_t memory_to_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002749 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002750 struct ffa_memory_share_state *share_state;
2751 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002752 struct ffa_composite_memory_region *composite;
2753 uint32_t total_length;
2754 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01002755 ffa_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002756 bool is_send_complete = false;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002757 ffa_memory_attributes_t attributes;
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002758 struct ffa_memory_access_impdef receiver_impdef_val;
2759 uint64_t retrieve_memory_access_desc_size =
2760 retrieve_request->memory_access_desc_size;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002761
2762 dump_share_states();
2763
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002764 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002765 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002766 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002767 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002768 expected_retrieve_request_length,
2769 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002770 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002771 }
2772
2773 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002774 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00002775 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002776 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002777 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002778 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002779 goto out;
2780 }
2781
J-Alves96de29f2022-04-26 16:05:24 +01002782 if (!share_state->sending_complete) {
2783 dlog_verbose(
2784 "Memory with handle %#x not fully sent, can't "
2785 "retrieve.\n",
2786 handle);
2787 ret = ffa_error(FFA_INVALID_PARAMETERS);
2788 goto out;
2789 }
2790
Andrew Walbrana65a1322020-04-06 19:32:32 +01002791 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002792
Andrew Walbrana65a1322020-04-06 19:32:32 +01002793 CHECK(memory_region != NULL);
2794
J-Alves089004f2022-07-13 14:25:44 +01002795 if (retrieve_request->sender != memory_region->sender) {
2796 dlog_verbose(
2797			"Sender in retrieve request doesn't match the sender "
2798			"of the memory region with handle %#x.\n",
2799 handle);
J-Alves41d4fef2023-11-16 16:20:09 +00002800 ret = ffa_error(FFA_DENIED);
J-Alves089004f2022-07-13 14:25:44 +01002801 goto out;
2802 }
J-Alves96de29f2022-04-26 16:05:24 +01002803
J-Alvesa9cd7e32022-07-01 13:49:33 +01002804 if (!is_ffa_memory_retrieve_borrower_request(retrieve_request,
2805 to_locked)) {
2806 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002807
J-Alvesb5084cf2022-07-06 14:20:12 +01002808 /*
2809 * The SPMC can only process retrieve requests to memory share
2810 * operations with one borrower from the other world. It can't
2811 * determine the ID of the NWd VM that invoked the retrieve
2812 * request interface call. It relies on the hypervisor to
2813 * validate the caller's ID against that provided in the
2814 * `receivers` list of the retrieve response.
2815 * In case there is only one borrower from the NWd in the
2816 * transaction descriptor, record that in the `receiver_id` for
2817 * later use, and validate in the retrieve request message.
J-Alves3fa82aa2023-09-20 18:19:21 +01002818		 * This limitation is due to the fact that the SPMC can't tell
2819		 * which index in the memory share state structures to update.
J-Alvesb5084cf2022-07-06 14:20:12 +01002820 */
2821 if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
2822 uint32_t other_world_count = 0;
2823
2824 for (uint32_t i = 0; i < memory_region->receiver_count;
2825 i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002826 struct ffa_memory_access *receiver =
2827 ffa_memory_region_get_receiver(
2828 retrieve_request, 0);
2829
2830 assert(receiver != NULL);
J-Alvesb5084cf2022-07-06 14:20:12 +01002831 receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002832 receiver->receiver_permissions.receiver;
J-Alvesb5084cf2022-07-06 14:20:12 +01002833 if (!vm_id_is_current_world(receiver_id)) {
2834 other_world_count++;
2835 }
2836 }
2837 if (other_world_count > 1) {
2838 dlog_verbose(
2839 "Support one receiver from the other "
2840 "world.\n");
2841				ret = ffa_error(FFA_NOT_SUPPORTED);
				goto out;
2842 }
2843 }
2844
2845 /*
2846		 * Validate the retrieve request against what was sent by the
2847		 * sender. The function outputs the `receiver_index` within the
J-Alves3fa82aa2023-09-20 18:19:21 +01002848 * provided memory region.
J-Alvesb5084cf2022-07-06 14:20:12 +01002849 */
J-Alves089004f2022-07-13 14:25:44 +01002850 ret = ffa_memory_retrieve_validate(
2851 receiver_id, retrieve_request, memory_region,
2852 &receiver_index, share_state->share_func);
2853 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002854 goto out;
2855 }
2856
2857 if (share_state->retrieved_fragment_count[receiver_index] !=
2858 0U) {
2859 dlog_verbose(
2860 "Memory with handle %#x already retrieved.\n",
2861 handle);
2862 ret = ffa_error(FFA_DENIED);
2863 goto out;
2864 }
2865
J-Alves3fa82aa2023-09-20 18:19:21 +01002866 /*
2867 * Validate the requested permissions against the sent
2868 * permissions.
2869		 * Outputs the permissions to give to the retriever in its
2870		 * stage-2 page tables.
2871 */
J-Alvesa9cd7e32022-07-01 13:49:33 +01002872 ret = ffa_memory_retrieve_validate_memory_access_list(
2873 memory_region, retrieve_request, receiver_id,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002874 &permissions, share_state->share_func,
2875 &receiver_impdef_val);
J-Alves614d9f42022-06-28 14:03:10 +01002876 if (ret.func != FFA_SUCCESS_32) {
2877 goto out;
2878 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002879
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002880 memory_to_mode = ffa_memory_permissions_to_mode(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002881 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002882
J-Alvesa9cd7e32022-07-01 13:49:33 +01002883 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01002884 to_locked, share_state->fragments,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002885 share_state->fragment_constituent_counts,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002886 share_state->fragment_count, memory_to_mode,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002887 share_state->share_func, false, page_pool);
2888
2889 if (ret.func != FFA_SUCCESS_32) {
2890 goto out;
2891 }
2892
2893 share_state->retrieved_fragment_count[receiver_index] = 1;
2894 is_send_complete =
2895 share_state->retrieved_fragment_count[receiver_index] ==
2896 share_state->fragment_count;
J-Alves3c5b2072022-11-21 12:45:40 +00002897
2898 share_state->clear_after_relinquish =
2899 (retrieve_request->flags &
2900 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) != 0U;
2901
J-Alvesa9cd7e32022-07-01 13:49:33 +01002902 } else {
2903 if (share_state->hypervisor_fragment_count != 0U) {
2904 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002905 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002906 "the hypervisor.\n",
2907 handle);
2908 ret = ffa_error(FFA_DENIED);
2909 goto out;
2910 }
2911
2912 share_state->hypervisor_fragment_count = 1;
2913
2914 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002915 }
2916
J-Alvesb5084cf2022-07-06 14:20:12 +01002917 /* VMs acquire the RX buffer from SPMC. */
2918 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2919
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002920 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002921 * Copy response to RX buffer of caller and deliver the message.
2922 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002923 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002924 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002925 composite = ffa_memory_region_get_composite(memory_region, 0);
2926 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002927 * Constituents which we received in the first fragment should
2928 * always fit in the first fragment we are sending, because the
2929 * header is the same size in both cases and we have a fixed
2930 * message buffer size. So `ffa_retrieved_memory_region_init`
2931 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002932 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002933
2934 /*
2935 * Set the security state in the memory retrieve response attributes
2936 * if specified by the target mode.
2937 */
2938 attributes = plat_ffa_memory_security_mode(
2939 memory_region->attributes, share_state->sender_orig_mode);
2940
Andrew Walbranca808b12020-05-15 17:22:28 +01002941 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01002942 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002943 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002944 memory_region->flags, handle, receiver_id,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002945 retrieve_memory_access_desc_size, permissions,
2946 receiver_impdef_val, composite->page_count,
2947 composite->constituent_count, share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01002948 share_state->fragment_constituent_counts[0], &total_length,
2949 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01002950
Andrew Walbranca808b12020-05-15 17:22:28 +01002951 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002952 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002953 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002954 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002955
J-Alvesa9cd7e32022-07-01 13:49:33 +01002956 if (is_send_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002957 ffa_memory_retrieve_complete(share_states, share_state,
2958 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002959 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002960 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002961 .arg1 = total_length,
2962 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002963out:
2964 share_states_unlock(&share_states);
2965 dump_share_states();
2966 return ret;
2967}
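
/*
 * Illustrative borrower-side sketch only (not SPMC code): a retriever
 * typically pairs FFA_MEM_RETRIEVE_REQ with FFA_MEM_FRAG_RX until the whole
 * descriptor, `total_length` bytes, has been received. The wrapper names
 * below are placeholders for whatever ABI helpers the caller uses.
 *
 *	struct ffa_value ret = ffa_mem_retrieve_req(length, fragment_length);
 *	uint32_t received = ret.arg2;	// first fragment, from the response
 *
 *	while (received < ret.arg1) {	// arg1 holds the total length
 *		struct ffa_value frag = ffa_mem_frag_rx(handle, received);
 *
 *		received += frag.arg3;	// arg3 holds this fragment's length
 *	}
 */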
2968
J-Alves5da37d92022-10-24 16:33:48 +01002969/**
2970 * Determine expected fragment offset according to the FF-A version of
2971 * the caller.
2972 */
2973static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
2974 struct ffa_memory_region *memory_region,
2975 uint32_t retrieved_constituents_count, uint32_t ffa_version)
2976{
2977 uint32_t expected_fragment_offset;
2978 uint32_t composite_constituents_offset;
2979
Kathleen Capellae4fe2962023-09-01 17:08:47 -04002980 if (ffa_version >= MAKE_FFA_VERSION(1, 1)) {
J-Alves5da37d92022-10-24 16:33:48 +01002981 /*
2982 * Hafnium operates memory regions in FF-A v1.1 format, so we
2983 * can retrieve the constituents offset from descriptor.
2984 */
2985 composite_constituents_offset =
2986 ffa_composite_constituent_offset(memory_region, 0);
2987 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2988 /*
2989 * If retriever is FF-A v1.0, determine the composite offset
2990 * as it is expected to have been configured in the
2991 * retrieve response.
2992 */
2993 composite_constituents_offset =
2994 sizeof(struct ffa_memory_region_v1_0) +
2995 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002996 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01002997 sizeof(struct ffa_composite_memory_region);
2998 } else {
2999 panic("%s received an invalid FF-A version.\n", __func__);
3000 }
3001
3002 expected_fragment_offset =
3003 composite_constituents_offset +
3004 retrieved_constituents_count *
3005 sizeof(struct ffa_memory_region_constituent) -
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003006 (uint32_t)(memory_region->memory_access_desc_size *
3007 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003008
3009 return expected_fragment_offset;
3010}
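
/*
 * Worked example (illustrative only): for an FF-A v1.0 retriever, a
 * transaction with a single receiver, and N constituents already retrieved,
 * the next fragment is expected at offset
 *
 *	sizeof(struct ffa_memory_region_v1_0) +
 *	sizeof(struct ffa_memory_access_v1_0) +
 *	sizeof(struct ffa_composite_memory_region) +
 *	N * sizeof(struct ffa_memory_region_constituent)
 *
 * i.e. immediately after the constituents already sent, in the layout used
 * by the v1.0 retrieve response. With a single receiver the subtraction in
 * the helper above vanishes.
 */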
3011
Andrew Walbranca808b12020-05-15 17:22:28 +01003012struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3013 ffa_memory_handle_t handle,
3014 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003015 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003016 struct mpool *page_pool)
3017{
3018 struct ffa_memory_region *memory_region;
3019 struct share_states_locked share_states;
3020 struct ffa_memory_share_state *share_state;
3021 struct ffa_value ret;
3022 uint32_t fragment_index;
3023 uint32_t retrieved_constituents_count;
3024 uint32_t i;
3025 uint32_t expected_fragment_offset;
3026 uint32_t remaining_constituent_count;
3027 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003028 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003029 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003030
3031 dump_share_states();
3032
3033 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003034 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003035 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003036 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
3037 handle);
3038 ret = ffa_error(FFA_INVALID_PARAMETERS);
3039 goto out;
3040 }
3041
3042 memory_region = share_state->memory_region;
3043 CHECK(memory_region != NULL);
3044
Andrew Walbranca808b12020-05-15 17:22:28 +01003045 if (!share_state->sending_complete) {
3046 dlog_verbose(
3047 "Memory with handle %#x not fully sent, can't "
3048 "retrieve.\n",
3049 handle);
3050 ret = ffa_error(FFA_INVALID_PARAMETERS);
3051 goto out;
3052 }
3053
J-Alves59ed0042022-07-28 18:26:41 +01003054 /*
3055	 * If a retrieve request from the hypervisor has been initiated for the
3056	 * given share_state, continue it; otherwise assume this is the
3057	 * continuation of a retrieve request from a NWd VM.
3058 */
3059 continue_ffa_hyp_mem_retrieve_req =
3060 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3061 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003062 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003063
J-Alves59ed0042022-07-28 18:26:41 +01003064 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003065 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003066 memory_region, to_locked.vm->id);
3067
3068 if (receiver_index == memory_region->receiver_count) {
3069 dlog_verbose(
3070 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
3071 "borrower to memory sharing transaction (%x)\n",
3072 to_locked.vm->id, handle);
3073 ret = ffa_error(FFA_INVALID_PARAMETERS);
3074 goto out;
3075 }
3076
3077 if (share_state->retrieved_fragment_count[receiver_index] ==
3078 0 ||
3079 share_state->retrieved_fragment_count[receiver_index] >=
3080 share_state->fragment_count) {
3081 dlog_verbose(
3082 "Retrieval of memory with handle %#x not yet "
3083 "started or already completed (%d/%d fragments "
3084 "retrieved).\n",
3085 handle,
3086 share_state->retrieved_fragment_count
3087 [receiver_index],
3088 share_state->fragment_count);
3089 ret = ffa_error(FFA_INVALID_PARAMETERS);
3090 goto out;
3091 }
3092
3093 fragment_index =
3094 share_state->retrieved_fragment_count[receiver_index];
3095 } else {
3096 if (share_state->hypervisor_fragment_count == 0 ||
3097 share_state->hypervisor_fragment_count >=
3098 share_state->fragment_count) {
3099 dlog_verbose(
3100				"Retrieval of memory with handle %#x not "
3101				"started by the hypervisor.\n",
3102 handle);
3103 ret = ffa_error(FFA_INVALID_PARAMETERS);
3104 goto out;
3105 }
3106
3107 if (memory_region->sender != sender_vm_id) {
3108 dlog_verbose(
3109 "Sender ID (%x) is not as expected for memory "
3110 "handle %x\n",
3111 sender_vm_id, handle);
3112 ret = ffa_error(FFA_INVALID_PARAMETERS);
3113 goto out;
3114 }
3115
3116 fragment_index = share_state->hypervisor_fragment_count;
3117
3118 receiver_index = 0;
3119 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003120
3121 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003122 * Check that the given fragment offset is correct by counting
3123 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003124 */
3125 retrieved_constituents_count = 0;
3126 for (i = 0; i < fragment_index; ++i) {
3127 retrieved_constituents_count +=
3128 share_state->fragment_constituent_counts[i];
3129 }
J-Alvesc7484f12022-05-13 12:41:14 +01003130
3131 CHECK(memory_region->receiver_count > 0);
3132
Andrew Walbranca808b12020-05-15 17:22:28 +01003133 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003134 ffa_memory_retrieve_expected_offset_per_ffa_version(
3135 memory_region, retrieved_constituents_count,
3136 to_locked.vm->ffa_version);
3137
Andrew Walbranca808b12020-05-15 17:22:28 +01003138 if (fragment_offset != expected_fragment_offset) {
3139 dlog_verbose("Fragment offset was %d but expected %d.\n",
3140 fragment_offset, expected_fragment_offset);
3141 ret = ffa_error(FFA_INVALID_PARAMETERS);
3142 goto out;
3143 }
3144
J-Alves59ed0042022-07-28 18:26:41 +01003145 /* VMs acquire the RX buffer from SPMC. */
3146 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3147
Andrew Walbranca808b12020-05-15 17:22:28 +01003148 remaining_constituent_count = ffa_memory_fragment_init(
3149 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3150 share_state->fragments[fragment_index],
3151 share_state->fragment_constituent_counts[fragment_index],
3152 &fragment_length);
3153 CHECK(remaining_constituent_count == 0);
3154 to_locked.vm->mailbox.recv_size = fragment_length;
3155 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
3156 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003157 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003158
J-Alves59ed0042022-07-28 18:26:41 +01003159 if (!continue_ffa_hyp_mem_retrieve_req) {
3160 share_state->retrieved_fragment_count[receiver_index]++;
3161 if (share_state->retrieved_fragment_count[receiver_index] ==
3162 share_state->fragment_count) {
3163 ffa_memory_retrieve_complete(share_states, share_state,
3164 page_pool);
3165 }
3166 } else {
3167 share_state->hypervisor_fragment_count++;
3168
3169 ffa_memory_retrieve_complete_from_hyp(share_state);
3170 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003171 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3172 .arg1 = (uint32_t)handle,
3173 .arg2 = (uint32_t)(handle >> 32),
3174 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003175
3176out:
3177 share_states_unlock(&share_states);
3178 dump_share_states();
3179 return ret;
3180}
3181
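/*
 * Illustrative sketch only: the relinquish descriptor a borrower is expected
 * to place in its TX buffer so that it passes the checks below (`tx`,
 * `handle` and `own_id` are placeholder names):
 *
 *	struct ffa_mem_relinquish *rel = (struct ffa_mem_relinquish *)tx;
 *
 *	rel->handle = handle;
 *	rel->flags = 0;			// or FFA_MEMORY_REGION_FLAG_CLEAR
 *	rel->endpoint_count = 1;
 *	rel->endpoints[0] = own_id;	// must match the caller's VM ID
 */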
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003182struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003183 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003184 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003185{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003186 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003187 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003188 struct ffa_memory_share_state *share_state;
3189 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003190 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003191 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003192 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003193 bool receivers_relinquished_memory;
J-Alves639ddfc2023-11-21 14:17:26 +00003194 ffa_memory_access_permissions_t receiver_permissions = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003195
Andrew Walbrana65a1322020-04-06 19:32:32 +01003196 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003197 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003198 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01003199 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003200 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003201 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003202 }
3203
Andrew Walbrana65a1322020-04-06 19:32:32 +01003204 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003205 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003206 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01003207 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003208 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003209 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003210 }
3211
3212 dump_share_states();
3213
3214 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003215 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003216 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003217 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003218 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003219 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003220 goto out;
3221 }
3222
Andrew Walbranca808b12020-05-15 17:22:28 +01003223 if (!share_state->sending_complete) {
3224 dlog_verbose(
3225 "Memory with handle %#x not fully sent, can't "
3226 "relinquish.\n",
3227 handle);
3228 ret = ffa_error(FFA_INVALID_PARAMETERS);
3229 goto out;
3230 }
3231
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003232 memory_region = share_state->memory_region;
3233 CHECK(memory_region != NULL);
3234
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003235 receiver_index = ffa_memory_region_get_receiver_index(
3236 memory_region, from_locked.vm->id);
J-Alves8eb19162022-04-28 10:56:48 +01003237
3238 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003239 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003240 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01003241			"with handle %#x but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003242 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003243 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003244 goto out;
3245 }
3246
J-Alves8eb19162022-04-28 10:56:48 +01003247 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003248 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003249 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003250 "Memory with handle %#x not yet fully "
3251 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003252 "receiver %x can't relinquish.\n",
3253 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003254 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003255 goto out;
3256 }
3257
J-Alves3c5b2072022-11-21 12:45:40 +00003258 /*
3259	 * Clear the memory only once all other borrowers have relinquished
3260	 * it, and only if clearing was requested either in this relinquish
	 * call or earlier in a retrieve request from one of the borrowers.
3261 */
3262 receivers_relinquished_memory = true;
3263
3264 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3265 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003266 ffa_memory_region_get_receiver(memory_region, i);
3267 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003268 if (receiver->receiver_permissions.receiver ==
3269 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003270 receiver_permissions =
3271 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003272 continue;
3273 }
3274
3275 if (share_state->retrieved_fragment_count[i] != 0U) {
3276 receivers_relinquished_memory = false;
3277 break;
3278 }
3279 }
3280
3281 clear = receivers_relinquished_memory &&
3282 (share_state->clear_after_relinquish ||
3283 (relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3284 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003285
3286 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003287 * Clear is not allowed for memory that was shared, as the
3288 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003289 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003290 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003291 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003292 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003293 goto out;
3294 }
3295
J-Alves639ddfc2023-11-21 14:17:26 +00003296 if (clear && receiver_permissions == FFA_DATA_ACCESS_RO) {
3297 dlog_verbose("%s: RO memory can't use clear memory flag.\n",
3298 __func__);
3299 ret = ffa_error(FFA_DENIED);
3300 goto out;
3301 }
3302
Andrew Walbranca808b12020-05-15 17:22:28 +01003303 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003304 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003305 share_state->fragment_constituent_counts,
3306 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003307
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003308 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003309 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003310 * Mark memory handle as not retrieved, so it can be
3311 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003312 */
J-Alves8eb19162022-04-28 10:56:48 +01003313 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003314 }
3315
3316out:
3317 share_states_unlock(&share_states);
3318 dump_share_states();
3319 return ret;
3320}
3321
3322/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003323 * Validates that the reclaim transition is allowed for the given
3324 * handle, updates the page table of the reclaiming VM, and frees the
3325 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003326 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003327struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003328 ffa_memory_handle_t handle,
3329 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003330 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003331{
3332 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003333 struct ffa_memory_share_state *share_state;
3334 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003335 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003336
3337 dump_share_states();
3338
3339 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003340
Karl Meakin4a2854a2023-06-30 16:26:52 +01003341 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003342 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003343 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003344 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003345 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003346 goto out;
3347 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003348 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003349
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003350 CHECK(memory_region != NULL);
3351
J-Alvesa9cd7e32022-07-01 13:49:33 +01003352 if (vm_id_is_current_world(to_locked.vm->id) &&
3353 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003354 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003355 "VM %#x attempted to reclaim memory handle %#x "
3356 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003357 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003358 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003359 goto out;
3360 }
3361
Andrew Walbranca808b12020-05-15 17:22:28 +01003362 if (!share_state->sending_complete) {
3363 dlog_verbose(
3364 "Memory with handle %#x not fully sent, can't "
3365 "reclaim.\n",
3366 handle);
3367 ret = ffa_error(FFA_INVALID_PARAMETERS);
3368 goto out;
3369 }
3370
J-Alves752236c2022-04-28 11:07:47 +01003371 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3372 if (share_state->retrieved_fragment_count[i] != 0) {
3373 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003374 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00003375 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003376				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003377 handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003378 ffa_memory_region_get_receiver(memory_region, i)
3379 ->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01003380 ret = ffa_error(FFA_DENIED);
3381 goto out;
3382 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003383 }
3384
Andrew Walbranca808b12020-05-15 17:22:28 +01003385 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01003386 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003387 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003388 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01003389 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003390
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003391 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003392 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003393 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003394 }
3395
3396out:
3397 share_states_unlock(&share_states);
3398 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003399}