/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
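
/*
 * Illustrative example (assuming the allocator tag occupies the bits covered
 * by FFA_MEMORY_HANDLE_ALLOCATOR_MASK): a handle made for share state slot 5
 * round-trips back to index 5.
 *
 *	ffa_memory_handle_t h = plat_ffa_memory_handle_make(5);
 *	assert(ffa_memory_handle_get_index(h) == 5);
 */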
47
48/**
Karl Meakin52cdfe72023-06-30 14:49:10 +010049 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
50 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
51 * otherwise uses the provided handle which is assumed to be globally unique.
Andrew Walbranca808b12020-05-15 17:22:28 +010052 *
Karl Meakin52cdfe72023-06-30 14:49:10 +010053 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
54 * `NULL` if none are available.
Andrew Walbranca808b12020-05-15 17:22:28 +010055 */
Karl Meakin52cdfe72023-06-30 14:49:10 +010056struct ffa_memory_share_state *allocate_share_state(
57 struct share_states_locked share_states, uint32_t share_func,
58 struct ffa_memory_region *memory_region, uint32_t fragment_length,
59 ffa_memory_handle_t handle)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000060{
Daniel Boulbya2f8c662021-11-26 17:52:53 +000061 assert(share_states.share_states != NULL);
62 assert(memory_region != NULL);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000063
Karl Meakin52cdfe72023-06-30 14:49:10 +010064 for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
Andrew Walbranca808b12020-05-15 17:22:28 +010065 if (share_states.share_states[i].share_func == 0) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010066 struct ffa_memory_share_state *allocated_state =
Andrew Walbranca808b12020-05-15 17:22:28 +010067 &share_states.share_states[i];
68 struct ffa_composite_memory_region *composite =
69 ffa_memory_region_get_composite(memory_region,
70 0);
71
72 if (handle == FFA_MEMORY_HANDLE_INVALID) {
J-Alvesee68c542020-10-29 17:48:20 +000073 memory_region->handle =
Olivier Deprez55a189e2021-06-09 15:45:27 +020074 plat_ffa_memory_handle_make(i);
Andrew Walbranca808b12020-05-15 17:22:28 +010075 } else {
J-Alvesee68c542020-10-29 17:48:20 +000076 memory_region->handle = handle;
Andrew Walbranca808b12020-05-15 17:22:28 +010077 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000078 allocated_state->share_func = share_func;
79 allocated_state->memory_region = memory_region;
Andrew Walbranca808b12020-05-15 17:22:28 +010080 allocated_state->fragment_count = 1;
81 allocated_state->fragments[0] = composite->constituents;
82 allocated_state->fragment_constituent_counts[0] =
83 (fragment_length -
84 ffa_composite_constituent_offset(memory_region,
85 0)) /
86 sizeof(struct ffa_memory_region_constituent);
87 allocated_state->sending_complete = false;
Karl Meakin52cdfe72023-06-30 14:49:10 +010088 for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
89 ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +010090 allocated_state->retrieved_fragment_count[j] =
91 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000092 }
Karl Meakin52cdfe72023-06-30 14:49:10 +010093 return allocated_state;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000094 }
95 }
96
Karl Meakin52cdfe72023-06-30 14:49:10 +010097 return NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000098}
99
100/** Locks the share states lock. */
101struct share_states_locked share_states_lock(void)
102{
103 sl_lock(&share_states_lock_instance);
104
105 return (struct share_states_locked){.share_states = share_states};
106}
107
108/** Unlocks the share states lock. */
J-Alves66652252022-07-06 09:49:51 +0100109void share_states_unlock(struct share_states_locked *share_states)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000110{
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000111 assert(share_states->share_states != NULL);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000112 share_states->share_states = NULL;
113 sl_unlock(&share_states_lock_instance);
114}
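
/*
 * Typical usage sketch of the locking discipline above (illustrative only;
 * `region`, `length` and the error handling are assumed to come from the
 * caller):
 *
 *	struct share_states_locked states = share_states_lock();
 *	struct ffa_memory_share_state *s = allocate_share_state(
 *		states, FFA_MEM_SHARE_32, region, length,
 *		FFA_MEMORY_HANDLE_INVALID);
 *	if (s == NULL) {
 *		... no free slot; report FFA_NO_MEMORY to the caller ...
 *	}
 *	share_states_unlock(&states);
 *
 * Every access to a share state must happen between the lock and unlock
 * calls, and the pointer must not be used after unlocking.
 */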
115
116/**
Andrew Walbranca808b12020-05-15 17:22:28 +0100117 * If the given handle is a valid handle for an allocated share state then
Karl Meakin4a2854a2023-06-30 16:26:52 +0100118 * returns a pointer to the share state. Otherwise returns NULL.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000119 */
Karl Meakin4a2854a2023-06-30 16:26:52 +0100120struct ffa_memory_share_state *get_share_state(
121 struct share_states_locked share_states, ffa_memory_handle_t handle)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000122{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100123 struct ffa_memory_share_state *share_state;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000124
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000125 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +0100126
127 /*
128 * First look for a share_state allocated by us, in which case the
129 * handle is based on the index.
130 */
Olivier Deprez55a189e2021-06-09 15:45:27 +0200131 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Karl Meakin4a2854a2023-06-30 16:26:52 +0100132 uint64_t index = ffa_memory_handle_get_index(handle);
133
Andrew Walbranca808b12020-05-15 17:22:28 +0100134 if (index < MAX_MEM_SHARES) {
135 share_state = &share_states.share_states[index];
136 if (share_state->share_func != 0) {
Karl Meakin4a2854a2023-06-30 16:26:52 +0100137 return share_state;
Andrew Walbranca808b12020-05-15 17:22:28 +0100138 }
139 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000140 }
141
Andrew Walbranca808b12020-05-15 17:22:28 +0100142 /* Fall back to a linear scan. */
Karl Meakin4a2854a2023-06-30 16:26:52 +0100143 for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
Andrew Walbranca808b12020-05-15 17:22:28 +0100144 share_state = &share_states.share_states[index];
J-Alvesee68c542020-10-29 17:48:20 +0000145 if (share_state->memory_region != NULL &&
146 share_state->memory_region->handle == handle &&
Andrew Walbranca808b12020-05-15 17:22:28 +0100147 share_state->share_func != 0) {
Karl Meakin4a2854a2023-06-30 16:26:52 +0100148 return share_state;
Andrew Walbranca808b12020-05-15 17:22:28 +0100149 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000150 }
151
Karl Meakin4a2854a2023-06-30 16:26:52 +0100152 return NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000153}
154
155/** Marks a share state as unallocated. */
J-Alvesfdd29272022-07-19 13:16:31 +0100156void share_state_free(struct share_states_locked share_states,
157 struct ffa_memory_share_state *share_state,
158 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000159{
Andrew Walbranca808b12020-05-15 17:22:28 +0100160 uint32_t i;
161
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000162 assert(share_states.share_states != NULL);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000163 share_state->share_func = 0;
Andrew Walbranca808b12020-05-15 17:22:28 +0100164 share_state->sending_complete = false;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000165 mpool_free(page_pool, share_state->memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +0100166 /*
167 * First fragment is part of the same page as the `memory_region`, so it
168 * doesn't need to be freed separately.
169 */
170 share_state->fragments[0] = NULL;
171 share_state->fragment_constituent_counts[0] = 0;
172 for (i = 1; i < share_state->fragment_count; ++i) {
173 mpool_free(page_pool, share_state->fragments[i]);
174 share_state->fragments[i] = NULL;
175 share_state->fragment_constituent_counts[i] = 0;
176 }
177 share_state->fragment_count = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000178 share_state->memory_region = NULL;
J-Alvesa9cd7e32022-07-01 13:49:33 +0100179 share_state->hypervisor_fragment_count = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000180}
181
Andrew Walbranca808b12020-05-15 17:22:28 +0100182/** Checks whether the given share state has been fully sent. */
J-Alvesfdd29272022-07-19 13:16:31 +0100183bool share_state_sending_complete(struct share_states_locked share_states,
184 struct ffa_memory_share_state *share_state)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000185{
Andrew Walbranca808b12020-05-15 17:22:28 +0100186 struct ffa_composite_memory_region *composite;
187 uint32_t expected_constituent_count;
188 uint32_t fragment_constituent_count_total = 0;
189 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000190
Andrew Walbranca808b12020-05-15 17:22:28 +0100191 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000192 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +0100193
194 /*
195 * Share state must already be valid, or it's not possible to get hold
196 * of it.
197 */
198 CHECK(share_state->memory_region != NULL &&
199 share_state->share_func != 0);
200
201 composite =
202 ffa_memory_region_get_composite(share_state->memory_region, 0);
203 expected_constituent_count = composite->constituent_count;
204 for (i = 0; i < share_state->fragment_count; ++i) {
205 fragment_constituent_count_total +=
206 share_state->fragment_constituent_counts[i];
207 }
208 dlog_verbose(
209 "Checking completion: constituent count %d/%d from %d "
210 "fragments.\n",
211 fragment_constituent_count_total, expected_constituent_count,
212 share_state->fragment_count);
213
214 return fragment_constituent_count_total == expected_constituent_count;
215}
216
217/**
218 * Calculates the offset of the next fragment expected for the given share
219 * state.
220 */
J-Alvesfdd29272022-07-19 13:16:31 +0100221uint32_t share_state_next_fragment_offset(
Andrew Walbranca808b12020-05-15 17:22:28 +0100222 struct share_states_locked share_states,
223 struct ffa_memory_share_state *share_state)
224{
225 uint32_t next_fragment_offset;
226 uint32_t i;
227
228 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000229 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +0100230
231 next_fragment_offset =
232 ffa_composite_constituent_offset(share_state->memory_region, 0);
233 for (i = 0; i < share_state->fragment_count; ++i) {
234 next_fragment_offset +=
235 share_state->fragment_constituent_counts[i] *
236 sizeof(struct ffa_memory_region_constituent);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000237 }
238
Andrew Walbranca808b12020-05-15 17:22:28 +0100239 return next_fragment_offset;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000240}
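
/*
 * Worked example (illustrative, taking
 * sizeof(struct ffa_memory_region_constituent) to be 16 bytes): if the
 * composite constituents start at offset 48 and two fragments with 4 and 3
 * constituents have already been received, the next fragment is expected at
 * offset 48 + (4 + 3) * 16 = 160 bytes.
 */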

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to "
	     "%u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     " sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
343 panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESVERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}
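
/*
 * Example (restating the mapping above): RO data access with NX instruction
 * access yields MM_MODE_R only, while RW with X yields
 * MM_MODE_R | MM_MODE_W | MM_MODE_X. NOT_SPECIFIED data or instruction access
 * inherits the corresponding bits from `default_mode`, which callers typically
 * pass as the sender's original mode.
 */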

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, orig_to_mode is expected to overlap with
		 * `state_mask`, as a result of `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from virtual FF-A instance:
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls into system's PA
			 * range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
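
/*
 * Usage sketch of the prepare/commit pattern described above (illustrative
 * only; `fragments`, `counts`, `fragment_count`, `mode` and `ppool` are
 * assumed to be validated by the caller):
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					   fragment_count, mode, ppool,
 *					   false)) {
 *		... nothing was mapped; fail cleanly, e.g. FFA_NO_MEMORY ...
 *	}
 *	... the commit pass cannot run out of memory now ...
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					    fragment_count, mode, ppool,
 *					    true));
 *	vm_ptable_defrag(vm_locked, ppool);
 */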

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 * global mapping of the whole range. Such an approach will limit
	 * the changes to stage-1 tables and will allow only local
	 * invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
				   ipaddr_t in_begin, ipaddr_t in_end)
{
	return (ipa_addr(begin) >= ipa_addr(in_begin) &&
		ipa_addr(begin) < ipa_addr(in_end)) ||
	       (ipa_addr(end) <= ipa_addr(in_end) &&
		ipa_addr(end) > ipa_addr(in_begin));
}

/**
 * Receives a memory range and looks for overlaps with the remaining
 * constituents of the memory share/lend/donate operation. Assumes constituents
 * are passed in order, to avoid having to loop over all the elements at each
 * call. The function only compares the received memory range with those that
 * follow it within the same fragment, and with subsequent fragments from the
 * same operation.
 */
static bool ffa_memory_check_overlap(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts,
	const uint32_t fragment_count, const uint32_t current_fragment,
	const uint32_t current_constituent)
{
	uint32_t i = current_fragment;
	uint32_t j = current_constituent;
	ipaddr_t current_begin = ipa_init(fragments[i][j].address);
	const uint32_t current_page_count = fragments[i][j].page_count;
	size_t current_size = current_page_count * PAGE_SIZE;
	ipaddr_t current_end = ipa_add(current_begin, current_size - 1);

	if (current_size == 0 ||
	    current_size > UINT64_MAX - ipa_addr(current_begin)) {
		dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
			     current_begin, current_page_count);
		return false;
	}

	for (; i < fragment_count; i++) {
		j = (i == current_fragment) ? j + 1 : 0;

		for (; j < fragment_constituent_counts[i]; j++) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			const uint32_t page_count = fragments[i][j].page_count;
			size_t size = page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size - 1);

			if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
				dlog_verbose(
					"Invalid page count. Addr: %x "
					"page_count: %x\n",
					begin, page_count);
				return false;
			}

			/*
			 * Check whether the current range is within [begin,
			 * end], and the reverse. This helps optimise the loop
			 * and reduces the number of iterations.
			 */
			if (is_memory_range_within(begin, end, current_begin,
						   current_end) ||
			    is_memory_range_within(current_begin, current_end,
						   begin, end)) {
				dlog_verbose(
					"Overlapping memory ranges: %#x - %#x "
					"with %#x - %#x\n",
					ipa_addr(begin), ipa_addr(end),
					ipa_addr(current_begin),
					ipa_addr(current_end));
				return true;
			}
		}
	}

	return false;
}
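
/*
 * Example (illustrative, with PAGE_SIZE == 4096): a constituent at address
 * 0x1000 spanning 2 pages covers [0x1000, 0x2fff], so a later constituent at
 * 0x2000 overlaps it; the caller then rejects the whole send with
 * FFA_INVALID_PARAMETERS.
 */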

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t composite_total_page_count, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
	uint32_t i;
	uint32_t j;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;
	uint32_t constituents_total_page_count = 0;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			constituents_total_page_count +=
				fragments[i][j].page_count;
			if (ffa_memory_check_overlap(
				    fragments, fragment_constituent_counts,
				    fragment_count, i, j)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}
		}
	}

	if (constituents_total_page_count != composite_total_page_count) {
		dlog_verbose(
			"Composite page count differs from calculated page "
			"count from constituents.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check that the state transition is lawful for the sender, and ensure
	 * that all constituents of the memory region being shared are in the
	 * same state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, receivers,
					receivers_count, &orig_from_mode,
					fragments, fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked, ffa_id_t from_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Fragment not properly aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		dlog_verbose("Couldn't clear constituents.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(to_locked, page_pool);

	return ret;
}

static struct ffa_value ffa_relinquish_check_update(
	struct vm_locked from_locked, ffa_id_t owner_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	ret = ffa_relinquish_check_transition(
		from_locked, &orig_from_mode, fragments,
		fragment_constituent_counts, fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for relinquish.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(owner_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

/**
 * Complete a memory sending operation by checking that it is valid, updating
 * the sender page table, and then either marking the share state as having
 * completed sending (on success) or freeing it (on failure).
 *
 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
 */
struct ffa_value ffa_memory_send_complete(
	struct vm_locked from_locked, struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state, struct mpool *page_pool,
	uint32_t *orig_from_mode_ret)
{
	struct ffa_memory_region *memory_region = share_state->memory_region;
	struct ffa_composite_memory_region *composite;
	struct ffa_value ret;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);
1213 composite = ffa_memory_region_get_composite(memory_region, 0);
1214 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001215
1216 /* Check that state is valid in sender page table and update. */
1217 ret = ffa_send_check_update(
1218 from_locked, share_state->fragments,
1219 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001220 share_state->fragment_count, composite->page_count,
1221 share_state->share_func, memory_region->receivers,
1222 memory_region->receiver_count, page_pool,
1223 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001224 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001225 if (ret.func != FFA_SUCCESS_32) {
1226 /*
1227 * Free share state, it failed to send so it can't be retrieved.
1228 */
1229 dlog_verbose("Complete failed, freeing share state.\n");
1230 share_state_free(share_states, share_state, page_pool);
1231 return ret;
1232 }
1233
1234 share_state->sending_complete = true;
1235 dlog_verbose("Marked sending complete.\n");
1236
J-Alvesee68c542020-10-29 17:48:20 +00001237 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001238}
1239
1240/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001241 * Check that the memory attributes match Hafnium expectations:
1242 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1243 * Write-Allocate Cacheable.
1244 */
1245static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001246 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001247{
1248 enum ffa_memory_type memory_type;
1249 enum ffa_memory_cacheability cacheability;
1250 enum ffa_memory_shareability shareability;
1251
1252 memory_type = ffa_get_memory_type_attr(attributes);
1253 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1254 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1255 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001256 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001257 }
1258
1259 cacheability = ffa_get_memory_cacheability_attr(attributes);
1260 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1261 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1262 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001263 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001264 }
1265
1266 shareability = ffa_get_memory_shareability_attr(attributes);
1267 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1268		dlog_verbose("Invalid shareability %#x, expected %#x.\n",
1269 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001270 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001271 }
1272
1273 return (struct ffa_value){.func = FFA_SUCCESS_32};
1274}
1275
1276/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001277 * Check that the given `memory_region` represents a valid memory send request
1278 * of the given `share_func` type: the descriptor layout, sender, receiver
1279 * list, flags, permissions and memory attributes are all checked.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001280 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001281 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001282 * not.
1283 */
J-Alves66652252022-07-06 09:49:51 +01001284struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001285 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1286 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001287 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001288{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001289 struct ffa_composite_memory_region *composite;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001290 uint64_t receivers_end;
1291 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001292 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001293 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001294 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001295 enum ffa_data_access data_access;
1296 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001297 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001298 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001299 const size_t minimum_first_fragment_length =
1300 (sizeof(struct ffa_memory_region) +
1301 sizeof(struct ffa_memory_access) +
1302 sizeof(struct ffa_composite_memory_region));
1303
1304 if (fragment_length < minimum_first_fragment_length) {
1305 dlog_verbose("Fragment length %u too short (min %u).\n",
1306 (size_t)fragment_length,
1307 minimum_first_fragment_length);
1308 return ffa_error(FFA_INVALID_PARAMETERS);
1309 }
1310
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001311 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1312 "struct ffa_memory_region_constituent must be 16 bytes");
1313 if (!is_aligned(fragment_length,
1314 sizeof(struct ffa_memory_region_constituent)) ||
1315 !is_aligned(memory_share_length,
1316 sizeof(struct ffa_memory_region_constituent))) {
1317 dlog_verbose(
1318 "Fragment length %u or total length %u"
1319 " is not 16-byte aligned.\n",
1320 fragment_length, memory_share_length);
1321 return ffa_error(FFA_INVALID_PARAMETERS);
1322 }
1323
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001324 if (fragment_length > memory_share_length) {
1325 dlog_verbose(
1326 "Fragment length %u greater than total length %u.\n",
1327 (size_t)fragment_length, (size_t)memory_share_length);
1328 return ffa_error(FFA_INVALID_PARAMETERS);
1329 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001330
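	/*
	 * The v1.1 descriptor layout is fixed: the receiver list follows the
	 * header directly and each access descriptor has the expected size.
	 */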
J-Alves0b6653d2022-04-22 13:17:38 +01001331 assert(memory_region->receivers_offset ==
1332 offsetof(struct ffa_memory_region, receivers));
1333 assert(memory_region->memory_access_desc_size ==
1334 sizeof(struct ffa_memory_access));
1335
J-Alves95df0ef2022-12-07 10:09:48 +00001336 /* The sender must match the caller. */
1337 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1338 vm_id_is_current_world(memory_region->sender)) ||
1339 (vm_id_is_current_world(from_locked.vm->id) &&
1340 memory_region->sender != from_locked.vm->id)) {
1341 dlog_verbose("Invalid memory sender ID.\n");
1342 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001343 }
1344
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001345 if (memory_region->receiver_count <= 0) {
1346 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001347 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001348 }
1349
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001350 /*
1351 * Ensure that the composite header is within the memory bounds and
1352 * doesn't overlap the first part of the message. Cast to uint64_t
1353 * to prevent overflow.
1354 */
1355 receivers_end = ((uint64_t)sizeof(struct ffa_memory_access) *
1356 (uint64_t)memory_region->receiver_count) +
1357 sizeof(struct ffa_memory_region);
1358 min_length = receivers_end +
1359 sizeof(struct ffa_composite_memory_region) +
1360 sizeof(struct ffa_memory_region_constituent);
1361 if (min_length > memory_share_length) {
1362 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1363 (size_t)memory_share_length, (size_t)min_length);
1364 return ffa_error(FFA_INVALID_PARAMETERS);
1365 }
1366
1367 composite_memory_region_offset =
1368 memory_region->receivers[0].composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001369
1370 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001371 * Check that the composite memory region descriptor is after the access
1372 * descriptors, is at least 16-byte aligned, and fits in the first
1373 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001374 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001375 if ((composite_memory_region_offset < receivers_end) ||
1376 (composite_memory_region_offset % 16 != 0) ||
1377 (composite_memory_region_offset >
1378 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1379 dlog_verbose(
1380 "Invalid composite memory region descriptor offset "
1381 "%u.\n",
1382 (size_t)composite_memory_region_offset);
1383 return ffa_error(FFA_INVALID_PARAMETERS);
1384 }
1385
1386 /*
1387 * Compute the start of the constituent regions. Already checked
1388 * to be not more than fragment_length and thus not more than
1389 * memory_share_length.
1390 */
1391 constituents_start = composite_memory_region_offset +
1392 sizeof(struct ffa_composite_memory_region);
1393 constituents_length = memory_share_length - constituents_start;
1394
1395 /*
1396 * Check that the number of constituents is consistent with the length
1397 * of the constituent region.
1398 */
1399 composite = ffa_memory_region_get_composite(memory_region, 0);
1400 if ((constituents_length %
1401 sizeof(struct ffa_memory_region_constituent) !=
1402 0) ||
1403 ((constituents_length /
1404 sizeof(struct ffa_memory_region_constituent)) !=
1405 composite->constituent_count)) {
1406 dlog_verbose("Invalid length %u or composite offset %u.\n",
1407 (size_t)memory_share_length,
1408 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001409 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001410 }
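	/*
	 * A first fragment smaller than both the total length and the mailbox
	 * is allowed, but unusual enough to be worth logging.
	 */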
Andrew Walbranca808b12020-05-15 17:22:28 +01001411 if (fragment_length < memory_share_length &&
1412 fragment_length < HF_MAILBOX_SIZE) {
1413 dlog_warning(
1414 "Initial fragment length %d smaller than mailbox "
1415 "size.\n",
1416 fragment_length);
1417 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001418
Andrew Walbrana65a1322020-04-06 19:32:32 +01001419 /*
1420 * Clear is not allowed for memory sharing, as the sender still has
1421 * access to the memory.
1422 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001423 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1424 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001425 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001426 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001427 }
1428
1429 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001430 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001431 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001432 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001433 }
1434
J-Alves363f5722022-04-25 17:37:37 +01001435 /* Check that the permissions are valid, for each specified receiver. */
1436 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1437 ffa_memory_access_permissions_t permissions =
1438 memory_region->receivers[i]
1439 .receiver_permissions.permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01001440 ffa_id_t receiver_id = memory_region->receivers[i]
1441 .receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01001442
1443 if (memory_region->sender == receiver_id) {
1444 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001445 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001446 }
Federico Recanati85090c42021-12-15 13:17:54 +01001447
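		/* A receiver must not appear more than once in the list. */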
J-Alves363f5722022-04-25 17:37:37 +01001448 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1449 j++) {
1450 if (receiver_id ==
1451 memory_region->receivers[j]
1452 .receiver_permissions.receiver) {
1453 dlog_verbose(
1454 "Repeated receiver(%x) in memory send "
1455 "operation.\n",
1456 memory_region->receivers[j]
1457 .receiver_permissions.receiver);
1458 return ffa_error(FFA_INVALID_PARAMETERS);
1459 }
1460 }
1461
1462 if (composite_memory_region_offset !=
1463 memory_region->receivers[i]
1464 .composite_memory_region_offset) {
1465 dlog_verbose(
1466 "All ffa_memory_access should point to the "
1467 "same composite memory region offset.\n");
1468 return ffa_error(FFA_INVALID_PARAMETERS);
1469 }
1470
1471 data_access = ffa_get_data_access_attr(permissions);
1472 instruction_access =
1473 ffa_get_instruction_access_attr(permissions);
1474 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1475 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1476 dlog_verbose(
1477 "Reserved value for receiver permissions "
1478 "%#x.\n",
1479 permissions);
1480 return ffa_error(FFA_INVALID_PARAMETERS);
1481 }
1482 if (instruction_access !=
1483 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1484 dlog_verbose(
1485 "Invalid instruction access permissions %#x "
1486 "for sending memory.\n",
1487 permissions);
1488 return ffa_error(FFA_INVALID_PARAMETERS);
1489 }
1490 if (share_func == FFA_MEM_SHARE_32) {
1491 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1492 dlog_verbose(
1493 "Invalid data access permissions %#x "
1494 "for sharing memory.\n",
1495 permissions);
1496 return ffa_error(FFA_INVALID_PARAMETERS);
1497 }
J-Alves363f5722022-04-25 17:37:37 +01001498 }
1499 if (share_func == FFA_MEM_LEND_32 &&
1500 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1501 dlog_verbose(
1502 "Invalid data access permissions %#x for "
1503 "lending memory.\n",
1504 permissions);
1505 return ffa_error(FFA_INVALID_PARAMETERS);
1506 }
1507
1508 if (share_func == FFA_MEM_DONATE_32 &&
1509 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1510 dlog_verbose(
1511 "Invalid data access permissions %#x for "
1512 "donating memory.\n",
1513 permissions);
1514 return ffa_error(FFA_INVALID_PARAMETERS);
1515 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001516 }
1517
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001518 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1519 security_state =
1520 ffa_get_memory_security_attr(memory_region->attributes);
1521 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1522 dlog_verbose(
1523 "Invalid security state for memory share operation.\n");
1524 return ffa_error(FFA_INVALID_PARAMETERS);
1525 }
1526
Federico Recanatid937f5e2021-12-20 17:38:23 +01001527 /*
J-Alves807794e2022-06-16 13:42:47 +01001528	 * For a memory donate, or a lend with a single borrower, the memory
1529	 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001530 */
J-Alves807794e2022-06-16 13:42:47 +01001531 if (share_func == FFA_MEM_DONATE_32 ||
1532 (share_func == FFA_MEM_LEND_32 &&
1533 memory_region->receiver_count == 1)) {
1534 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1535 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1536 dlog_verbose(
1537 "Memory type shall not be specified by "
1538 "sender.\n");
1539 return ffa_error(FFA_INVALID_PARAMETERS);
1540 }
1541 } else {
1542 /*
1543 * Check that sender's memory attributes match Hafnium
1544 * expectations: Normal Memory, Inner shareable, Write-Back
1545 * Read-Allocate Write-Allocate Cacheable.
1546 */
1547 ret = ffa_memory_attributes_validate(memory_region->attributes);
1548 if (ret.func != FFA_SUCCESS_32) {
1549 return ret;
1550 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001551 }
1552
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001553 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001554}
1555
1556/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001557 * Gets the share state for continuing an operation to donate, lend or share
1558 * memory, and checks that it is a valid request.
1559 *
1560 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1561 * not.
1562 */
J-Alvesfdd29272022-07-19 13:16:31 +01001563struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001564 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001565 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001566 struct mpool *page_pool)
1567{
1568 struct ffa_memory_share_state *share_state;
1569 struct ffa_memory_region *memory_region;
1570
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001571 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001572
1573 /*
1574 * Look up the share state by handle and make sure that the VM ID
1575 * matches.
1576 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01001577 share_state = get_share_state(share_states, handle);
1578 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001579 dlog_verbose(
1580 "Invalid handle %#x for memory send continuation.\n",
1581 handle);
1582 return ffa_error(FFA_INVALID_PARAMETERS);
1583 }
1584 memory_region = share_state->memory_region;
1585
J-Alvesfdd29272022-07-19 13:16:31 +01001586 if (vm_id_is_current_world(from_vm_id) &&
1587 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001588 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1589 return ffa_error(FFA_INVALID_PARAMETERS);
1590 }
1591
1592 if (share_state->sending_complete) {
1593 dlog_verbose(
1594 "Sending of memory handle %#x is already complete.\n",
1595 handle);
1596 return ffa_error(FFA_INVALID_PARAMETERS);
1597 }
1598
1599 if (share_state->fragment_count == MAX_FRAGMENTS) {
1600 /*
1601 * Log a warning as this is a sign that MAX_FRAGMENTS should
1602 * probably be increased.
1603 */
1604 dlog_warning(
1605 "Too many fragments for memory share with handle %#x; "
1606 "only %d supported.\n",
1607 handle, MAX_FRAGMENTS);
1608 /* Free share state, as it's not possible to complete it. */
1609 share_state_free(share_states, share_state, page_pool);
1610 return ffa_error(FFA_NO_MEMORY);
1611 }
1612
1613 *share_state_ret = share_state;
1614
1615 return (struct ffa_value){.func = FFA_SUCCESS_32};
1616}
1617
1618/**
J-Alves95df0ef2022-12-07 10:09:48 +00001619 * Checks if there is at least one receiver from the other world.
1620 */
J-Alvesfdd29272022-07-19 13:16:31 +01001621bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001622 struct ffa_memory_region *memory_region)
1623{
1624 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
J-Alves19e20cf2023-08-02 12:48:55 +01001625 ffa_id_t receiver = memory_region->receivers[i]
1626 .receiver_permissions.receiver;
J-Alves95df0ef2022-12-07 10:09:48 +00001627 if (!vm_id_is_current_world(receiver)) {
1628 return true;
1629 }
1630 }
1631 return false;
1632}
1633
1634/**
J-Alves9da280b2022-12-21 14:55:39 +00001635 * Validates a call to donate, lend or share memory in which Hafnium is the
1636 * designated allocator of the memory handle. In practice, this also means
1637 * Hafnium is responsible for managing the state structures for the transaction.
1638 * If Hafnium is the SPMC, it should allocate the memory handle when either the
1639 * sender is an SP or there is at least one borrower that is an SP.
1640 * If Hafnium is the hypervisor, it should allocate the memory handle when
1641 * the operation involves only NWd VMs.
1642 *
1643 * If validation goes well, Hafnium updates the stage-2 page tables of the
1644 * sender. Validation consists of checking if the message length and number of
1645 * memory region constituents match, and if the transition is valid for the
1646 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001647 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001648 * Assumes that the caller has already found and locked the sender VM and copied
1649 * the memory region descriptor from the sender's TX buffer to a freshly
1650 * allocated page from Hafnium's internal pool. The caller must have also
1651 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001652 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001653 * This function takes ownership of the `memory_region` passed in and will free
1654 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001655 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001656struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001657 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001658 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001659 uint32_t fragment_length, uint32_t share_func,
1660 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001661{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001662 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001663 struct share_states_locked share_states;
1664 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001665
1666 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001667 * If there is an error validating the `memory_region` then we need to
1668 * free it because we own it but we won't be storing it in a share state
1669 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001670 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001671 ret = ffa_memory_send_validate(from_locked, memory_region,
1672 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001673 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001674 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001675 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001676 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001677 }
1678
Andrew Walbrana65a1322020-04-06 19:32:32 +01001679 /* Set flag for share function, ready to be retrieved later. */
1680 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001681 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001682 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001683 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001684 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001685 case FFA_MEM_LEND_32:
1686 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001687 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001688 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001689 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001690 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001691 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001692 }
1693
Andrew Walbranca808b12020-05-15 17:22:28 +01001694 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001695 /*
1696 * Allocate a share state before updating the page table. Otherwise if
1697 * updating the page table succeeded but allocating the share state
1698 * failed then it would leave the memory in a state where nobody could
1699 * get it back.
1700 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01001701 share_state = allocate_share_state(share_states, share_func,
1702 memory_region, fragment_length,
1703 FFA_MEMORY_HANDLE_INVALID);
1704 if (!share_state) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001705 dlog_verbose("Failed to allocate share state.\n");
1706 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001707 ret = ffa_error(FFA_NO_MEMORY);
1708 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001709 }
1710
Andrew Walbranca808b12020-05-15 17:22:28 +01001711 if (fragment_length == memory_share_length) {
1712 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001713 ret = ffa_memory_send_complete(
1714 from_locked, share_states, share_state, page_pool,
1715 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001716 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01001717 /*
1718 * Use sender ID from 'memory_region' assuming
1719 * that at this point it has been validated:
1720 * - MBZ at virtual FF-A instance.
1721 */
J-Alves19e20cf2023-08-02 12:48:55 +01001722 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01001723 (from_locked.vm->id == HF_OTHER_WORLD_ID)
1724 ? memory_region->sender
1725 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01001726 ret = (struct ffa_value){
1727 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001728 .arg1 = (uint32_t)memory_region->handle,
1729 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01001730 .arg3 = fragment_length,
1731 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01001732 }
1733
1734out:
1735 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001736 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001737 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001738}
1739
1740/**
J-Alves8505a8a2022-06-15 18:10:18 +01001741 * Continues an operation to donate, lend or share memory to a VM from the current
1742 * world. If this is the last fragment then checks that the transition is valid
1743 * for the type of memory sending operation and updates the stage-2 page tables
1744 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001745 *
1746 * Assumes that the caller has already found and locked the sender VM and copied
1747 * the memory region descriptor from the sender's TX buffer to a freshly
1748 * allocated page from Hafnium's internal pool.
1749 *
1750 * This function takes ownership of the `fragment` passed in; it must not be
1751 * freed by the caller.
1752 */
1753struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1754 void *fragment,
1755 uint32_t fragment_length,
1756 ffa_memory_handle_t handle,
1757 struct mpool *page_pool)
1758{
1759 struct share_states_locked share_states = share_states_lock();
1760 struct ffa_memory_share_state *share_state;
1761 struct ffa_value ret;
1762 struct ffa_memory_region *memory_region;
1763
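	/*
	 * Constituent descriptors are read in place from the fragment buffer,
	 * so it must be suitably aligned and hold a whole number of
	 * constituents.
	 */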
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001764 CHECK(is_aligned(fragment,
1765 alignof(struct ffa_memory_region_constituent)));
1766 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
1767 0) {
1768 dlog_verbose("Fragment length %u misaligned.\n",
1769 fragment_length);
1770 ret = ffa_error(FFA_INVALID_PARAMETERS);
1771 goto out_free_fragment;
1772 }
1773
Andrew Walbranca808b12020-05-15 17:22:28 +01001774 ret = ffa_memory_send_continue_validate(share_states, handle,
1775 &share_state,
1776 from_locked.vm->id, page_pool);
1777 if (ret.func != FFA_SUCCESS_32) {
1778 goto out_free_fragment;
1779 }
1780 memory_region = share_state->memory_region;
1781
J-Alves95df0ef2022-12-07 10:09:48 +00001782 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001783 dlog_error(
1784 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01001785 "other world. This should never happen, and indicates "
1786 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01001787 "EL3 code.\n");
1788 ret = ffa_error(FFA_INVALID_PARAMETERS);
1789 goto out_free_fragment;
1790 }
1791
1792 /* Add this fragment. */
1793 share_state->fragments[share_state->fragment_count] = fragment;
1794 share_state->fragment_constituent_counts[share_state->fragment_count] =
1795 fragment_length / sizeof(struct ffa_memory_region_constituent);
1796 share_state->fragment_count++;
1797
1798 /* Check whether the memory send operation is now ready to complete. */
1799 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001800 ret = ffa_memory_send_complete(
1801 from_locked, share_states, share_state, page_pool,
1802 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001803 } else {
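		/*
		 * More fragments are expected: reply with FFA_MEM_FRAG_RX,
		 * returning the handle and the offset at which the next
		 * fragment should resume.
		 */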
1804 ret = (struct ffa_value){
1805 .func = FFA_MEM_FRAG_RX_32,
1806 .arg1 = (uint32_t)handle,
1807 .arg2 = (uint32_t)(handle >> 32),
1808 .arg3 = share_state_next_fragment_offset(share_states,
1809 share_state)};
1810 }
1811 goto out;
1812
1813out_free_fragment:
1814 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001815
1816out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001817 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001818 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001819}
1820
Andrew Walbranca808b12020-05-15 17:22:28 +01001821/** Clean up after the receiver has finished retrieving a memory region. */
1822static void ffa_memory_retrieve_complete(
1823 struct share_states_locked share_states,
1824 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1825{
1826 if (share_state->share_func == FFA_MEM_DONATE_32) {
1827 /*
1828 * Memory that has been donated can't be relinquished,
1829 * so no need to keep the share state around.
1830 */
1831 share_state_free(share_states, share_state, page_pool);
1832 dlog_verbose("Freed share state for donate.\n");
1833 }
1834}
1835
J-Alves2d8457f2022-10-05 11:06:41 +01001836/**
1837 * Initialises the given memory region descriptor to be used for an
1838 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
1839 * fragment.
1840 * The memory region descriptor is initialized according to retriever's
1841 * FF-A version.
1842 *
1843 * Returns true on success, or false if the given constituents won't all fit in
1844 * the first fragment.
1845 */
1846static bool ffa_retrieved_memory_region_init(
1847 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01001848 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01001849 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001850 ffa_id_t receiver_id, ffa_memory_access_permissions_t permissions,
J-Alves2d8457f2022-10-05 11:06:41 +01001851 uint32_t page_count, uint32_t total_constituent_count,
1852 const struct ffa_memory_region_constituent constituents[],
1853 uint32_t fragment_constituent_count, uint32_t *total_length,
1854 uint32_t *fragment_length)
1855{
1856 struct ffa_composite_memory_region *composite_memory_region;
1857 struct ffa_memory_access *receiver;
1858 uint32_t i;
1859 uint32_t constituents_offset;
1860 uint32_t receiver_count;
1861
1862 assert(response != NULL);
1863
1864 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1865 struct ffa_memory_region_v1_0 *retrieve_response =
1866 (struct ffa_memory_region_v1_0 *)response;
1867
J-Alves5da37d92022-10-24 16:33:48 +01001868 ffa_memory_region_init_header_v1_0(
1869 retrieve_response, sender, attributes, flags, handle, 0,
1870 RECEIVERS_COUNT_IN_RETRIEVE_RESP);
J-Alves2d8457f2022-10-05 11:06:41 +01001871
1872 receiver = &retrieve_response->receivers[0];
1873 receiver_count = retrieve_response->receiver_count;
1874
1875 receiver->composite_memory_region_offset =
1876 sizeof(struct ffa_memory_region_v1_0) +
1877 receiver_count * sizeof(struct ffa_memory_access);
1878
1879 composite_memory_region = ffa_memory_region_get_composite_v1_0(
1880 retrieve_response, 0);
1881 } else {
1882 /* Default to FF-A v1.1 version. */
1883 struct ffa_memory_region *retrieve_response =
1884 (struct ffa_memory_region *)response;
1885
1886 ffa_memory_region_init_header(retrieve_response, sender,
1887 attributes, flags, handle, 0, 1);
1888
1889 receiver = &retrieve_response->receivers[0];
1890 receiver_count = retrieve_response->receiver_count;
1891
1892 /*
1893		 * Note that `sizeof(struct ffa_memory_region)` and
1894		 * `sizeof(struct ffa_memory_access)` must both be multiples of
1895		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
1896 * guaranteed that the offset we calculate here is aligned to a
1897 * 64-bit boundary and so 64-bit values can be copied without
1898 * alignment faults.
1899 */
1900 receiver->composite_memory_region_offset =
1901 sizeof(struct ffa_memory_region) +
1902 receiver_count * sizeof(struct ffa_memory_access);
1903
1904 composite_memory_region =
1905 ffa_memory_region_get_composite(retrieve_response, 0);
1906 }
1907
1908 assert(receiver != NULL);
1909 assert(composite_memory_region != NULL);
1910
1911 /*
1912 * Initialized here as in memory retrieve responses we currently expect
1913 * one borrower to be specified.
1914 */
1915 ffa_memory_access_init_permissions(receiver, receiver_id, 0, 0, flags);
1916 receiver->receiver_permissions.permissions = permissions;
1917
1918 composite_memory_region->page_count = page_count;
1919 composite_memory_region->constituent_count = total_constituent_count;
1920 composite_memory_region->reserved_0 = 0;
1921
1922 constituents_offset = receiver->composite_memory_region_offset +
1923 sizeof(struct ffa_composite_memory_region);
1924 if (constituents_offset +
1925 fragment_constituent_count *
1926 sizeof(struct ffa_memory_region_constituent) >
1927 response_max_size) {
1928 return false;
1929 }
1930
1931 for (i = 0; i < fragment_constituent_count; ++i) {
1932 composite_memory_region->constituents[i] = constituents[i];
1933 }
1934
1935 if (total_length != NULL) {
1936 *total_length =
1937 constituents_offset +
1938 composite_memory_region->constituent_count *
1939 sizeof(struct ffa_memory_region_constituent);
1940 }
1941 if (fragment_length != NULL) {
1942 *fragment_length =
1943 constituents_offset +
1944 fragment_constituent_count *
1945 sizeof(struct ffa_memory_region_constituent);
1946 }
1947
1948 return true;
1949}
1950
J-Alves96de29f2022-04-26 16:05:24 +01001951/*
1952 * Finds the given receiver in the 'struct ffa_memory_region' receivers array
1953 * and returns its index. If the receiver's ID doesn't exist in the array,
1954 * returns the region's 'receiver_count'.
1955 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001956uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01001957 ffa_id_t receiver)
J-Alves96de29f2022-04-26 16:05:24 +01001958{
1959 struct ffa_memory_access *receivers;
1960 uint32_t i;
1961
1962 assert(memory_region != NULL);
1963
1964 receivers = memory_region->receivers;
1965
1966 for (i = 0U; i < memory_region->receiver_count; i++) {
1967 if (receivers[i].receiver_permissions.receiver == receiver) {
1968 break;
1969 }
1970 }
1971
1972 return i;
1973}
1974
1975/**
1976 * Validates the permissions requested by the retriever against those
1977 * specified by the lender. Optionally, the permissions to be used for the
1978 * stage-2 mapping are returned through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01001979 * Returns FFA_SUCCESS if all the fields are valid. FFA_ERROR, with error code:
1980 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
1981 * specification for each ABI.
1982 * - FFA_DENIED -> if the permissions specified by the retriever are not
1983 * less permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01001984 */
J-Alvesdcad8992023-09-15 14:10:35 +01001985static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
1986 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01001987 enum ffa_data_access requested_data_access,
1988 enum ffa_instruction_access sent_instruction_access,
1989 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01001990 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01001991{
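	/*
	 * The data access requested by the retriever must be no more
	 * permissive than what the sender granted; an unspecified request
	 * inherits the sender's value.
	 */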
1992 switch (sent_data_access) {
1993 case FFA_DATA_ACCESS_NOT_SPECIFIED:
1994 case FFA_DATA_ACCESS_RW:
1995 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1996 requested_data_access == FFA_DATA_ACCESS_RW) {
1997 if (permissions != NULL) {
1998 ffa_set_data_access_attr(permissions,
1999 FFA_DATA_ACCESS_RW);
2000 }
2001 break;
2002 }
2003 /* Intentional fall-through. */
2004 case FFA_DATA_ACCESS_RO:
2005 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2006 requested_data_access == FFA_DATA_ACCESS_RO) {
2007 if (permissions != NULL) {
2008 ffa_set_data_access_attr(permissions,
2009 FFA_DATA_ACCESS_RO);
2010 }
2011 break;
2012 }
2013 dlog_verbose(
2014 "Invalid data access requested; sender specified "
2015 "permissions %#x but receiver requested %#x.\n",
2016 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002017 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002018 case FFA_DATA_ACCESS_RESERVED:
2019 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2020 "checked before this point.");
2021 }
2022
J-Alvesdcad8992023-09-15 14:10:35 +01002023 /*
2024 * For operations with a single borrower, If it is an FFA_MEMORY_LEND
2025 * or FFA_MEMORY_DONATE the retriever should have specifed the
2026 * instruction permissions it wishes to receive.
2027 */
2028 switch (share_func) {
2029 case FFA_MEM_SHARE_32:
2030 if (requested_instruction_access !=
2031 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2032 dlog_verbose(
2033 "%s: for share instruction permissions must "
2034 "NOT be specified.\n",
2035 __func__);
2036 return ffa_error(FFA_INVALID_PARAMETERS);
2037 }
2038 break;
2039 case FFA_MEM_LEND_32:
2040 /*
2041 * For operations with multiple borrowers only permit XN
2042 * permissions, and both Sender and borrower should have used
2043 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2044 */
2045 if (multiple_borrowers) {
2046 if (requested_instruction_access !=
2047 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2048 dlog_verbose(
2049 "%s: lend/share/donate with multiple "
2050 "borrowers "
2051 "instruction permissions must NOT be "
2052 "specified.\n",
2053 __func__);
2054 return ffa_error(FFA_INVALID_PARAMETERS);
2055 }
2056 break;
2057 }
2058 /* Fall through if the operation targets a single borrower. */
2059 case FFA_MEM_DONATE_32:
2060 if (!multiple_borrowers &&
2061 requested_instruction_access ==
2062 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2063 dlog_verbose(
2064 "%s: for lend/donate with single borrower "
2065				"instruction permissions must be specified "
2066 "by borrower\n",
2067 __func__);
2068 return ffa_error(FFA_INVALID_PARAMETERS);
2069 }
2070 break;
2071 default:
2072 panic("%s: Wrong func id provided.\n", __func__);
2073 }
2074
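	/*
	 * Likewise, the requested instruction access must not exceed what the
	 * sender granted.
	 */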
J-Alves96de29f2022-04-26 16:05:24 +01002075 switch (sent_instruction_access) {
2076 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2077 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002078 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002079 if (permissions != NULL) {
2080 ffa_set_instruction_access_attr(
2081 permissions, FFA_INSTRUCTION_ACCESS_X);
2082 }
2083 break;
2084 }
J-Alvesdcad8992023-09-15 14:10:35 +01002085 /*
2086 * Fall through if requested permissions are less
2087 * permissive than those provided by the sender.
2088 */
J-Alves96de29f2022-04-26 16:05:24 +01002089 case FFA_INSTRUCTION_ACCESS_NX:
2090 if (requested_instruction_access ==
2091 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2092 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2093 if (permissions != NULL) {
2094 ffa_set_instruction_access_attr(
2095 permissions, FFA_INSTRUCTION_ACCESS_NX);
2096 }
2097 break;
2098 }
2099 dlog_verbose(
2100 "Invalid instruction access requested; sender "
2101 "specified permissions %#x but receiver requested "
2102 "%#x.\n",
2103 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002104 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002105 case FFA_INSTRUCTION_ACCESS_RESERVED:
2106 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2107 "be checked before this point.");
2108 }
2109
J-Alvesdcad8992023-09-15 14:10:35 +01002110 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002111}
2112
2113/**
2114 * Validate the receivers' permissions in the retrieve request against those
2115 * specified by the lender.
2116 * In the `permissions` argument, returns the permissions to set in stage-2 for
2117 * the caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002118 * The function looks into the flag to bypass multiple borrower checks:
2119 * - If not set returns FFA_SUCCESS if all specified permissions are valid.
2120 * - If set returns FFA_SUCCESS if the descriptor contains the permissions
2121 * to the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2122 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002123 */
2124static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2125 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002126 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
J-Alvesdcad8992023-09-15 14:10:35 +01002127 ffa_memory_access_permissions_t *permissions, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002128{
2129 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002130 bool bypass_multi_receiver_check =
2131 (retrieve_request->flags &
2132 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002133 const uint32_t region_receiver_count = memory_region->receiver_count;
2134 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002135
2136 assert(permissions != NULL);
2137
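	/*
	 * Without the bypass flag, the retriever must name the same set of
	 * borrowers as the lender; with it, only the caller's own entry is
	 * expected.
	 */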
J-Alves3456e032023-07-20 12:20:05 +01002138 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002139 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002140 dlog_verbose(
2141 "Retrieve request should contain same list of "
2142 "borrowers, as specified by the lender.\n");
2143 return ffa_error(FFA_INVALID_PARAMETERS);
2144 }
2145 } else {
2146 if (retrieve_request->receiver_count != 1) {
2147 dlog_verbose(
2148 "Set bypass multiple borrower check, receiver "
2149 "list must be sized 1 (%x)\n",
2150 memory_region->receiver_count);
2151 return ffa_error(FFA_INVALID_PARAMETERS);
2152 }
J-Alves96de29f2022-04-26 16:05:24 +01002153 }
2154
2155 retrieve_receiver_index = retrieve_request->receiver_count;
2156
2157 /* Should be populated with the permissions of the retriever. */
2158 *permissions = 0;
2159
2160 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2161 ffa_memory_access_permissions_t sent_permissions;
2162 struct ffa_memory_access *current_receiver =
2163 &retrieve_request->receivers[i];
2164 ffa_memory_access_permissions_t requested_permissions =
2165 current_receiver->receiver_permissions.permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002166 ffa_id_t current_receiver_id =
J-Alves96de29f2022-04-26 16:05:24 +01002167 current_receiver->receiver_permissions.receiver;
2168 bool found_to_id = current_receiver_id == to_vm_id;
2169
J-Alves3456e032023-07-20 12:20:05 +01002170 if (bypass_multi_receiver_check && !found_to_id) {
2171 dlog_verbose(
2172 "Bypass multiple borrower check for id %x.\n",
2173 current_receiver_id);
2174 continue;
2175 }
2176
J-Alves96de29f2022-04-26 16:05:24 +01002177 /*
2178 * Find the current receiver in the transaction descriptor from
2179 * sender.
2180 */
2181 uint32_t mem_region_receiver_index =
2182 ffa_memory_region_get_receiver(memory_region,
2183 current_receiver_id);
2184
2185 if (mem_region_receiver_index ==
2186 memory_region->receiver_count) {
2187 dlog_verbose("%s: receiver %x not found\n", __func__,
2188 current_receiver_id);
2189 return ffa_error(FFA_DENIED);
2190 }
2191
2192 sent_permissions =
2193 memory_region->receivers[mem_region_receiver_index]
2194 .receiver_permissions.permissions;
2195
2196 if (found_to_id) {
2197 retrieve_receiver_index = i;
2198 }
2199
2200 /*
2201		 * While traversing the list of receivers, the caller's index is
2202		 * saved above, as the caller must be present in the list.
2203 */
2204
2205 if (current_receiver->composite_memory_region_offset != 0U) {
2206 dlog_verbose(
2207 "Retriever specified address ranges not "
2208 "supported (got offset %d).\n",
2209 current_receiver
2210 ->composite_memory_region_offset);
2211 return ffa_error(FFA_INVALID_PARAMETERS);
2212 }
2213
2214 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002215 * Check if retrieve request memory access list is valid:
2216 * - The retrieve request complies with the specification.
2217 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002218 */
J-Alvesdcad8992023-09-15 14:10:35 +01002219 ret = ffa_memory_retrieve_is_memory_access_valid(
2220 func_id, ffa_get_data_access_attr(sent_permissions),
2221 ffa_get_data_access_attr(requested_permissions),
2222 ffa_get_instruction_access_attr(sent_permissions),
2223 ffa_get_instruction_access_attr(requested_permissions),
2224 found_to_id ? permissions : NULL,
2225 region_receiver_count > 1);
2226 if (ret.func != FFA_SUCCESS_32) {
2227 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002228 }
2229
2230 /*
2231 * Can't request PM to clear memory if only provided with RO
2232 * permissions.
2233 */
2234 if (found_to_id &&
2235 (ffa_get_data_access_attr(*permissions) ==
2236 FFA_DATA_ACCESS_RO) &&
2237 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2238 0U) {
2239 dlog_verbose(
2240				"Receiver with RO permissions cannot request "
2241 "clear.\n");
2242 return ffa_error(FFA_DENIED);
2243 }
2244 }
2245
2246 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2247 dlog_verbose(
2248 "Retrieve request does not contain caller's (%x) "
2249 "permissions\n",
2250 to_vm_id);
2251 return ffa_error(FFA_INVALID_PARAMETERS);
2252 }
2253
2254 return (struct ffa_value){.func = FFA_SUCCESS_32};
2255}
2256
J-Alvesa9cd7e32022-07-01 13:49:33 +01002257/*
2258 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2259 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2260 * of a pending memory sharing operation whose allocator is the SPM, for
2261 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2262 * the memory region descriptor of the retrieve request must be zeroed with the
2263 * exception of the sender ID and handle.
2264 */
2265bool is_ffa_memory_retrieve_borrower_request(struct ffa_memory_region *request,
2266 struct vm_locked to_locked)
2267{
2268 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2269 request->attributes == 0U && request->flags == 0U &&
2270 request->tag == 0U && request->receiver_count == 0U &&
2271 plat_ffa_memory_handle_allocated_by_current_world(
2272 request->handle);
2273}
2274
2275/*
2276 * Helper to reset count of fragments retrieved by the hypervisor.
2277 */
2278static void ffa_memory_retrieve_complete_from_hyp(
2279 struct ffa_memory_share_state *share_state)
2280{
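	/*
	 * Once the hypervisor has fetched the final fragment, reset the count
	 * so its retrieval state is cleared for any later request.
	 */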
2281 if (share_state->hypervisor_fragment_count ==
2282 share_state->fragment_count) {
2283 share_state->hypervisor_fragment_count = 0;
2284 }
2285}
2286
J-Alves089004f2022-07-13 14:25:44 +01002287/**
2288 * Validate the memory region descriptor provided by the borrower in
2289 * FFA_MEM_RETRIEVE_REQ against the memory region saved when the lender issued
2290 * the memory sharing call.
2291 */
2292static struct ffa_value ffa_memory_retrieve_validate(
J-Alves19e20cf2023-08-02 12:48:55 +01002293 ffa_id_t receiver_id, struct ffa_memory_region *retrieve_request,
J-Alves089004f2022-07-13 14:25:44 +01002294 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2295 uint32_t share_func)
2296{
2297 ffa_memory_region_flags_t transaction_type =
2298 retrieve_request->flags &
2299 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002300 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01002301
2302 assert(retrieve_request != NULL);
2303 assert(memory_region != NULL);
2304 assert(receiver_index != NULL);
2305 assert(retrieve_request->sender == memory_region->sender);
2306
2307 /*
2308 * Check that the transaction type expected by the receiver is
2309 * correct, if it has been specified.
2310 */
2311 if (transaction_type !=
2312 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2313 transaction_type != (memory_region->flags &
2314 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2315 dlog_verbose(
2316 "Incorrect transaction type %#x for "
2317 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2318 transaction_type,
2319 memory_region->flags &
2320 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2321 retrieve_request->handle);
2322 return ffa_error(FFA_INVALID_PARAMETERS);
2323 }
2324
2325 if (retrieve_request->tag != memory_region->tag) {
2326 dlog_verbose(
2327 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2328 "%d for handle %#x.\n",
2329 retrieve_request->tag, memory_region->tag,
2330 retrieve_request->handle);
2331 return ffa_error(FFA_INVALID_PARAMETERS);
2332 }
2333
2334 *receiver_index =
2335 ffa_memory_region_get_receiver(memory_region, receiver_id);
2336
2337 if (*receiver_index == memory_region->receiver_count) {
2338 dlog_verbose(
2339 "Incorrect receiver VM ID %d for "
2340 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01002341 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002342 return ffa_error(FFA_INVALID_PARAMETERS);
2343 }
2344
2345 if ((retrieve_request->flags &
2346 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2347 dlog_verbose(
2348			"Retriever specified 'address range alignment hint' "
2349 "not supported.\n");
2350 return ffa_error(FFA_INVALID_PARAMETERS);
2351 }
2352 if ((retrieve_request->flags &
2353 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2354 dlog_verbose(
2355 "Bits 8-5 must be zero in memory region's flags "
2356 "(address range alignment hint not supported).\n");
2357 return ffa_error(FFA_INVALID_PARAMETERS);
2358 }
2359
2360 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2361 dlog_verbose(
2362 "Bits 31-10 must be zero in memory region's flags.\n");
2363 return ffa_error(FFA_INVALID_PARAMETERS);
2364 }
2365
2366 if (share_func == FFA_MEM_SHARE_32 &&
2367 (retrieve_request->flags &
2368 (FFA_MEMORY_REGION_FLAG_CLEAR |
2369 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2370 dlog_verbose(
2371			"Memory Share operation can't request the memory "
2372			"region to be cleared.\n");
2373 return ffa_error(FFA_INVALID_PARAMETERS);
2374 }
2375
2376 /*
2377 * If the borrower needs the memory to be cleared before mapping
2378 * to its address space, the sender should have set the flag
2379 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2380 * FFA_DENIED.
2381 */
2382 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2383 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2384 dlog_verbose(
2385 "Borrower needs memory cleared. Sender needs to set "
2386 "flag for clearing memory.\n");
2387 return ffa_error(FFA_DENIED);
2388 }
2389
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002390 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2391 security_state =
2392 ffa_get_memory_security_attr(retrieve_request->attributes);
2393 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2394 dlog_verbose(
2395 "Invalid security state for memory retrieve request "
2396 "operation.\n");
2397 return ffa_error(FFA_INVALID_PARAMETERS);
2398 }
2399
J-Alves089004f2022-07-13 14:25:44 +01002400 /*
2401 * If memory type is not specified, bypass validation of memory
2402 * attributes in the retrieve request. The retriever is expecting to
2403 * obtain this information from the SPMC.
2404 */
2405 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2406 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2407 return (struct ffa_value){.func = FFA_SUCCESS_32};
2408 }
2409
2410 /*
2411 * Ensure receiver's attributes are compatible with how
2412 * Hafnium maps memory: Normal Memory, Inner shareable,
2413 * Write-Back Read-Allocate Write-Allocate Cacheable.
2414 */
2415 return ffa_memory_attributes_validate(retrieve_request->attributes);
2416}
2417
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002418struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2419 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002420 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002421 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002422{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002423 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002424 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002425 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002426 sizeof(struct ffa_memory_access);
2427 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002428 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002429 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002430 uint32_t memory_to_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002431 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002432 struct ffa_memory_share_state *share_state;
2433 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002434 struct ffa_composite_memory_region *composite;
2435 uint32_t total_length;
2436 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01002437 ffa_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002438 bool is_retrieve_complete = false;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002439 ffa_memory_attributes_t attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002440
2441 dump_share_states();
2442
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002443 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002444 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002445 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002446 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002447 expected_retrieve_request_length,
2448 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002449 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002450 }
2451
2452 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002453 share_state = get_share_state(share_states, handle);
2454 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002455 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002456 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002457 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002458 goto out;
2459 }
2460
J-Alves96de29f2022-04-26 16:05:24 +01002461 if (!share_state->sending_complete) {
2462 dlog_verbose(
2463 "Memory with handle %#x not fully sent, can't "
2464 "retrieve.\n",
2465 handle);
2466 ret = ffa_error(FFA_INVALID_PARAMETERS);
2467 goto out;
2468 }
2469
Andrew Walbrana65a1322020-04-06 19:32:32 +01002470 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002471
Andrew Walbrana65a1322020-04-06 19:32:32 +01002472 CHECK(memory_region != NULL);
2473
J-Alves089004f2022-07-13 14:25:44 +01002474 if (retrieve_request->sender != memory_region->sender) {
2475 dlog_verbose(
2476 "Memory with handle %#x not fully sent, can't "
2477 "retrieve.\n",
2478 handle);
2479 ret = ffa_error(FFA_INVALID_PARAMETERS);
2480 goto out;
2481 }
J-Alves96de29f2022-04-26 16:05:24 +01002482
J-Alvesa9cd7e32022-07-01 13:49:33 +01002483 if (!is_ffa_memory_retrieve_borrower_request(retrieve_request,
2484 to_locked)) {
2485 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002486
J-Alvesb5084cf2022-07-06 14:20:12 +01002487 /*
2488 * The SPMC can only process retrieve requests to memory share
2489 * operations with one borrower from the other world. It can't
2490 * determine the ID of the NWd VM that invoked the retrieve
2491 * request interface call. It relies on the hypervisor to
2492 * validate the caller's ID against that provided in the
2493 * `receivers` list of the retrieve response.
2494 * In case there is only one borrower from the NWd in the
2495 * transaction descriptor, record that in the `receiver_id` for
2496 * later use, and validate in the retrieve request message.
J-Alves3fa82aa2023-09-20 18:19:21 +01002497 * This limitation exists because the SPMC can't determine
2498 * which index in the memory share state structures to update.
J-Alvesb5084cf2022-07-06 14:20:12 +01002499 */
2500 if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
2501 uint32_t other_world_count = 0;
2502
2503 for (uint32_t i = 0; i < memory_region->receiver_count;
2504 i++) {
2505 ffa_id_t id = memory_region->receivers[i]
2506 .receiver_permissions.receiver;
2507 if (!vm_id_is_current_world(id)) {
2508 receiver_id = id;
2509 other_world_count++;
2510 }
2511 }
2512 if (other_world_count > 1) {
2513 dlog_verbose(
2514 "Only one receiver from the other world is supported.\n");
2515 ret = ffa_error(FFA_NOT_SUPPORTED);
2516 goto out;
2517 }
2518 }
2519
2520 /*
2521 * Validate retrieve request, according to what was sent by the
2522 * sender. Function will output the `receiver_index` from the
J-Alves3fa82aa2023-09-20 18:19:21 +01002523 * provided memory region.
J-Alvesb5084cf2022-07-06 14:20:12 +01002524 */
J-Alves089004f2022-07-13 14:25:44 +01002525 ret = ffa_memory_retrieve_validate(
2526 receiver_id, retrieve_request, memory_region,
2527 &receiver_index, share_state->share_func);
2528 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002529 goto out;
2530 }
2531
2532 if (share_state->retrieved_fragment_count[receiver_index] !=
2533 0U) {
2534 dlog_verbose(
2535 "Memory with handle %#x already retrieved.\n",
2536 handle);
2537 ret = ffa_error(FFA_DENIED);
2538 goto out;
2539 }
2540
J-Alves3fa82aa2023-09-20 18:19:21 +01002541 /*
2542 * Validate the requested permissions against the sent
2543 * permissions. Outputs the permissions with which the memory
2544 * will be mapped in the retriever's stage-2 page tables.
2545 */
J-Alvesa9cd7e32022-07-01 13:49:33 +01002547 ret = ffa_memory_retrieve_validate_memory_access_list(
2548 memory_region, retrieve_request, receiver_id,
J-Alvesdcad8992023-09-15 14:10:35 +01002549 &permissions, share_state->share_func);
J-Alves614d9f42022-06-28 14:03:10 +01002550 if (ret.func != FFA_SUCCESS_32) {
2551 goto out;
2552 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002553
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002554 memory_to_mode = ffa_memory_permissions_to_mode(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002555 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002556
J-Alvesa9cd7e32022-07-01 13:49:33 +01002557 ret = ffa_retrieve_check_update(
2558 to_locked, memory_region->sender,
2559 share_state->fragments,
2560 share_state->fragment_constituent_counts,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002561 share_state->fragment_count, memory_to_mode,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002562 share_state->share_func, false, page_pool);
2563
2564 if (ret.func != FFA_SUCCESS_32) {
2565 goto out;
2566 }
2567
2568 share_state->retrieved_fragment_count[receiver_index] = 1;
2569 is_retrieve_complete =
2570 share_state->retrieved_fragment_count[receiver_index] ==
2571 share_state->fragment_count;
J-Alves3c5b2072022-11-21 12:45:40 +00002572
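/*
 * Record whether this borrower asked for the memory to be cleared
 * after relinquish, so the flag can be honoured when
 * FFA_MEM_RELINQUISH is later handled.
 */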
2573 share_state->clear_after_relinquish =
2574 (retrieve_request->flags &
2575 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) != 0U;
2576
J-Alvesa9cd7e32022-07-01 13:49:33 +01002577 } else {
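/*
 * Hypervisor retrieve request path: the hypervisor fetches the
 * transaction descriptor on its own behalf (e.g. to later handle
 * FFA_MEM_RECLAIM for its VMs), so no stage-2 mapping is updated
 * here; only track that the first fragment has been delivered.
 */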
2578 if (share_state->hypervisor_fragment_count != 0U) {
2579 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002580 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002581 "the hypervisor.\n",
2582 handle);
2583 ret = ffa_error(FFA_DENIED);
2584 goto out;
2585 }
2586
2587 share_state->hypervisor_fragment_count = 1;
2588
2589 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002590 }
2591
J-Alvesb5084cf2022-07-06 14:20:12 +01002592 /* VMs acquire the RX buffer from SPMC. */
2593 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2594
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002595 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002596 * Copy response to RX buffer of caller and deliver the message.
2597 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002598 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002599 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002600 composite = ffa_memory_region_get_composite(memory_region, 0);
2601 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002602 * Constituents which we received in the first fragment should
2603 * always fit in the first fragment we are sending, because the
2604 * header is the same size in both cases and we have a fixed
2605 * message buffer size. So `ffa_retrieved_memory_region_init`
2606 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002607 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002608
2609 /*
2610 * Set the security state in the memory retrieve response attributes
2611 * if specified by the target mode.
2612 */
2613 attributes = plat_ffa_memory_security_mode(
2614 memory_region->attributes, share_state->sender_orig_mode);
2615
Andrew Walbranca808b12020-05-15 17:22:28 +01002616 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01002617 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002618 HF_MAILBOX_SIZE, memory_region->sender, attributes,
2619 memory_region->flags, handle, receiver_id, permissions,
2620 composite->page_count, composite->constituent_count,
2621 share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01002622 share_state->fragment_constituent_counts[0], &total_length,
2623 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01002624
Andrew Walbranca808b12020-05-15 17:22:28 +01002625 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002626 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002627 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002628 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002629
J-Alvesa9cd7e32022-07-01 13:49:33 +01002630 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002631 ffa_memory_retrieve_complete(share_states, share_state,
2632 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002633 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002634 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002635 .arg1 = total_length,
2636 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002637out:
2638 share_states_unlock(&share_states);
2639 dump_share_states();
2640 return ret;
2641}
2642
J-Alves5da37d92022-10-24 16:33:48 +01002643/**
2644 * Determine expected fragment offset according to the FF-A version of
2645 * the caller.
2646 */
2647static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
2648 struct ffa_memory_region *memory_region,
2649 uint32_t retrieved_constituents_count, uint32_t ffa_version)
2650{
2651 uint32_t expected_fragment_offset;
2652 uint32_t composite_constituents_offset;
2653
2654 if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
2655 /*
2656 * Hafnium operates memory regions in FF-A v1.1 format, so we
2657 * can retrieve the constituents offset from the descriptor.
2658 */
2659 composite_constituents_offset =
2660 ffa_composite_constituent_offset(memory_region, 0);
2661 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2662 /*
2663 * If the retriever is FF-A v1.0, determine the composite offset
2664 * as it is expected to have been configured in the
2665 * retrieve response.
2666 */
2667 composite_constituents_offset =
2668 sizeof(struct ffa_memory_region_v1_0) +
2669 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
2670 sizeof(struct ffa_memory_access) +
2671 sizeof(struct ffa_composite_memory_region);
2672 } else {
2673 panic("%s received an invalid FF-A version.\n", __func__);
2674 }
2675
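/*
 * The retrieve response sent to the retriever carries a single
 * receiver entry, so the offset it sees is smaller than in the
 * stored region by the remaining (receiver_count - 1) access
 * descriptors.
 */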
2676 expected_fragment_offset =
2677 composite_constituents_offset +
2678 retrieved_constituents_count *
2679 sizeof(struct ffa_memory_region_constituent) -
2680 sizeof(struct ffa_memory_access) *
2681 (memory_region->receiver_count - 1);
2682
2683 return expected_fragment_offset;
2684}
2685
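/**
 * Handles an FFA_MEM_FRAG_RX call for a partially retrieved memory region:
 * checks that the requested fragment offset matches the constituents already
 * delivered, copies the next fragment of constituents into the caller's RX
 * buffer and returns FFA_MEM_FRAG_TX with the fragment length.
 */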
Andrew Walbranca808b12020-05-15 17:22:28 +01002686struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2687 ffa_memory_handle_t handle,
2688 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01002689 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002690 struct mpool *page_pool)
2691{
2692 struct ffa_memory_region *memory_region;
2693 struct share_states_locked share_states;
2694 struct ffa_memory_share_state *share_state;
2695 struct ffa_value ret;
2696 uint32_t fragment_index;
2697 uint32_t retrieved_constituents_count;
2698 uint32_t i;
2699 uint32_t expected_fragment_offset;
2700 uint32_t remaining_constituent_count;
2701 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002702 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01002703 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01002704
2705 dump_share_states();
2706
2707 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002708 share_state = get_share_state(share_states, handle);
2709 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002710 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2711 handle);
2712 ret = ffa_error(FFA_INVALID_PARAMETERS);
2713 goto out;
2714 }
2715
2716 memory_region = share_state->memory_region;
2717 CHECK(memory_region != NULL);
2718
Andrew Walbranca808b12020-05-15 17:22:28 +01002719 if (!share_state->sending_complete) {
2720 dlog_verbose(
2721 "Memory with handle %#x not fully sent, can't "
2722 "retrieve.\n",
2723 handle);
2724 ret = ffa_error(FFA_INVALID_PARAMETERS);
2725 goto out;
2726 }
2727
J-Alves59ed0042022-07-28 18:26:41 +01002728 /*
2729 * If a retrieve request from the hypervisor has been initiated for the
2730 * given share_state, continue it; otherwise assume this is the
2731 * continuation of a retrieve request from a NWd VM.
2732 */
2733 continue_ffa_hyp_mem_retrieve_req =
2734 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
2735 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01002736 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01002737
J-Alves59ed0042022-07-28 18:26:41 +01002738 if (!continue_ffa_hyp_mem_retrieve_req) {
2739 receiver_index = ffa_memory_region_get_receiver(
2740 memory_region, to_locked.vm->id);
2741
2742 if (receiver_index == memory_region->receiver_count) {
2743 dlog_verbose(
2744 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
2745 "borrower to memory sharing transaction (%x)\n",
2746 to_locked.vm->id, handle);
2747 ret = ffa_error(FFA_INVALID_PARAMETERS);
2748 goto out;
2749 }
2750
2751 if (share_state->retrieved_fragment_count[receiver_index] ==
2752 0 ||
2753 share_state->retrieved_fragment_count[receiver_index] >=
2754 share_state->fragment_count) {
2755 dlog_verbose(
2756 "Retrieval of memory with handle %#x not yet "
2757 "started or already completed (%d/%d fragments "
2758 "retrieved).\n",
2759 handle,
2760 share_state->retrieved_fragment_count
2761 [receiver_index],
2762 share_state->fragment_count);
2763 ret = ffa_error(FFA_INVALID_PARAMETERS);
2764 goto out;
2765 }
2766
2767 fragment_index =
2768 share_state->retrieved_fragment_count[receiver_index];
2769 } else {
2770 if (share_state->hypervisor_fragment_count == 0 ||
2771 share_state->hypervisor_fragment_count >=
2772 share_state->fragment_count) {
2773 dlog_verbose(
2774 "Retrieve of memory with handle %x not "
2775 "started from hypervisor.\n",
2776 handle);
2777 ret = ffa_error(FFA_INVALID_PARAMETERS);
2778 goto out;
2779 }
2780
2781 if (memory_region->sender != sender_vm_id) {
2782 dlog_verbose(
2783 "Sender ID (%x) is not as expected for memory "
2784 "handle %x\n",
2785 sender_vm_id, handle);
2786 ret = ffa_error(FFA_INVALID_PARAMETERS);
2787 goto out;
2788 }
2789
2790 fragment_index = share_state->hypervisor_fragment_count;
2791
2792 receiver_index = 0;
2793 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002794
2795 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002796 * Check that the given fragment offset is correct by counting
2797 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01002798 */
2799 retrieved_constituents_count = 0;
2800 for (i = 0; i < fragment_index; ++i) {
2801 retrieved_constituents_count +=
2802 share_state->fragment_constituent_counts[i];
2803 }
J-Alvesc7484f12022-05-13 12:41:14 +01002804
2805 CHECK(memory_region->receiver_count > 0);
2806
Andrew Walbranca808b12020-05-15 17:22:28 +01002807 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01002808 ffa_memory_retrieve_expected_offset_per_ffa_version(
2809 memory_region, retrieved_constituents_count,
2810 to_locked.vm->ffa_version);
2811
Andrew Walbranca808b12020-05-15 17:22:28 +01002812 if (fragment_offset != expected_fragment_offset) {
2813 dlog_verbose("Fragment offset was %d but expected %d.\n",
2814 fragment_offset, expected_fragment_offset);
2815 ret = ffa_error(FFA_INVALID_PARAMETERS);
2816 goto out;
2817 }
2818
J-Alves59ed0042022-07-28 18:26:41 +01002819 /* VMs acquire the RX buffer from SPMC. */
2820 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2821
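/*
 * Copy the next fragment of constituents into the caller's RX buffer.
 * A single fragment always fits in the mailbox, so no constituents
 * should remain after this call.
 */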
Andrew Walbranca808b12020-05-15 17:22:28 +01002822 remaining_constituent_count = ffa_memory_fragment_init(
2823 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2824 share_state->fragments[fragment_index],
2825 share_state->fragment_constituent_counts[fragment_index],
2826 &fragment_length);
2827 CHECK(remaining_constituent_count == 0);
2828 to_locked.vm->mailbox.recv_size = fragment_length;
2829 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2830 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002831 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01002832
J-Alves59ed0042022-07-28 18:26:41 +01002833 if (!continue_ffa_hyp_mem_retrieve_req) {
2834 share_state->retrieved_fragment_count[receiver_index]++;
2835 if (share_state->retrieved_fragment_count[receiver_index] ==
2836 share_state->fragment_count) {
2837 ffa_memory_retrieve_complete(share_states, share_state,
2838 page_pool);
2839 }
2840 } else {
2841 share_state->hypervisor_fragment_count++;
2842
2843 ffa_memory_retrieve_complete_from_hyp(share_state);
2844 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002845 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2846 .arg1 = (uint32_t)handle,
2847 .arg2 = (uint32_t)(handle >> 32),
2848 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002849
2850out:
2851 share_states_unlock(&share_states);
2852 dump_share_states();
2853 return ret;
2854}
2855
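/**
 * Handles an FFA_MEM_RELINQUISH request: checks that the caller is a borrower
 * which has fully retrieved the memory region, reverts the mapping in the
 * caller's stage-2 page tables and, if requested and permitted, clears the
 * memory once all other borrowers have relinquished it.
 */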
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002856struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002857 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002858 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002859{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002860 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002861 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002862 struct ffa_memory_share_state *share_state;
2863 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002864 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002865 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01002866 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00002867 bool receivers_relinquished_memory;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002868
Andrew Walbrana65a1322020-04-06 19:32:32 +01002869 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002870 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002871 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01002872 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002873 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002874 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002875 }
2876
Andrew Walbrana65a1322020-04-06 19:32:32 +01002877 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002878 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002879 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01002880 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002881 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002882 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002883 }
2884
2885 dump_share_states();
2886
2887 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002888 share_state = get_share_state(share_states, handle);
2889 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002890 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002891 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002892 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002893 goto out;
2894 }
2895
Andrew Walbranca808b12020-05-15 17:22:28 +01002896 if (!share_state->sending_complete) {
2897 dlog_verbose(
2898 "Memory with handle %#x not fully sent, can't "
2899 "relinquish.\n",
2900 handle);
2901 ret = ffa_error(FFA_INVALID_PARAMETERS);
2902 goto out;
2903 }
2904
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002905 memory_region = share_state->memory_region;
2906 CHECK(memory_region != NULL);
2907
J-Alves8eb19162022-04-28 10:56:48 +01002908 receiver_index = ffa_memory_region_get_receiver(memory_region,
2909 from_locked.vm->id);
2910
2911 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002912 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002913 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01002914 "with handle %#x and it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01002915 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002916 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002917 goto out;
2918 }
2919
J-Alves8eb19162022-04-28 10:56:48 +01002920 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01002921 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002922 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002923 "Memory with handle %#x not yet fully "
2924 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01002925 "receiver %x can't relinquish.\n",
2926 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002927 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002928 goto out;
2929 }
2930
J-Alves3c5b2072022-11-21 12:45:40 +00002931 /*
2932 * Clear if requested either in this relinquish call or earlier in a
2933 * borrower's retrieve request, once all other borrowers have relinquished.
2934 */
2935 receivers_relinquished_memory = true;
2936
2937 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2938 struct ffa_memory_access *receiver =
2939 &memory_region->receivers[i];
2940
2941 if (receiver->receiver_permissions.receiver ==
2942 from_locked.vm->id) {
2943 continue;
2944 }
2945
2946 if (share_state->retrieved_fragment_count[i] != 0U) {
2947 receivers_relinquished_memory = false;
2948 break;
2949 }
2950 }
2951
2952 clear = receivers_relinquished_memory &&
2953 (share_state->clear_after_relinquish ||
2954 (relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2955 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002956
2957 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002958 * Clear is not allowed for memory that was shared, as the
2959 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002960 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002961 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002962 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002963 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002964 goto out;
2965 }
2966
Andrew Walbranca808b12020-05-15 17:22:28 +01002967 ret = ffa_relinquish_check_update(
J-Alves3c5b2072022-11-21 12:45:40 +00002968 from_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002969 share_state->fragment_constituent_counts,
2970 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002971
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002972 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002973 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002974 * Mark memory handle as not retrieved, so it can be
2975 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002976 */
J-Alves8eb19162022-04-28 10:56:48 +01002977 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002978 }
2979
2980out:
2981 share_states_unlock(&share_states);
2982 dump_share_states();
2983 return ret;
2984}
2985
2986/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01002987 * Validates that the reclaim transition is allowed for the given
2988 * handle, updates the page table of the reclaiming VM, and frees the
2989 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002990 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002991struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01002992 ffa_memory_handle_t handle,
2993 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002994 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002995{
2996 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002997 struct ffa_memory_share_state *share_state;
2998 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002999 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003000
3001 dump_share_states();
3002
3003 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003004
Karl Meakin4a2854a2023-06-30 16:26:52 +01003005 share_state = get_share_state(share_states, handle);
3006 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003007 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003008 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003009 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003010 goto out;
3011 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003012 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003013
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003014 CHECK(memory_region != NULL);
3015
J-Alvesa9cd7e32022-07-01 13:49:33 +01003016 if (vm_id_is_current_world(to_locked.vm->id) &&
3017 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003018 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003019 "VM %#x attempted to reclaim memory handle %#x "
3020 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003021 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003022 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003023 goto out;
3024 }
3025
Andrew Walbranca808b12020-05-15 17:22:28 +01003026 if (!share_state->sending_complete) {
3027 dlog_verbose(
3028 "Memory with handle %#x not fully sent, can't "
3029 "reclaim.\n",
3030 handle);
3031 ret = ffa_error(FFA_INVALID_PARAMETERS);
3032 goto out;
3033 }
3034
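/*
 * Reclaim is only allowed once every borrower has relinquished the
 * memory, i.e. no borrower still has retrieved fragments outstanding.
 */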
J-Alves752236c2022-04-28 11:07:47 +01003035 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3036 if (share_state->retrieved_fragment_count[i] != 0) {
3037 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003038 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00003039 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003040 "borrowers(%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003041 handle,
3042 memory_region->receivers[i]
3043 .receiver_permissions.receiver);
3044 ret = ffa_error(FFA_DENIED);
3045 goto out;
3046 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003047 }
3048
Andrew Walbranca808b12020-05-15 17:22:28 +01003049 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00003050 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003051 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003052 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01003053 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003054
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003055 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003056 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003057 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003058 }
3059
3060out:
3061 share_states_unlock(&share_states);
3062 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003063}