/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
        return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}

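/*
 * Illustrative sketch, not part of the build: the index extracted above is
 * expected to round-trip with a handle produced by
 * `plat_ffa_memory_handle_make()` for this world, e.g.:
 *
 *      ffa_memory_handle_t handle = plat_ffa_memory_handle_make(5);
 *      uint64_t index = ffa_memory_handle_get_index(handle);
 *      // index == 5, assuming the platform encodes the index in the
 *      // non-allocator bits of the handle.
 */
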
/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
        struct share_states_locked share_states, uint32_t share_func,
        struct ffa_memory_region *memory_region, uint32_t fragment_length,
        ffa_memory_handle_t handle)
{
        assert(share_states.share_states != NULL);
        assert(memory_region != NULL);

        for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
                if (share_states.share_states[i].share_func == 0) {
                        struct ffa_memory_share_state *allocated_state =
                                &share_states.share_states[i];
                        struct ffa_composite_memory_region *composite =
                                ffa_memory_region_get_composite(memory_region,
                                                                0);

                        if (handle == FFA_MEMORY_HANDLE_INVALID) {
                                memory_region->handle =
                                        plat_ffa_memory_handle_make(i);
                        } else {
                                memory_region->handle = handle;
                        }
                        allocated_state->share_func = share_func;
                        allocated_state->memory_region = memory_region;
                        allocated_state->fragment_count = 1;
                        allocated_state->fragments[0] = composite->constituents;
                        allocated_state->fragment_constituent_counts[0] =
                                (fragment_length -
                                 ffa_composite_constituent_offset(memory_region,
                                                                  0)) /
                                sizeof(struct ffa_memory_region_constituent);
                        allocated_state->sending_complete = false;
                        for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
                             ++j) {
                                allocated_state->retrieved_fragment_count[j] =
                                        0;
                        }
                        return allocated_state;
                }
        }

        return NULL;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
        sl_lock(&share_states_lock_instance);

        return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
        assert(share_states->share_states != NULL);
        share_states->share_states = NULL;
        sl_unlock(&share_states_lock_instance);
}

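/*
 * Illustrative sketch of the intended locking discipline (hypothetical
 * caller, not part of this file): every lookup or mutation of a share state
 * happens between `share_states_lock()` and `share_states_unlock()`, e.g.:
 *
 *      struct share_states_locked share_states = share_states_lock();
 *      struct ffa_memory_share_state *share_state =
 *              get_share_state(share_states, handle);
 *      if (share_state != NULL) {
 *              // ... inspect or update the share state ...
 *      }
 *      share_states_unlock(&share_states);
 */
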
/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
        struct share_states_locked share_states, ffa_memory_handle_t handle)
{
        struct ffa_memory_share_state *share_state;

        assert(share_states.share_states != NULL);

        /*
         * First look for a share_state allocated by us, in which case the
         * handle is based on the index.
         */
        if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
                uint64_t index = ffa_memory_handle_get_index(handle);

                if (index < MAX_MEM_SHARES) {
                        share_state = &share_states.share_states[index];
                        if (share_state->share_func != 0) {
                                return share_state;
                        }
                }
        }

        /* Fall back to a linear scan. */
        for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
                share_state = &share_states.share_states[index];
                if (share_state->memory_region != NULL &&
                    share_state->memory_region->handle == handle &&
                    share_state->share_func != 0) {
                        return share_state;
                }
        }

        return NULL;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
                      struct ffa_memory_share_state *share_state,
                      struct mpool *page_pool)
{
        uint32_t i;

        assert(share_states.share_states != NULL);
        share_state->share_func = 0;
        share_state->sending_complete = false;
        mpool_free(page_pool, share_state->memory_region);
        /*
         * First fragment is part of the same page as the `memory_region`, so it
         * doesn't need to be freed separately.
         */
        share_state->fragments[0] = NULL;
        share_state->fragment_constituent_counts[0] = 0;
        for (i = 1; i < share_state->fragment_count; ++i) {
                mpool_free(page_pool, share_state->fragments[i]);
                share_state->fragments[i] = NULL;
                share_state->fragment_constituent_counts[i] = 0;
        }
        share_state->fragment_count = 0;
        share_state->memory_region = NULL;
        share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
                                  struct ffa_memory_share_state *share_state)
{
        struct ffa_composite_memory_region *composite;
        uint32_t expected_constituent_count;
        uint32_t fragment_constituent_count_total = 0;
        uint32_t i;

        /* Lock must be held. */
        assert(share_states.share_states != NULL);

        /*
         * Share state must already be valid, or it's not possible to get hold
         * of it.
         */
        CHECK(share_state->memory_region != NULL &&
              share_state->share_func != 0);

        composite =
                ffa_memory_region_get_composite(share_state->memory_region, 0);
        expected_constituent_count = composite->constituent_count;
        for (i = 0; i < share_state->fragment_count; ++i) {
                fragment_constituent_count_total +=
                        share_state->fragment_constituent_counts[i];
        }
        dlog_verbose(
                "Checking completion: constituent count %d/%d from %d "
                "fragments.\n",
                fragment_constituent_count_total, expected_constituent_count,
                share_state->fragment_count);

        return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
        struct share_states_locked share_states,
        struct ffa_memory_share_state *share_state)
{
        uint32_t next_fragment_offset;
        uint32_t i;

        /* Lock must be held. */
        assert(share_states.share_states != NULL);

        next_fragment_offset =
                ffa_composite_constituent_offset(share_state->memory_region, 0);
        for (i = 0; i < share_state->fragment_count; ++i) {
                next_fragment_offset +=
                        share_state->fragment_constituent_counts[i] *
                        sizeof(struct ffa_memory_region_constituent);
        }

        return next_fragment_offset;
}

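/*
 * Illustrative sketch (hypothetical caller): when a further FFA_MEM_FRAG_TX
 * arrives for a partially sent transaction, the offset supplied by the sender
 * is expected to match the value computed above, e.g.:
 *
 *      uint32_t expected_offset =
 *              share_state_next_fragment_offset(share_states, share_state);
 *      if (fragment_offset != expected_offset) {
 *              // ... reject the fragment as FFA_INVALID_PARAMETERS ...
 *      }
 */
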
static void dump_memory_region(struct ffa_memory_region *memory_region)
{
        uint32_t i;

        if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
                return;
        }

        dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to "
             "%u "
             "recipients [",
             memory_region->sender, memory_region->attributes,
             memory_region->flags, memory_region->tag,
             memory_region->receiver_count);
        for (i = 0; i < memory_region->receiver_count; ++i) {
                if (i != 0) {
                        dlog(", ");
                }
                dlog("VM %#x: %#x (offset %u)",
                     memory_region->receivers[i].receiver_permissions.receiver,
                     memory_region->receivers[i]
                             .receiver_permissions.permissions,
                     memory_region->receivers[i]
                             .composite_memory_region_offset);
        }
        dlog("]");
}

void dump_share_states(void)
{
        uint32_t i;

        if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
                return;
        }

        dlog("Current share states:\n");
        sl_lock(&share_states_lock_instance);
        for (i = 0; i < MAX_MEM_SHARES; ++i) {
                if (share_states[i].share_func != 0) {
                        switch (share_states[i].share_func) {
                        case FFA_MEM_SHARE_32:
                                dlog("SHARE");
                                break;
                        case FFA_MEM_LEND_32:
                                dlog("LEND");
                                break;
                        case FFA_MEM_DONATE_32:
                                dlog("DONATE");
                                break;
                        default:
                                dlog("invalid share_func %#x",
                                     share_states[i].share_func);
                        }
                        dlog(" %#x (", share_states[i].memory_region->handle);
                        dump_memory_region(share_states[i].memory_region);
                        if (share_states[i].sending_complete) {
                                dlog("): fully sent");
                        } else {
                                dlog("): partially sent");
                        }
                        dlog(" with %d fragments, %d retrieved, "
                             " sender's original mode: %#x\n",
                             share_states[i].fragment_count,
                             share_states[i].retrieved_fragment_count[0],
                             share_states[i].sender_orig_mode);
                }
        }
        sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
        ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
        uint32_t mode = 0;

        switch (ffa_get_data_access_attr(permissions)) {
        case FFA_DATA_ACCESS_RO:
                mode = MM_MODE_R;
                break;
        case FFA_DATA_ACCESS_RW:
                mode = MM_MODE_R | MM_MODE_W;
                break;
        case FFA_DATA_ACCESS_NOT_SPECIFIED:
                mode = (default_mode & (MM_MODE_R | MM_MODE_W));
                break;
        case FFA_DATA_ACCESS_RESERVED:
                panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
        }

        switch (ffa_get_instruction_access_attr(permissions)) {
        case FFA_INSTRUCTION_ACCESS_NX:
                break;
        case FFA_INSTRUCTION_ACCESS_X:
                mode |= MM_MODE_X;
                break;
        case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
                mode |= (default_mode & MM_MODE_X);
                break;
        case FFA_INSTRUCTION_ACCESS_RESERVED:
343 panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESVERVED.");
        }

        /* Set the security state bit if necessary. */
        if ((default_mode & plat_ffa_other_world_mode()) != 0) {
                mode |= plat_ffa_other_world_mode();
        }

        return mode;
}

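/*
 * Worked example (informational, assuming the usual FF-A encodings): RW data
 * access with unspecified instruction access, applied to a default mode of
 * MM_MODE_R | MM_MODE_W | MM_MODE_X, yields MM_MODE_R | MM_MODE_W |
 * MM_MODE_X, while an explicit NX instruction access yields only
 * MM_MODE_R | MM_MODE_W.
 */
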
/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
        struct vm_locked vm, uint32_t *orig_mode,
        struct ffa_memory_region_constituent **fragments,
        const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
        uint32_t i;
        uint32_t j;

        if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
                /*
                 * Fail if there are no constituents. Otherwise we would get an
                 * uninitialised *orig_mode.
                 */
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        for (i = 0; i < fragment_count; ++i) {
                for (j = 0; j < fragment_constituent_counts[i]; ++j) {
                        ipaddr_t begin = ipa_init(fragments[i][j].address);
                        size_t size = fragments[i][j].page_count * PAGE_SIZE;
                        ipaddr_t end = ipa_add(begin, size);
                        uint32_t current_mode;

                        /* Fail if addresses are not page-aligned. */
                        if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
                            !is_aligned(ipa_addr(end), PAGE_SIZE)) {
                                return ffa_error(FFA_INVALID_PARAMETERS);
                        }

                        /*
                         * Ensure that this constituent memory range is all
                         * mapped with the same mode.
                         */
                        if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
                                return ffa_error(FFA_DENIED);
                        }

                        /*
                         * Ensure that all constituents are mapped with the same
                         * mode.
                         */
                        if (i == 0 && j == 0) {
                                *orig_mode = current_mode;
                        } else if (current_mode != *orig_mode) {
                                dlog_verbose(
                                        "Expected mode %#x but was %#x for %d "
                                        "pages at %#x.\n",
                                        *orig_mode, current_mode,
                                        fragments[i][j].page_count,
                                        ipa_addr(begin));
                                return ffa_error(FFA_DENIED);
                        }
                }
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
        struct vm_locked from, uint32_t share_func,
        struct ffa_memory_access *receivers, uint32_t receivers_count,
        uint32_t *orig_from_mode,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t *from_mode)
{
        const uint32_t state_mask =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        struct ffa_value ret;

        ret = constituents_get_mode(from, orig_from_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                dlog_verbose("Inconsistent modes.\n");
                return ret;
        }

        /* Ensure the address range is normal memory and not a device. */
        if (*orig_from_mode & MM_MODE_D) {
                dlog_verbose("Can't share device memory (mode is %#x).\n",
                             *orig_from_mode);
                return ffa_error(FFA_DENIED);
        }

        /*
         * Ensure the sender is the owner and has exclusive access to the
         * memory.
         */
        if ((*orig_from_mode & state_mask) != 0) {
                return ffa_error(FFA_DENIED);
        }

        assert(receivers != NULL && receivers_count > 0U);

        for (uint32_t i = 0U; i < receivers_count; i++) {
                ffa_memory_access_permissions_t permissions =
                        receivers[i].receiver_permissions.permissions;
                uint32_t required_from_mode = ffa_memory_permissions_to_mode(
                        permissions, *orig_from_mode);

                if ((*orig_from_mode & required_from_mode) !=
                    required_from_mode) {
                        dlog_verbose(
                                "Sender tried to send memory with permissions "
                                "which required mode %#x but only had %#x "
                                "itself.\n",
                                required_from_mode, *orig_from_mode);
                        return ffa_error(FFA_DENIED);
                }
        }

        /* Find the appropriate new mode. */
        *from_mode = ~state_mask & *orig_from_mode;
        switch (share_func) {
        case FFA_MEM_DONATE_32:
                *from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
                break;

        case FFA_MEM_LEND_32:
                *from_mode |= MM_MODE_INVALID;
                break;

        case FFA_MEM_SHARE_32:
                *from_mode |= MM_MODE_SHARED;
                break;

        default:
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

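/*
 * Informational summary of the sender-side transitions selected above:
 * FFA_MEM_SHARE leaves the sender's mapping in place but marks it shared,
 * FFA_MEM_LEND removes the sender's access (invalid) while it retains
 * ownership, and FFA_MEM_DONATE both removes access and gives up ownership.
 */
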
static struct ffa_value ffa_relinquish_check_transition(
        struct vm_locked from, uint32_t *orig_from_mode,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t *from_mode)
{
        const uint32_t state_mask =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        uint32_t orig_from_state;
        struct ffa_value ret;

        ret = constituents_get_mode(from, orig_from_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                return ret;
        }

        /* Ensure the address range is normal memory and not a device. */
        if (*orig_from_mode & MM_MODE_D) {
                dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
                             *orig_from_mode);
                return ffa_error(FFA_DENIED);
        }

        /*
         * Ensure the relinquishing VM is not the owner but has access to the
         * memory.
         */
        orig_from_state = *orig_from_mode & state_mask;
        if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
                dlog_verbose(
                        "Tried to relinquish memory in state %#x (masked %#x "
                        "but should be %#x).\n",
                        *orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
                return ffa_error(FFA_DENIED);
        }

        /* Find the appropriate new mode. */
        *from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
        struct vm_locked to, uint32_t share_func,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t memory_to_attributes, uint32_t *to_mode)
{
        uint32_t orig_to_mode;
        struct ffa_value ret;

        ret = constituents_get_mode(to, &orig_to_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                dlog_verbose("Inconsistent modes.\n");
                return ret;
        }

        if (share_func == FFA_MEM_RECLAIM_32) {
                /*
                 * If the original FF-A memory send call was processed
                 * successfully, the orig_to_mode is expected to overlap with
                 * `state_mask`, as a result of `ffa_send_check_transition`.
                 */
                if (vm_id_is_current_world(to.vm->id)) {
                        assert((orig_to_mode &
                                (MM_MODE_INVALID | MM_MODE_UNOWNED |
                                 MM_MODE_SHARED)) != 0U);
                }
        } else {
                /*
                 * If the retriever is from virtual FF-A instance:
                 * Ensure the retriever has the expected state. We don't care
                 * about the MM_MODE_SHARED bit; either with or without it set
                 * are both valid representations of the !O-NA state.
                 */
                if (vm_id_is_current_world(to.vm->id) &&
                    to.vm->id != HF_PRIMARY_VM_ID &&
                    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
                            MM_MODE_UNMAPPED_MASK) {
                        return ffa_error(FFA_DENIED);
                }
        }

        /* Find the appropriate new mode. */
        *to_mode = memory_to_attributes;
        switch (share_func) {
        case FFA_MEM_DONATE_32:
                *to_mode |= 0;
                break;

        case FFA_MEM_LEND_32:
                *to_mode |= MM_MODE_UNOWNED;
                break;

        case FFA_MEM_SHARE_32:
                *to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
                break;

        case FFA_MEM_RECLAIM_32:
                *to_mode |= 0;
                break;

        default:
                dlog_error("Invalid share_func %#x.\n", share_func);
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
        struct vm_locked vm_locked,
        struct ffa_memory_region_constituent **fragments,
        const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t mode, struct mpool *ppool, bool commit)
{
        uint32_t i;
        uint32_t j;

        if (vm_locked.vm->el0_partition) {
                mode |= MM_MODE_USER | MM_MODE_NG;
        }

        /* Iterate over the memory region constituents within each fragment. */
        for (i = 0; i < fragment_count; ++i) {
                for (j = 0; j < fragment_constituent_counts[i]; ++j) {
                        size_t size = fragments[i][j].page_count * PAGE_SIZE;
                        paddr_t pa_begin =
                                pa_from_ipa(ipa_init(fragments[i][j].address));
                        paddr_t pa_end = pa_add(pa_begin, size);
                        uint32_t pa_bits =
                                arch_mm_get_pa_bits(arch_mm_get_pa_range());

                        /*
                         * Ensure the requested region falls into system's PA
                         * range.
                         */
                        if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
                            ((pa_addr(pa_end) >> pa_bits) > 0)) {
                                dlog_error("Region is outside of PA Range\n");
                                return false;
                        }

                        if (commit) {
                                vm_identity_commit(vm_locked, pa_begin, pa_end,
                                                   mode, ppool, NULL);
                        } else if (!vm_identity_prepare(vm_locked, pa_begin,
                                                        pa_end, mode, ppool)) {
                                return false;
                        }
                }
        }

        return true;
}

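/*
 * Illustrative sketch of the prepare/commit pattern described above
 * (hypothetical caller, mirroring how this file uses it): first call with
 * commit=false to reserve page-table memory, then call with commit=true once
 * failure is no longer possible, e.g.:
 *
 *      if (!ffa_region_group_identity_map(vm_locked, fragments,
 *                                         fragment_constituent_counts,
 *                                         fragment_count, mode, page_pool,
 *                                         false)) {
 *              return ffa_error(FFA_NO_MEMORY);
 *      }
 *      CHECK(ffa_region_group_identity_map(vm_locked, fragments,
 *                                          fragment_constituent_counts,
 *                                          fragment_count, mode, page_pool,
 *                                          true));
 */
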
/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
                         uint32_t extra_mode_attributes)
{
        /*
         * TODO: change this to a CPU local single page window rather than a
         * global mapping of the whole range. Such an approach will limit
         * the changes to stage-1 tables and will allow only local
         * invalidation.
         */
        bool ret;
        struct mm_stage1_locked stage1_locked = mm_lock_stage1();
        void *ptr = mm_identity_map(stage1_locked, begin, end,
                                    MM_MODE_W | (extra_mode_attributes &
                                                 plat_ffa_other_world_mode()),
                                    ppool);
        size_t size = pa_difference(begin, end);

        if (!ptr) {
                goto fail;
        }

        memset_s(ptr, size, 0, size);
        arch_mm_flush_dcache(ptr, size);
        mm_unmap(stage1_locked, begin, end, ppool);

        ret = true;
        goto out;

fail:
        ret = false;

out:
        mm_unlock_stage1(&stage1_locked);

        return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
        uint32_t security_state_mode,
        struct ffa_memory_region_constituent **fragments,
        const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        struct mpool *page_pool)
{
        struct mpool local_page_pool;
        uint32_t i;
        bool ret = false;

        /*
         * Create a local pool so any freed memory can't be used by another
         * thread. This is to ensure each constituent that is mapped can be
         * unmapped again afterwards.
         */
        mpool_init_with_fallback(&local_page_pool, page_pool);

        /* Iterate over the memory region constituents within each fragment. */
        for (i = 0; i < fragment_count; ++i) {
                uint32_t j;

                for (j = 0; j < fragment_constituent_counts[i]; ++j) {
                        size_t size = fragments[i][j].page_count * PAGE_SIZE;
                        paddr_t begin =
                                pa_from_ipa(ipa_init(fragments[i][j].address));
                        paddr_t end = pa_add(begin, size);

                        if (!clear_memory(begin, end, &local_page_pool,
                                          security_state_mode)) {
                                /*
                                 * api_clear_memory will defrag on failure, so
                                 * no need to do it here.
                                 */
                                goto out;
                        }
                }
        }

        ret = true;

out:
        mpool_fini(&local_page_pool);
        return ret;
}

static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
                                   ipaddr_t in_begin, ipaddr_t in_end)
{
        return (ipa_addr(begin) >= ipa_addr(in_begin) &&
                ipa_addr(begin) < ipa_addr(in_end)) ||
               (ipa_addr(end) <= ipa_addr(in_end) &&
                ipa_addr(end) > ipa_addr(in_begin));
}

/**
 * Receives a memory range and looks for overlaps with the remainder
 * constituents of the memory share/lend/donate operation. Assumes they are
 * passed in order to avoid having to loop over all the elements at each call.
 * The function only compares the received memory ranges with those that follow
 * within the same fragment, and subsequent fragments from the same operation.
 */
static bool ffa_memory_check_overlap(
        struct ffa_memory_region_constituent **fragments,
        const uint32_t *fragment_constituent_counts,
        const uint32_t fragment_count, const uint32_t current_fragment,
        const uint32_t current_constituent)
{
        uint32_t i = current_fragment;
        uint32_t j = current_constituent;
        ipaddr_t current_begin = ipa_init(fragments[i][j].address);
        const uint32_t current_page_count = fragments[i][j].page_count;
        size_t current_size = current_page_count * PAGE_SIZE;
        ipaddr_t current_end = ipa_add(current_begin, current_size - 1);

        if (current_size == 0 ||
            current_size > UINT64_MAX - ipa_addr(current_begin)) {
                dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
                             current_begin, current_page_count);
                return false;
        }

        for (; i < fragment_count; i++) {
                j = (i == current_fragment) ? j + 1 : 0;

                for (; j < fragment_constituent_counts[i]; j++) {
                        ipaddr_t begin = ipa_init(fragments[i][j].address);
                        const uint32_t page_count = fragments[i][j].page_count;
                        size_t size = page_count * PAGE_SIZE;
                        ipaddr_t end = ipa_add(begin, size - 1);

                        if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
                                dlog_verbose(
                                        "Invalid page count. Addr: %x "
                                        "page_count: %x\n",
                                        begin, page_count);
                                return false;
                        }

                        /*
                         * Check if the current range is within begin and end, as
                         * well as the reverse. This should help optimize the
                         * loop, and reduce the number of iterations.
                         */
                        if (is_memory_range_within(begin, end, current_begin,
                                                   current_end) ||
                            is_memory_range_within(current_begin, current_end,
                                                   begin, end)) {
                                dlog_verbose(
                                        "Overlapping memory ranges: %#x - %#x "
                                        "with %#x - %#x\n",
                                        ipa_addr(begin), ipa_addr(end),
                                        ipa_addr(current_begin),
                                        ipa_addr(current_end));
                                return true;
                        }
                }
        }

        return false;
}

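/*
 * Worked example (informational): with a single fragment holding the
 * constituents {address = 0x1000, page_count = 1} and {address = 0x1000,
 * page_count = 2}, a call ffa_memory_check_overlap(fragments, counts, 1, 0, 0)
 * compares [0x1000, 0x1fff] against [0x1000, 0x2fff], finds the overlap and
 * returns true, so the send operation is rejected.
 */
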
/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_send_check_update(
        struct vm_locked from_locked,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t composite_total_page_count, uint32_t share_func,
        struct ffa_memory_access *receivers, uint32_t receivers_count,
        struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
        uint32_t i;
        uint32_t j;
        uint32_t orig_from_mode;
        uint32_t from_mode;
        struct mpool local_page_pool;
        struct ffa_value ret;
        uint32_t constituents_total_page_count = 0;

        /*
         * Make sure constituents are properly aligned to a 64-bit boundary. If
         * not we would get alignment faults trying to read (64-bit) values.
         */
        for (i = 0; i < fragment_count; ++i) {
                if (!is_aligned(fragments[i], 8)) {
                        dlog_verbose("Constituents not aligned.\n");
                        return ffa_error(FFA_INVALID_PARAMETERS);
                }
                for (j = 0; j < fragment_constituent_counts[i]; ++j) {
                        constituents_total_page_count +=
                                fragments[i][j].page_count;
                        if (ffa_memory_check_overlap(
                                    fragments, fragment_constituent_counts,
                                    fragment_count, i, j)) {
                                return ffa_error(FFA_INVALID_PARAMETERS);
                        }
                }
        }

        if (constituents_total_page_count != composite_total_page_count) {
                dlog_verbose(
                        "Composite page count differs from calculated page "
                        "count from constituents.\n");
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        /*
         * Check if the state transition is lawful for the sender, ensure that
         * all constituents of a memory region being shared are at the same
         * state.
         */
        ret = ffa_send_check_transition(from_locked, share_func, receivers,
                                        receivers_count, &orig_from_mode,
                                        fragments, fragment_constituent_counts,
                                        fragment_count, &from_mode);
        if (ret.func != FFA_SUCCESS_32) {
                dlog_verbose("Invalid transition for send.\n");
                return ret;
        }

        if (orig_from_mode_ret != NULL) {
                *orig_from_mode_ret = orig_from_mode;
        }

        /*
         * Create a local pool so any freed memory can't be used by another
         * thread. This is to ensure the original mapping can be restored if the
         * clear fails.
         */
        mpool_init_with_fallback(&local_page_pool, page_pool);

        /*
         * First reserve all required memory for the new page table entries
         * without committing, to make sure the entire operation will succeed
         * without exhausting the page pool.
         */
        if (!ffa_region_group_identity_map(
                    from_locked, fragments, fragment_constituent_counts,
                    fragment_count, from_mode, page_pool, false)) {
                /* TODO: partial defrag of failed range. */
                ret = ffa_error(FFA_NO_MEMORY);
                goto out;
        }

        /*
         * Update the mapping for the sender. This won't allocate because the
         * transaction was already prepared above, but may free pages in the
         * case that a whole block is being unmapped that was previously
         * partially mapped.
         */
        CHECK(ffa_region_group_identity_map(
                from_locked, fragments, fragment_constituent_counts,
                fragment_count, from_mode, &local_page_pool, true));

        /* Clear the memory so no VM or device can see the previous contents. */
        if (clear &&
            !ffa_clear_memory_constituents(
                    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
                    fragment_constituent_counts, fragment_count, page_pool)) {
                /*
                 * On failure, roll back by returning memory to the sender. This
                 * may allocate pages which were previously freed into
                 * `local_page_pool` by the call above, but will never allocate
                 * more pages than that so can never fail.
                 */
                CHECK(ffa_region_group_identity_map(
                        from_locked, fragments, fragment_constituent_counts,
                        fragment_count, orig_from_mode, &local_page_pool,
                        true));

                ret = ffa_error(FFA_NO_MEMORY);
                goto out;
        }

        ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
        mpool_fini(&local_page_pool);

        /*
         * Tidy up the page table by reclaiming failed mappings (if there was an
         * error) or merging entries into blocks where possible (on success).
         */
        vm_ptable_defrag(from_locked, page_pool);

        return ret;
}

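/*
 * Illustrative sketch (hypothetical caller, along the lines of
 * ffa_memory_send_complete below; the clear-flag name is assumed):
 *
 *      ret = ffa_send_check_update(
 *              from_locked, share_state->fragments,
 *              share_state->fragment_constituent_counts,
 *              share_state->fragment_count, composite->page_count,
 *              share_state->share_func, memory_region->receivers,
 *              memory_region->receiver_count, page_pool,
 *              (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0,
 *              orig_from_mode_ret);
 */
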
1006/**
1007 * Validates and maps memory shared from one VM to another.
1008 *
1009 * This function requires the calling context to hold the <to> lock.
1010 *
1011 * Returns:
1012 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001013 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001014 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001015 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001016 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001017 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001018 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001019struct ffa_value ffa_retrieve_check_update(
J-Alves19e20cf2023-08-02 12:48:55 +01001020 struct vm_locked to_locked, ffa_id_t from_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001021 struct ffa_memory_region_constituent **fragments,
1022 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1023 uint32_t memory_to_attributes, uint32_t share_func, bool clear,
1024 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001025{
Andrew Walbranca808b12020-05-15 17:22:28 +01001026 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001027 uint32_t to_mode;
1028 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001029 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001030
1031 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001032 * Make sure constituents are properly aligned to a 64-bit boundary. If
1033 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001034 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001035 for (i = 0; i < fragment_count; ++i) {
1036 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001037 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001038 return ffa_error(FFA_INVALID_PARAMETERS);
1039 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001040 }
1041
1042 /*
1043 * Check if the state transition is lawful for the recipient, and ensure
1044 * that all constituents of the memory region being retrieved are at the
1045 * same state.
1046 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001047 ret = ffa_retrieve_check_transition(
1048 to_locked, share_func, fragments, fragment_constituent_counts,
1049 fragment_count, memory_to_attributes, &to_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001050 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001051 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001052 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001053 }
1054
1055 /*
1056 * Create a local pool so any freed memory can't be used by another
1057 * thread. This is to ensure the original mapping can be restored if the
1058 * clear fails.
1059 */
1060 mpool_init_with_fallback(&local_page_pool, page_pool);
1061
1062 /*
1063 * First reserve all required memory for the new page table entries in
1064 * the recipient page tables without committing, to make sure the entire
1065 * operation will succeed without exhausting the page pool.
1066 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001067 if (!ffa_region_group_identity_map(
1068 to_locked, fragments, fragment_constituent_counts,
1069 fragment_count, to_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001070 /* TODO: partial defrag of failed range. */
1071 dlog_verbose(
1072 "Insufficient memory to update recipient page "
1073 "table.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001074 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001075 goto out;
1076 }
1077
1078 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001079 if (clear &&
1080 !ffa_clear_memory_constituents(
1081 plat_ffa_owner_world_mode(from_id), fragments,
1082 fragment_constituent_counts, fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001083 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001084 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001085 goto out;
1086 }
1087
Jose Marinho09b1db82019-08-08 09:16:59 +01001088 /*
1089 * Complete the transfer by mapping the memory into the recipient. This
1090 * won't allocate because the transaction was already prepared above, so
1091 * it doesn't need to use the `local_page_pool`.
1092 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001093 CHECK(ffa_region_group_identity_map(
1094 to_locked, fragments, fragment_constituent_counts,
1095 fragment_count, to_mode, page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +01001096
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001097 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001098
1099out:
1100 mpool_fini(&local_page_pool);
1101
1102 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001103 * Tidy up the page table by reclaiming failed mappings (if there was an
1104 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001105 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001106 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001107
1108 return ret;
1109}
1110
Andrew Walbran996d1d12020-05-27 14:08:43 +01001111static struct ffa_value ffa_relinquish_check_update(
J-Alves19e20cf2023-08-02 12:48:55 +01001112 struct vm_locked from_locked, ffa_id_t owner_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001113 struct ffa_memory_region_constituent **fragments,
1114 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1115 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001116{
1117 uint32_t orig_from_mode;
1118 uint32_t from_mode;
1119 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001120 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001121
Andrew Walbranca808b12020-05-15 17:22:28 +01001122 ret = ffa_relinquish_check_transition(
1123 from_locked, &orig_from_mode, fragments,
1124 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001125 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001126 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001127 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001128 }
1129
1130 /*
1131 * Create a local pool so any freed memory can't be used by another
1132 * thread. This is to ensure the original mapping can be restored if the
1133 * clear fails.
1134 */
1135 mpool_init_with_fallback(&local_page_pool, page_pool);
1136
1137 /*
1138 * First reserve all required memory for the new page table entries
1139 * without committing, to make sure the entire operation will succeed
1140 * without exhausting the page pool.
1141 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001142 if (!ffa_region_group_identity_map(
1143 from_locked, fragments, fragment_constituent_counts,
1144 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001145 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001146 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001147 goto out;
1148 }
1149
1150 /*
1151 * Update the mapping for the sender. This won't allocate because the
1152 * transaction was already prepared above, but may free pages in the
1153 * case that a whole block is being unmapped that was previously
1154 * partially mapped.
1155 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001156 CHECK(ffa_region_group_identity_map(
1157 from_locked, fragments, fragment_constituent_counts,
1158 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001159
1160 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001161 if (clear &&
1162 !ffa_clear_memory_constituents(
J-Alves3c5b2072022-11-21 12:45:40 +00001163 plat_ffa_owner_world_mode(owner_id), fragments,
J-Alves7db32002021-12-14 14:44:50 +00001164 fragment_constituent_counts, fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001165 /*
1166 * On failure, roll back by returning memory to the sender. This
1167 * may allocate pages which were previously freed into
1168 * `local_page_pool` by the call above, but will never allocate
1169 * more pages than that so can never fail.
1170 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001171 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001172 from_locked, fragments, fragment_constituent_counts,
1173 fragment_count, orig_from_mode, &local_page_pool,
1174 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001175
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001176 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001177 goto out;
1178 }
1179
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001180 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001181
1182out:
1183 mpool_fini(&local_page_pool);
1184
1185 /*
1186 * Tidy up the page table by reclaiming failed mappings (if there was an
1187 * error) or merging entries into blocks where possible (on success).
1188 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001189 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001190
1191 return ret;
1192}
1193
1194/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001195 * Complete a memory sending operation by checking that it is valid, updating
1196 * the sender page table, and then either marking the share state as having
1197 * completed sending (on success) or freeing it (on failure).
1198 *
1199 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1200 */
J-Alvesfdd29272022-07-19 13:16:31 +01001201struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001202 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001203 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1204 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001205{
1206 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001207 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01001208 struct ffa_value ret;
1209
1210 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001211 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001212 assert(memory_region != NULL);
1213 composite = ffa_memory_region_get_composite(memory_region, 0);
1214 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001215
1216 /* Check that state is valid in sender page table and update. */
1217 ret = ffa_send_check_update(
1218 from_locked, share_state->fragments,
1219 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001220 share_state->fragment_count, composite->page_count,
1221 share_state->share_func, memory_region->receivers,
1222 memory_region->receiver_count, page_pool,
1223 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001224 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001225 if (ret.func != FFA_SUCCESS_32) {
1226 /*
1227 * Free share state, it failed to send so it can't be retrieved.
1228 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001229 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1230 __func__, ffa_func_name(ret.func),
1231 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001232 share_state_free(share_states, share_state, page_pool);
1233 return ret;
1234 }
1235
1236 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001237 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001238
J-Alvesee68c542020-10-29 17:48:20 +00001239 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001240}
1241
1242/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001243 * Check that the memory attributes match Hafnium expectations:
1244 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1245 * Write-Allocate Cacheable.
1246 */
1247static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001248 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001249{
1250 enum ffa_memory_type memory_type;
1251 enum ffa_memory_cacheability cacheability;
1252 enum ffa_memory_shareability shareability;
1253
1254 memory_type = ffa_get_memory_type_attr(attributes);
1255 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1256 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1257 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001258 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001259 }
1260
1261 cacheability = ffa_get_memory_cacheability_attr(attributes);
1262 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1263 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1264 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001265 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001266 }
1267
1268 shareability = ffa_get_memory_shareability_attr(attributes);
1269 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1270 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1271 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001272 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001273 }
1274
1275 return (struct ffa_value){.func = FFA_SUCCESS_32};
1276}
1277
1278/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001279 * Check that the given `memory_region` represents a valid memory send request
1280 * of the given `share_func` type: that its length, offsets, flags, receiver
1281 * list, permissions and attributes are consistent with the FF-A specification.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001282 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001283 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001284 * not.
1285 */
J-Alves66652252022-07-06 09:49:51 +01001286struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001287 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1288 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001289 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001290{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001291 struct ffa_composite_memory_region *composite;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001292 uint64_t receivers_end;
1293 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001294 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001295 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001296 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001297 enum ffa_data_access data_access;
1298 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001299 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001300 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001301 const size_t minimum_first_fragment_length =
1302 (sizeof(struct ffa_memory_region) +
1303 sizeof(struct ffa_memory_access) +
1304 sizeof(struct ffa_composite_memory_region));
1305
1306 if (fragment_length < minimum_first_fragment_length) {
1307 dlog_verbose("Fragment length %u too short (min %u).\n",
1308 (size_t)fragment_length,
1309 minimum_first_fragment_length);
1310 return ffa_error(FFA_INVALID_PARAMETERS);
1311 }
1312
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001313 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1314 "struct ffa_memory_region_constituent must be 16 bytes");
1315 if (!is_aligned(fragment_length,
1316 sizeof(struct ffa_memory_region_constituent)) ||
1317 !is_aligned(memory_share_length,
1318 sizeof(struct ffa_memory_region_constituent))) {
1319 dlog_verbose(
1320 "Fragment length %u or total length %u"
1321 " is not 16-byte aligned.\n",
1322 fragment_length, memory_share_length);
1323 return ffa_error(FFA_INVALID_PARAMETERS);
1324 }
1325
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001326 if (fragment_length > memory_share_length) {
1327 dlog_verbose(
1328 "Fragment length %u greater than total length %u.\n",
1329 (size_t)fragment_length, (size_t)memory_share_length);
1330 return ffa_error(FFA_INVALID_PARAMETERS);
1331 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001332
J-Alves0b6653d2022-04-22 13:17:38 +01001333 assert(memory_region->receivers_offset ==
1334 offsetof(struct ffa_memory_region, receivers));
1335 assert(memory_region->memory_access_desc_size ==
1336 sizeof(struct ffa_memory_access));
1337
J-Alves95df0ef2022-12-07 10:09:48 +00001338 /* The sender must match the caller. */
1339 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1340 vm_id_is_current_world(memory_region->sender)) ||
1341 (vm_id_is_current_world(from_locked.vm->id) &&
1342 memory_region->sender != from_locked.vm->id)) {
1343 dlog_verbose("Invalid memory sender ID.\n");
1344 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001345 }
1346
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001347 if (memory_region->receiver_count == 0U) {
1348 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001349 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001350 }
1351
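	/*
	 * Expected layout of the first fragment, which the offset and size
	 * checks below enforce (offsets relative to the start of the
	 * transaction descriptor):
	 *
	 *   struct ffa_memory_region                     header
	 *   struct ffa_memory_access [receiver_count]    ends at receivers_end
	 *   struct ffa_composite_memory_region           at the composite offset
	 *   struct ffa_memory_region_constituent [...]   from constituents_start
	 */
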
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001352 /*
1353 * Ensure that the composite header is within the memory bounds and
1354 * doesn't overlap the first part of the message. Cast to uint64_t
1355 * to prevent overflow.
1356 */
1357 receivers_end = ((uint64_t)sizeof(struct ffa_memory_access) *
1358 (uint64_t)memory_region->receiver_count) +
1359 sizeof(struct ffa_memory_region);
1360 min_length = receivers_end +
1361 sizeof(struct ffa_composite_memory_region) +
1362 sizeof(struct ffa_memory_region_constituent);
1363 if (min_length > memory_share_length) {
1364 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1365 (size_t)memory_share_length, (size_t)min_length);
1366 return ffa_error(FFA_INVALID_PARAMETERS);
1367 }
1368
1369 composite_memory_region_offset =
1370 memory_region->receivers[0].composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001371
1372 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001373 * Check that the composite memory region descriptor is after the access
1374 * descriptors, is at least 16-byte aligned, and fits in the first
1375 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001376 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001377 if ((composite_memory_region_offset < receivers_end) ||
1378 (composite_memory_region_offset % 16 != 0) ||
1379 (composite_memory_region_offset >
1380 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1381 dlog_verbose(
1382 "Invalid composite memory region descriptor offset "
1383 "%u.\n",
1384 (size_t)composite_memory_region_offset);
1385 return ffa_error(FFA_INVALID_PARAMETERS);
1386 }
1387
1388 /*
1389 * Compute the start of the constituent regions. Already checked
1390 * to be not more than fragment_length and thus not more than
1391 * memory_share_length.
1392 */
1393 constituents_start = composite_memory_region_offset +
1394 sizeof(struct ffa_composite_memory_region);
1395 constituents_length = memory_share_length - constituents_start;
1396
1397 /*
1398 * Check that the number of constituents is consistent with the length
1399 * of the constituent region.
1400 */
1401 composite = ffa_memory_region_get_composite(memory_region, 0);
1402 if ((constituents_length %
1403 sizeof(struct ffa_memory_region_constituent) !=
1404 0) ||
1405 ((constituents_length /
1406 sizeof(struct ffa_memory_region_constituent)) !=
1407 composite->constituent_count)) {
1408 dlog_verbose("Invalid length %u or composite offset %u.\n",
1409 (size_t)memory_share_length,
1410 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001411 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001412 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001413 if (fragment_length < memory_share_length &&
1414 fragment_length < HF_MAILBOX_SIZE) {
1415 dlog_warning(
1416 "Initial fragment length %d smaller than mailbox "
1417 "size.\n",
1418 fragment_length);
1419 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001420
Andrew Walbrana65a1322020-04-06 19:32:32 +01001421 /*
1422 * Clear is not allowed for memory sharing, as the sender still has
1423 * access to the memory.
1424 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001425 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1426 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001427 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001428 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001429 }
1430
1431 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001432 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001433 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001434 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001435 }
1436
J-Alves363f5722022-04-25 17:37:37 +01001437 /* Check that the permissions are valid, for each specified receiver. */
1438 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1439 ffa_memory_access_permissions_t permissions =
1440 memory_region->receivers[i]
1441 .receiver_permissions.permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01001442 ffa_id_t receiver_id = memory_region->receivers[i]
1443 .receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01001444
1445 if (memory_region->sender == receiver_id) {
1446 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001447 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001448 }
Federico Recanati85090c42021-12-15 13:17:54 +01001449
J-Alves363f5722022-04-25 17:37:37 +01001450 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1451 j++) {
1452 if (receiver_id ==
1453 memory_region->receivers[j]
1454 .receiver_permissions.receiver) {
1455 dlog_verbose(
1456 "Repeated receiver(%x) in memory send "
1457 "operation.\n",
1458 memory_region->receivers[j]
1459 .receiver_permissions.receiver);
1460 return ffa_error(FFA_INVALID_PARAMETERS);
1461 }
1462 }
1463
1464 if (composite_memory_region_offset !=
1465 memory_region->receivers[i]
1466 .composite_memory_region_offset) {
1467 dlog_verbose(
1468 "All ffa_memory_access should point to the "
1469 "same composite memory region offset.\n");
1470 return ffa_error(FFA_INVALID_PARAMETERS);
1471 }
1472
1473 data_access = ffa_get_data_access_attr(permissions);
1474 instruction_access =
1475 ffa_get_instruction_access_attr(permissions);
1476 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1477 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1478 dlog_verbose(
1479 "Reserved value for receiver permissions "
1480 "%#x.\n",
1481 permissions);
1482 return ffa_error(FFA_INVALID_PARAMETERS);
1483 }
1484 if (instruction_access !=
1485 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1486 dlog_verbose(
1487 "Invalid instruction access permissions %#x "
1488 "for sending memory.\n",
1489 permissions);
1490 return ffa_error(FFA_INVALID_PARAMETERS);
1491 }
1492 if (share_func == FFA_MEM_SHARE_32) {
1493 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1494 dlog_verbose(
1495 "Invalid data access permissions %#x "
1496 "for sharing memory.\n",
1497 permissions);
1498 return ffa_error(FFA_INVALID_PARAMETERS);
1499 }
J-Alves363f5722022-04-25 17:37:37 +01001500 }
1501 if (share_func == FFA_MEM_LEND_32 &&
1502 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1503 dlog_verbose(
1504 "Invalid data access permissions %#x for "
1505 "lending memory.\n",
1506 permissions);
1507 return ffa_error(FFA_INVALID_PARAMETERS);
1508 }
1509
1510 if (share_func == FFA_MEM_DONATE_32 &&
1511 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1512 dlog_verbose(
1513 "Invalid data access permissions %#x for "
1514 "donating memory.\n",
1515 permissions);
1516 return ffa_error(FFA_INVALID_PARAMETERS);
1517 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001518 }
1519
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001520 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1521 security_state =
1522 ffa_get_memory_security_attr(memory_region->attributes);
1523 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1524 dlog_verbose(
1525 "Invalid security state for memory share operation.\n");
1526 return ffa_error(FFA_INVALID_PARAMETERS);
1527 }
1528
Federico Recanatid937f5e2021-12-20 17:38:23 +01001529 /*
J-Alves807794e2022-06-16 13:42:47 +01001530 * For a memory donate, or a lend with a single borrower, the memory
1531 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001532 */
J-Alves807794e2022-06-16 13:42:47 +01001533 if (share_func == FFA_MEM_DONATE_32 ||
1534 (share_func == FFA_MEM_LEND_32 &&
1535 memory_region->receiver_count == 1)) {
1536 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1537 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1538 dlog_verbose(
1539 "Memory type shall not be specified by "
1540 "sender.\n");
1541 return ffa_error(FFA_INVALID_PARAMETERS);
1542 }
1543 } else {
1544 /*
1545 * Check that sender's memory attributes match Hafnium
1546 * expectations: Normal Memory, Inner shareable, Write-Back
1547 * Read-Allocate Write-Allocate Cacheable.
1548 */
1549 ret = ffa_memory_attributes_validate(memory_region->attributes);
1550 if (ret.func != FFA_SUCCESS_32) {
1551 return ret;
1552 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001553 }
1554
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001555 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001556}
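
/*
 * Summary of the per-receiver permission rules enforced by
 * ffa_memory_send_validate() above:
 *
 *   FFA_MEM_SHARE:  data access must be specified (RO or RW);
 *   FFA_MEM_LEND:   data access must be specified (RO or RW);
 *   FFA_MEM_DONATE: data access must NOT be specified;
 *   all of them:    instruction access must NOT be specified, and
 *                   reserved encodings are rejected.
 */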
1557
1558/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001559 * Gets the share state for continuing an operation to donate, lend or share
1560 * memory, and checks that it is a valid request.
1561 *
1562 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1563 * not.
1564 */
J-Alvesfdd29272022-07-19 13:16:31 +01001565struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001566 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001567 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001568 struct mpool *page_pool)
1569{
1570 struct ffa_memory_share_state *share_state;
1571 struct ffa_memory_region *memory_region;
1572
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001573 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001574
1575 /*
1576 * Look up the share state by handle and make sure that the VM ID
1577 * matches.
1578 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01001579 share_state = get_share_state(share_states, handle);
1580 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001581 dlog_verbose(
1582 "Invalid handle %#x for memory send continuation.\n",
1583 handle);
1584 return ffa_error(FFA_INVALID_PARAMETERS);
1585 }
1586 memory_region = share_state->memory_region;
1587
J-Alvesfdd29272022-07-19 13:16:31 +01001588 if (vm_id_is_current_world(from_vm_id) &&
1589 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001590 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1591 return ffa_error(FFA_INVALID_PARAMETERS);
1592 }
1593
1594 if (share_state->sending_complete) {
1595 dlog_verbose(
1596 "Sending of memory handle %#x is already complete.\n",
1597 handle);
1598 return ffa_error(FFA_INVALID_PARAMETERS);
1599 }
1600
1601 if (share_state->fragment_count == MAX_FRAGMENTS) {
1602 /*
1603 * Log a warning as this is a sign that MAX_FRAGMENTS should
1604 * probably be increased.
1605 */
1606 dlog_warning(
1607 "Too many fragments for memory share with handle %#x; "
1608 "only %d supported.\n",
1609 handle, MAX_FRAGMENTS);
1610 /* Free share state, as it's not possible to complete it. */
1611 share_state_free(share_states, share_state, page_pool);
1612 return ffa_error(FFA_NO_MEMORY);
1613 }
1614
1615 *share_state_ret = share_state;
1616
1617 return (struct ffa_value){.func = FFA_SUCCESS_32};
1618}
1619
1620/**
J-Alves95df0ef2022-12-07 10:09:48 +00001621 * Checks if there is at least one receiver from the other world.
1622 */
J-Alvesfdd29272022-07-19 13:16:31 +01001623bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001624 struct ffa_memory_region *memory_region)
1625{
1626 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
J-Alves19e20cf2023-08-02 12:48:55 +01001627 ffa_id_t receiver = memory_region->receivers[i]
1628 .receiver_permissions.receiver;
J-Alves95df0ef2022-12-07 10:09:48 +00001629 if (!vm_id_is_current_world(receiver)) {
1630 return true;
1631 }
1632 }
1633 return false;
1634}
1635
1636/**
J-Alves9da280b2022-12-21 14:55:39 +00001637 * Validates a call to donate, lend or share memory in which Hafnium is the
1638 * designated allocator of the memory handle. In practice, this also means
1639 * Hafnium is responsible for managing the state structures for the transaction.
1640 * If Hafnium is the SPMC, it should allocate the memory handle when either the
1641 * sender is an SP or there is at least one borrower that is an SP.
1642 * If Hafnium is the hypervisor, it should allocate the memory handle when
1643 * operation involves only NWd VMs.
1644 *
1645 * If validation goes well, Hafnium updates the stage-2 page tables of the
1646 * sender. Validation consists of checking if the message length and number of
1647 * memory region constituents match, and if the transition is valid for the
1648 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001649 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001650 * Assumes that the caller has already found and locked the sender VM and copied
1651 * the memory region descriptor from the sender's TX buffer to a freshly
1652 * allocated page from Hafnium's internal pool. The caller must have also
1653 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001654 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001655 * This function takes ownership of the `memory_region` passed in and will free
1656 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001657 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001658struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001659 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001660 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001661 uint32_t fragment_length, uint32_t share_func,
1662 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001663{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001664 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001665 struct share_states_locked share_states;
1666 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001667
1668 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001669 * If there is an error validating the `memory_region` then we need to
1670 * free it because we own it but we won't be storing it in a share state
1671 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001672 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001673 ret = ffa_memory_send_validate(from_locked, memory_region,
1674 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001675 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001676 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001677 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001678 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001679 }
1680
Andrew Walbrana65a1322020-04-06 19:32:32 +01001681 /* Set flag for share function, ready to be retrieved later. */
1682 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001683 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001684 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001685 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001686 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001687 case FFA_MEM_LEND_32:
1688 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001689 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001690 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001691 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001692 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001693 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001694 }
1695
Andrew Walbranca808b12020-05-15 17:22:28 +01001696 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001697 /*
1698 * Allocate a share state before updating the page table. Otherwise if
1699 * updating the page table succeeded but allocating the share state
1700 * failed then it would leave the memory in a state where nobody could
1701 * get it back.
1702 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01001703 share_state = allocate_share_state(share_states, share_func,
1704 memory_region, fragment_length,
1705 FFA_MEMORY_HANDLE_INVALID);
1706 if (!share_state) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001707 dlog_verbose("Failed to allocate share state.\n");
1708 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001709 ret = ffa_error(FFA_NO_MEMORY);
1710 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001711 }
1712
Andrew Walbranca808b12020-05-15 17:22:28 +01001713 if (fragment_length == memory_share_length) {
1714 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001715 ret = ffa_memory_send_complete(
1716 from_locked, share_states, share_state, page_pool,
1717 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001718 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01001719 /*
1720 * Use the sender ID from 'memory_region', which has been validated
1721 * by this point. It is only reported back at the physical FF-A
1722 * instance; at a virtual FF-A instance this field is MBZ.
1723 */
J-Alves19e20cf2023-08-02 12:48:55 +01001724 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01001725 (from_locked.vm->id == HF_OTHER_WORLD_ID)
1726 ? memory_region->sender
1727 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01001728 ret = (struct ffa_value){
1729 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001730 .arg1 = (uint32_t)memory_region->handle,
1731 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01001732 .arg3 = fragment_length,
1733 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01001734 }
1735
1736out:
1737 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001738 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001739 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001740}
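
/*
 * Illustrative caller flow (a sketch only; the real handler lives in api.c
 * and its details, including the `api_page_pool` name, may differ): on
 * FFA_MEM_SHARE/LEND/DONATE the API layer copies the descriptor out of the
 * sender's TX buffer into a page from Hafnium's pool, locks the sender and
 * hands ownership of that copy to ffa_memory_send():
 *
 *   memory_region = mpool_alloc(&api_page_pool);
 *   memcpy_s(memory_region, HF_MAILBOX_SIZE, from->mailbox.send,
 *            fragment_length);
 *   ret = ffa_memory_send(from_locked, memory_region, memory_share_length,
 *                         fragment_length, FFA_MEM_SHARE_32,
 *                         &api_page_pool);
 *
 * If more fragments are pending, the returned FFA_MEM_FRAG_RX (handle plus
 * offset) is relayed back to the sender.
 */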
1741
1742/**
J-Alves8505a8a2022-06-15 18:10:18 +01001743 * Continues an operation to donate, lend or share memory to a VM from the current
1744 * world. If this is the last fragment then checks that the transition is valid
1745 * for the type of memory sending operation and updates the stage-2 page tables
1746 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001747 *
1748 * Assumes that the caller has already found and locked the sender VM and copied
1749 * the memory region descriptor from the sender's TX buffer to a freshly
1750 * allocated page from Hafnium's internal pool.
1751 *
1752 * This function takes ownership of the `fragment` passed in; it must not be
1753 * freed by the caller.
1754 */
1755struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1756 void *fragment,
1757 uint32_t fragment_length,
1758 ffa_memory_handle_t handle,
1759 struct mpool *page_pool)
1760{
1761 struct share_states_locked share_states = share_states_lock();
1762 struct ffa_memory_share_state *share_state;
1763 struct ffa_value ret;
1764 struct ffa_memory_region *memory_region;
1765
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001766 CHECK(is_aligned(fragment,
1767 alignof(struct ffa_memory_region_constituent)));
1768 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
1769 0) {
1770 dlog_verbose("Fragment length %u misaligned.\n",
1771 fragment_length);
1772 ret = ffa_error(FFA_INVALID_PARAMETERS);
1773 goto out_free_fragment;
1774 }
1775
Andrew Walbranca808b12020-05-15 17:22:28 +01001776 ret = ffa_memory_send_continue_validate(share_states, handle,
1777 &share_state,
1778 from_locked.vm->id, page_pool);
1779 if (ret.func != FFA_SUCCESS_32) {
1780 goto out_free_fragment;
1781 }
1782 memory_region = share_state->memory_region;
1783
J-Alves95df0ef2022-12-07 10:09:48 +00001784 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001785 dlog_error(
1786 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01001787 "other world. This should never happen, and indicates "
1788 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01001789 "EL3 code.\n");
1790 ret = ffa_error(FFA_INVALID_PARAMETERS);
1791 goto out_free_fragment;
1792 }
1793
1794 /* Add this fragment. */
1795 share_state->fragments[share_state->fragment_count] = fragment;
1796 share_state->fragment_constituent_counts[share_state->fragment_count] =
1797 fragment_length / sizeof(struct ffa_memory_region_constituent);
1798 share_state->fragment_count++;
1799
1800 /* Check whether the memory send operation is now ready to complete. */
1801 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001802 ret = ffa_memory_send_complete(
1803 from_locked, share_states, share_state, page_pool,
1804 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001805 } else {
1806 ret = (struct ffa_value){
1807 .func = FFA_MEM_FRAG_RX_32,
1808 .arg1 = (uint32_t)handle,
1809 .arg2 = (uint32_t)(handle >> 32),
1810 .arg3 = share_state_next_fragment_offset(share_states,
1811 share_state)};
1812 }
1813 goto out;
1814
1815out_free_fragment:
1816 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001817
1818out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001819 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001820 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001821}
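
/*
 * Fragmentation handshake implemented by ffa_memory_send() and
 * ffa_memory_send_continue(); the byte counts are only an example:
 *
 *   sender:  FFA_MEM_LEND    (total length 8192, first fragment 4096)
 *   relayer: FFA_MEM_FRAG_RX (handle, offset 4096 = bytes received so far)
 *   sender:  FFA_MEM_FRAG_TX (handle, next 4096 bytes of constituents)
 *   relayer: FFA_SUCCESS     (handle) once every fragment has arrived
 *
 * Every fragment after the first is a bare array of
 * struct ffa_memory_region_constituent, hence the alignment and size
 * checks at the top of ffa_memory_send_continue().
 */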
1822
Andrew Walbranca808b12020-05-15 17:22:28 +01001823/** Clean up after the receiver has finished retrieving a memory region. */
1824static void ffa_memory_retrieve_complete(
1825 struct share_states_locked share_states,
1826 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1827{
1828 if (share_state->share_func == FFA_MEM_DONATE_32) {
1829 /*
1830 * Memory that has been donated can't be relinquished,
1831 * so no need to keep the share state around.
1832 */
1833 share_state_free(share_states, share_state, page_pool);
1834 dlog_verbose("Freed share state for donate.\n");
1835 }
1836}
1837
J-Alves2d8457f2022-10-05 11:06:41 +01001838/**
1839 * Initialises the given memory region descriptor to be used for an
1840 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
1841 * fragment.
1842 * The memory region descriptor is initialized according to retriever's
1843 * FF-A version.
1844 *
1845 * Returns true on success, or false if the given constituents won't all fit in
1846 * the first fragment.
1847 */
1848static bool ffa_retrieved_memory_region_init(
1849 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01001850 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01001851 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001852 ffa_id_t receiver_id, ffa_memory_access_permissions_t permissions,
J-Alves2d8457f2022-10-05 11:06:41 +01001853 uint32_t page_count, uint32_t total_constituent_count,
1854 const struct ffa_memory_region_constituent constituents[],
1855 uint32_t fragment_constituent_count, uint32_t *total_length,
1856 uint32_t *fragment_length)
1857{
1858 struct ffa_composite_memory_region *composite_memory_region;
1859 struct ffa_memory_access *receiver;
1860 uint32_t i;
1861 uint32_t constituents_offset;
1862 uint32_t receiver_count;
1863
1864 assert(response != NULL);
1865
1866 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1867 struct ffa_memory_region_v1_0 *retrieve_response =
1868 (struct ffa_memory_region_v1_0 *)response;
1869
J-Alves5da37d92022-10-24 16:33:48 +01001870 ffa_memory_region_init_header_v1_0(
1871 retrieve_response, sender, attributes, flags, handle, 0,
1872 RECEIVERS_COUNT_IN_RETRIEVE_RESP);
J-Alves2d8457f2022-10-05 11:06:41 +01001873
1874 receiver = &retrieve_response->receivers[0];
1875 receiver_count = retrieve_response->receiver_count;
1876
1877 receiver->composite_memory_region_offset =
1878 sizeof(struct ffa_memory_region_v1_0) +
1879 receiver_count * sizeof(struct ffa_memory_access);
1880
1881 composite_memory_region = ffa_memory_region_get_composite_v1_0(
1882 retrieve_response, 0);
1883 } else {
1884 /* Default to FF-A v1.1 version. */
1885 struct ffa_memory_region *retrieve_response =
1886 (struct ffa_memory_region *)response;
1887
1888 ffa_memory_region_init_header(retrieve_response, sender,
1889 attributes, flags, handle, 0, 1);
1890
1891 receiver = &retrieve_response->receivers[0];
1892 receiver_count = retrieve_response->receiver_count;
1893
1894 /*
1895 * Note that `sizeof(struct ffa_memory_region)` and
1896 * `sizeof(struct ffa_memory_access)` must both be multiples of
1897 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
1898 * guaranteed that the offset we calculate here is aligned to a
1899 * 64-bit boundary and so 64-bit values can be copied without
1900 * alignment faults.
1901 */
1902 receiver->composite_memory_region_offset =
1903 sizeof(struct ffa_memory_region) +
1904 receiver_count * sizeof(struct ffa_memory_access);
1905
1906 composite_memory_region =
1907 ffa_memory_region_get_composite(retrieve_response, 0);
1908 }
1909
1910 assert(receiver != NULL);
1911 assert(composite_memory_region != NULL);
1912
1913 /*
1914 * Initialized here as in memory retrieve responses we currently expect
1915 * one borrower to be specified.
1916 */
1917 ffa_memory_access_init_permissions(receiver, receiver_id, 0, 0, flags);
1918 receiver->receiver_permissions.permissions = permissions;
1919
1920 composite_memory_region->page_count = page_count;
1921 composite_memory_region->constituent_count = total_constituent_count;
1922 composite_memory_region->reserved_0 = 0;
1923
1924 constituents_offset = receiver->composite_memory_region_offset +
1925 sizeof(struct ffa_composite_memory_region);
1926 if (constituents_offset +
1927 fragment_constituent_count *
1928 sizeof(struct ffa_memory_region_constituent) >
1929 response_max_size) {
1930 return false;
1931 }
1932
1933 for (i = 0; i < fragment_constituent_count; ++i) {
1934 composite_memory_region->constituents[i] = constituents[i];
1935 }
1936
1937 if (total_length != NULL) {
1938 *total_length =
1939 constituents_offset +
1940 composite_memory_region->constituent_count *
1941 sizeof(struct ffa_memory_region_constituent);
1942 }
1943 if (fragment_length != NULL) {
1944 *fragment_length =
1945 constituents_offset +
1946 fragment_constituent_count *
1947 sizeof(struct ffa_memory_region_constituent);
1948 }
1949
1950 return true;
1951}
1952
J-Alves96de29f2022-04-26 16:05:24 +01001953/*
1954 * Finds the given receiver in the memory region's receivers array and
1955 * returns its index. If the receiver's ID doesn't exist in the array,
1956 * returns the region's 'receiver_count'.
1957 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001958uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01001959 ffa_id_t receiver)
J-Alves96de29f2022-04-26 16:05:24 +01001960{
1961 struct ffa_memory_access *receivers;
1962 uint32_t i;
1963
1964 assert(memory_region != NULL);
1965
1966 receivers = memory_region->receivers;
1967
1968 for (i = 0U; i < memory_region->receiver_count; i++) {
1969 if (receivers[i].receiver_permissions.receiver == receiver) {
1970 break;
1971 }
1972 }
1973
1974 return i;
1975}
1976
1977/**
1978 * Validates the permissions requested by the retriever against those
1979 * specified by the lender of the memory share operation. Optionally returns,
1980 * through the `permissions` argument, the permissions to be used for the S2
J-Alvesdcad8992023-09-15 14:10:35 +01001981 * mapping. Returns FFA_SUCCESS if all the fields are valid, or FFA_ERROR with:
1982 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
1983 * specification for each ABI.
1984 * - FFA_DENIED -> if the permissions requested by the retriever are more
1985 * permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01001986 */
J-Alvesdcad8992023-09-15 14:10:35 +01001987static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
1988 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01001989 enum ffa_data_access requested_data_access,
1990 enum ffa_instruction_access sent_instruction_access,
1991 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01001992 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01001993{
1994 switch (sent_data_access) {
1995 case FFA_DATA_ACCESS_NOT_SPECIFIED:
1996 case FFA_DATA_ACCESS_RW:
1997 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1998 requested_data_access == FFA_DATA_ACCESS_RW) {
1999 if (permissions != NULL) {
2000 ffa_set_data_access_attr(permissions,
2001 FFA_DATA_ACCESS_RW);
2002 }
2003 break;
2004 }
2005 /* Intentional fall-through. */
2006 case FFA_DATA_ACCESS_RO:
2007 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2008 requested_data_access == FFA_DATA_ACCESS_RO) {
2009 if (permissions != NULL) {
2010 ffa_set_data_access_attr(permissions,
2011 FFA_DATA_ACCESS_RO);
2012 }
2013 break;
2014 }
2015 dlog_verbose(
2016 "Invalid data access requested; sender specified "
2017 "permissions %#x but receiver requested %#x.\n",
2018 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002019 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002020 case FFA_DATA_ACCESS_RESERVED:
2021 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2022 "checked before this point.");
2023 }
2024
J-Alvesdcad8992023-09-15 14:10:35 +01002025 /*
2026 * For operations with a single borrower, if it is an FFA_MEM_LEND
2027 * or FFA_MEM_DONATE the retriever should have specified the
2028 * instruction permissions it wishes to receive.
2029 */
2030 switch (share_func) {
2031 case FFA_MEM_SHARE_32:
2032 if (requested_instruction_access !=
2033 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2034 dlog_verbose(
2035 "%s: for share instruction permissions must "
2036 "NOT be specified.\n",
2037 __func__);
2038 return ffa_error(FFA_INVALID_PARAMETERS);
2039 }
2040 break;
2041 case FFA_MEM_LEND_32:
2042 /*
2043 * For operations with multiple borrowers only permit XN
2044 * permissions, and both Sender and borrower should have used
2045 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2046 */
2047 if (multiple_borrowers) {
2048 if (requested_instruction_access !=
2049 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2050 dlog_verbose(
2051 "%s: lend/share/donate with multiple "
2052 "borrowers "
2053 "instruction permissions must NOT be "
2054 "specified.\n",
2055 __func__);
2056 return ffa_error(FFA_INVALID_PARAMETERS);
2057 }
2058 break;
2059 }
2060 /* Fall through if the operation targets a single borrower. */
2061 case FFA_MEM_DONATE_32:
2062 if (!multiple_borrowers &&
2063 requested_instruction_access ==
2064 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2065 dlog_verbose(
2066 "%s: for lend/donate with single borrower "
2067 "instruction permissions must be speficified "
2068 "by borrower\n",
2069 __func__);
2070 return ffa_error(FFA_INVALID_PARAMETERS);
2071 }
2072 break;
2073 default:
2074 panic("%s: Wrong func id provided.\n", __func__);
2075 }
2076
J-Alves96de29f2022-04-26 16:05:24 +01002077 switch (sent_instruction_access) {
2078 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2079 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002080 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002081 if (permissions != NULL) {
2082 ffa_set_instruction_access_attr(
2083 permissions, FFA_INSTRUCTION_ACCESS_X);
2084 }
2085 break;
2086 }
J-Alvesdcad8992023-09-15 14:10:35 +01002087 /*
2088 * Fall through if requested permissions are less
2089 * permissive than those provided by the sender.
2090 */
J-Alves96de29f2022-04-26 16:05:24 +01002091 case FFA_INSTRUCTION_ACCESS_NX:
2092 if (requested_instruction_access ==
2093 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2094 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2095 if (permissions != NULL) {
2096 ffa_set_instruction_access_attr(
2097 permissions, FFA_INSTRUCTION_ACCESS_NX);
2098 }
2099 break;
2100 }
2101 dlog_verbose(
2102 "Invalid instruction access requested; sender "
2103 "specified permissions %#x but receiver requested "
2104 "%#x.\n",
2105 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002106 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002107 case FFA_INSTRUCTION_ACCESS_RESERVED:
2108 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2109 "be checked before this point.");
2110 }
2111
J-Alvesdcad8992023-09-15 14:10:35 +01002112 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002113}
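
/*
 * Examples of the data access combinations accepted above (value sent by
 * the lender + value in the retrieve request -> resulting S2 permission):
 *
 *   RW            + NOT_SPECIFIED -> RW
 *   RW            + RO            -> RO
 *   RO            + RW            -> FFA_DENIED
 *   NOT_SPECIFIED + RW            -> RW   (lend/donate, lender left it open)
 *
 * i.e. the retriever may narrow, but never widen, what the lender granted.
 */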
2114
2115/**
2116 * Validate the receivers' permissions in the retrieve request against those
2117 * specified by the lender.
2118 * Returns, in the `permissions` argument, the permissions to set at S2 for
2119 * the caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002120 * The function honours the flag to bypass the multiple-borrower checks:
2121 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2122 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2123 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2124 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002125 */
2126static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2127 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002128 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
J-Alvesdcad8992023-09-15 14:10:35 +01002129 ffa_memory_access_permissions_t *permissions, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002130{
2131 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002132 bool bypass_multi_receiver_check =
2133 (retrieve_request->flags &
2134 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002135 const uint32_t region_receiver_count = memory_region->receiver_count;
2136 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002137
2138 assert(permissions != NULL);
2139
J-Alves3456e032023-07-20 12:20:05 +01002140 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002141 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002142 dlog_verbose(
2143 "Retrieve request should contain same list of "
2144 "borrowers, as specified by the lender.\n");
2145 return ffa_error(FFA_INVALID_PARAMETERS);
2146 }
2147 } else {
2148 if (retrieve_request->receiver_count != 1) {
2149 dlog_verbose(
2150 "Set bypass multiple borrower check, receiver "
2151 "list must be sized 1 (%x)\n",
2152 memory_region->receiver_count);
2153 return ffa_error(FFA_INVALID_PARAMETERS);
2154 }
J-Alves96de29f2022-04-26 16:05:24 +01002155 }
2156
2157 retrieve_receiver_index = retrieve_request->receiver_count;
2158
2159 /* Should be populated with the permissions of the retriever. */
2160 *permissions = 0;
2161
2162 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2163 ffa_memory_access_permissions_t sent_permissions;
2164 struct ffa_memory_access *current_receiver =
2165 &retrieve_request->receivers[i];
2166 ffa_memory_access_permissions_t requested_permissions =
2167 current_receiver->receiver_permissions.permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002168 ffa_id_t current_receiver_id =
J-Alves96de29f2022-04-26 16:05:24 +01002169 current_receiver->receiver_permissions.receiver;
2170 bool found_to_id = current_receiver_id == to_vm_id;
2171
J-Alves3456e032023-07-20 12:20:05 +01002172 if (bypass_multi_receiver_check && !found_to_id) {
2173 dlog_verbose(
2174 "Bypass multiple borrower check for id %x.\n",
2175 current_receiver_id);
2176 continue;
2177 }
2178
J-Alves96de29f2022-04-26 16:05:24 +01002179 /*
2180 * Find the current receiver in the transaction descriptor from
2181 * sender.
2182 */
2183 uint32_t mem_region_receiver_index =
2184 ffa_memory_region_get_receiver(memory_region,
2185 current_receiver_id);
2186
2187 if (mem_region_receiver_index ==
2188 memory_region->receiver_count) {
2189 dlog_verbose("%s: receiver %x not found\n", __func__,
2190 current_receiver_id);
2191 return ffa_error(FFA_DENIED);
2192 }
2193
2194 sent_permissions =
2195 memory_region->receivers[mem_region_receiver_index]
2196 .receiver_permissions.permissions;
2197
2198 if (found_to_id) {
2199 retrieve_receiver_index = i;
2200 }
2201
2202 /*
2203 * Since we are traversing the list of receivers, save the index
2204 * of the caller, as it needs to be present in the list.
2205 */
2206
2207 if (current_receiver->composite_memory_region_offset != 0U) {
2208 dlog_verbose(
2209 "Retriever specified address ranges not "
2210 "supported (got offset %d).\n",
2211 current_receiver
2212 ->composite_memory_region_offset);
2213 return ffa_error(FFA_INVALID_PARAMETERS);
2214 }
2215
2216 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002217 * Check if retrieve request memory access list is valid:
2218 * - The retrieve request complies with the specification.
2219 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002220 */
J-Alvesdcad8992023-09-15 14:10:35 +01002221 ret = ffa_memory_retrieve_is_memory_access_valid(
2222 func_id, ffa_get_data_access_attr(sent_permissions),
2223 ffa_get_data_access_attr(requested_permissions),
2224 ffa_get_instruction_access_attr(sent_permissions),
2225 ffa_get_instruction_access_attr(requested_permissions),
2226 found_to_id ? permissions : NULL,
2227 region_receiver_count > 1);
2228 if (ret.func != FFA_SUCCESS_32) {
2229 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002230 }
2231
2232 /*
2233 * Can't request PM to clear memory if only provided with RO
2234 * permissions.
2235 */
2236 if (found_to_id &&
2237 (ffa_get_data_access_attr(*permissions) ==
2238 FFA_DATA_ACCESS_RO) &&
2239 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2240 0U) {
2241 dlog_verbose(
2242 "Receiver has RO permissions can not request "
2243 "clear.\n");
2244 return ffa_error(FFA_DENIED);
2245 }
2246 }
2247
2248 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2249 dlog_verbose(
2250 "Retrieve request does not contain caller's (%x) "
2251 "permissions\n",
2252 to_vm_id);
2253 return ffa_error(FFA_INVALID_PARAMETERS);
2254 }
2255
2256 return (struct ffa_value){.func = FFA_SUCCESS_32};
2257}
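
/*
 * Example: for a lend to borrowers {A, B}, a retrieve request from A must
 * normally list both A and B, matching the lender's list. If A sets
 * FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK instead, the request must
 * list only A and the other borrowers' entries are not checked.
 */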
2258
J-Alvesa9cd7e32022-07-01 13:49:33 +01002259/*
2260 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2261 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2262 * of a pending memory sharing operation whose allocator is the SPM, for
2263 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2264 * the memory region descriptor of the retrieve request must be zeroed with the
2265 * exception of the sender ID and handle.
2266 */
2267bool is_ffa_memory_retrieve_borrower_request(struct ffa_memory_region *request,
2268 struct vm_locked to_locked)
2269{
2270 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2271 request->attributes == 0U && request->flags == 0U &&
2272 request->tag == 0U && request->receiver_count == 0U &&
2273 plat_ffa_memory_handle_allocated_by_current_world(
2274 request->handle);
2275}
2276
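/*
 * A hypervisor retrieve request, as detected above, carries nothing beyond
 * what identifies the transaction. Sketch of such a descriptor:
 *
 *   struct ffa_memory_region req = {0};
 *   req.sender = lender_id;    the original lender of the transaction
 *   req.handle = handle;       the SPMC-allocated handle being queried
 *
 * Attributes, flags, tag and the receiver list are all left at zero.
 */
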
2277/*
2278 * Helper to reset count of fragments retrieved by the hypervisor.
2279 */
2280static void ffa_memory_retrieve_complete_from_hyp(
2281 struct ffa_memory_share_state *share_state)
2282{
2283 if (share_state->hypervisor_fragment_count ==
2284 share_state->fragment_count) {
2285 share_state->hypervisor_fragment_count = 0;
2286 }
2287}
2288
J-Alves089004f2022-07-13 14:25:44 +01002289/**
2290 * Validate the memory region descriptor provided by the borrower in
2291 * FFA_MEM_RETRIEVE_REQ against the memory region saved when the lender
2292 * made the memory sharing call.
2293 */
2294static struct ffa_value ffa_memory_retrieve_validate(
J-Alves19e20cf2023-08-02 12:48:55 +01002295 ffa_id_t receiver_id, struct ffa_memory_region *retrieve_request,
J-Alves089004f2022-07-13 14:25:44 +01002296 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2297 uint32_t share_func)
2298{
2299 ffa_memory_region_flags_t transaction_type =
2300 retrieve_request->flags &
2301 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002302 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01002303
2304 assert(retrieve_request != NULL);
2305 assert(memory_region != NULL);
2306 assert(receiver_index != NULL);
2307 assert(retrieve_request->sender == memory_region->sender);
2308
2309 /*
2310 * Check that the transaction type expected by the receiver is
2311 * correct, if it has been specified.
2312 */
2313 if (transaction_type !=
2314 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2315 transaction_type != (memory_region->flags &
2316 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2317 dlog_verbose(
2318 "Incorrect transaction type %#x for "
2319 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2320 transaction_type,
2321 memory_region->flags &
2322 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2323 retrieve_request->handle);
2324 return ffa_error(FFA_INVALID_PARAMETERS);
2325 }
2326
2327 if (retrieve_request->tag != memory_region->tag) {
2328 dlog_verbose(
2329 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2330 "%d for handle %#x.\n",
2331 retrieve_request->tag, memory_region->tag,
2332 retrieve_request->handle);
2333 return ffa_error(FFA_INVALID_PARAMETERS);
2334 }
2335
2336 *receiver_index =
2337 ffa_memory_region_get_receiver(memory_region, receiver_id);
2338
2339 if (*receiver_index == memory_region->receiver_count) {
2340 dlog_verbose(
2341 "Incorrect receiver VM ID %d for "
2342 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01002343 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002344 return ffa_error(FFA_INVALID_PARAMETERS);
2345 }
2346
2347 if ((retrieve_request->flags &
2348 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2349 dlog_verbose(
2350 "Retriever specified 'address range alignment 'hint' "
2351 "not supported.\n");
2352 return ffa_error(FFA_INVALID_PARAMETERS);
2353 }
2354 if ((retrieve_request->flags &
2355 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2356 dlog_verbose(
2357 "Bits 8-5 must be zero in memory region's flags "
2358 "(address range alignment hint not supported).\n");
2359 return ffa_error(FFA_INVALID_PARAMETERS);
2360 }
2361
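	/*
	 * Bits 10:0 hold the flags defined for FFA_MEM_RETRIEVE_REQ (clear,
	 * transaction type, address range alignment hint, bypass borrowers
	 * check, etc.); bits 31:11 are reserved and must be zero.
	 */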
2362 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2363 dlog_verbose(
2364 "Bits 31-10 must be zero in memory region's flags.\n");
2365 return ffa_error(FFA_INVALID_PARAMETERS);
2366 }
2367
2368 if (share_func == FFA_MEM_SHARE_32 &&
2369 (retrieve_request->flags &
2370 (FFA_MEMORY_REGION_FLAG_CLEAR |
2371 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2372 dlog_verbose(
2373 "Memory Share operation can't clean after relinquish "
2374 "memory region.\n");
2375 return ffa_error(FFA_INVALID_PARAMETERS);
2376 }
2377
2378 /*
2379 * If the borrower needs the memory to be cleared before mapping
2380 * to its address space, the sender should have set the flag
2381 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2382 * FFA_DENIED.
2383 */
2384 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2385 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2386 dlog_verbose(
2387 "Borrower needs memory cleared. Sender needs to set "
2388 "flag for clearing memory.\n");
2389 return ffa_error(FFA_DENIED);
2390 }
2391
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002392 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2393 security_state =
2394 ffa_get_memory_security_attr(retrieve_request->attributes);
2395 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2396 dlog_verbose(
2397 "Invalid security state for memory retrieve request "
2398 "operation.\n");
2399 return ffa_error(FFA_INVALID_PARAMETERS);
2400 }
2401
J-Alves089004f2022-07-13 14:25:44 +01002402 /*
2403 * If memory type is not specified, bypass validation of memory
2404 * attributes in the retrieve request. The retriever is expecting to
2405 * obtain this information from the SPMC.
2406 */
2407 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2408 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2409 return (struct ffa_value){.func = FFA_SUCCESS_32};
2410 }
2411
2412 /*
2413 * Ensure receiver's attributes are compatible with how
2414 * Hafnium maps memory: Normal Memory, Inner shareable,
2415 * Write-Back Read-Allocate Write-Allocate Cacheable.
2416 */
2417 return ffa_memory_attributes_validate(retrieve_request->attributes);
2418}
2419
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002420struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2421 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002422 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002423 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002424{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002425 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002426 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002427 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002428 sizeof(struct ffa_memory_access);
2429 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002430 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002431 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002432 uint32_t memory_to_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002433 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002434 struct ffa_memory_share_state *share_state;
2435 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002436 struct ffa_composite_memory_region *composite;
2437 uint32_t total_length;
2438 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01002439 ffa_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002440 bool is_send_complete = false;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002441 ffa_memory_attributes_t attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002442
2443 dump_share_states();
2444
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002445 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002446 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002447 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002448 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002449 expected_retrieve_request_length,
2450 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002451 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002452 }
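	/*
	 * Note: the expected length only covers the transaction descriptor
	 * header plus the endpoint memory access descriptors. A retrieve
	 * request is not expected to carry a composite memory region; the
	 * address ranges are returned by the relayer in the retrieve
	 * response.
	 */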
2453
2454 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002455 share_state = get_share_state(share_states, handle);
2456 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002457 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002458 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002459 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002460 goto out;
2461 }
2462
J-Alves96de29f2022-04-26 16:05:24 +01002463 if (!share_state->sending_complete) {
2464 dlog_verbose(
2465 "Memory with handle %#x not fully sent, can't "
2466 "retrieve.\n",
2467 handle);
2468 ret = ffa_error(FFA_INVALID_PARAMETERS);
2469 goto out;
2470 }
2471
Andrew Walbrana65a1322020-04-06 19:32:32 +01002472 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002473
Andrew Walbrana65a1322020-04-06 19:32:32 +01002474 CHECK(memory_region != NULL);
2475
J-Alves089004f2022-07-13 14:25:44 +01002476 if (retrieve_request->sender != memory_region->sender) {
2477 dlog_verbose(
2478 			"Sender ID %x in retrieve request doesn't match "
2479 			"sender of memory region with handle %#x.\n",
2480 			retrieve_request->sender, handle);
2481 ret = ffa_error(FFA_INVALID_PARAMETERS);
2482 goto out;
2483 }
J-Alves96de29f2022-04-26 16:05:24 +01002484
J-Alvesa9cd7e32022-07-01 13:49:33 +01002485 if (!is_ffa_memory_retrieve_borrower_request(retrieve_request,
2486 to_locked)) {
2487 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002488
J-Alvesb5084cf2022-07-06 14:20:12 +01002489 /*
2490 * The SPMC can only process retrieve requests to memory share
2491 * operations with one borrower from the other world. It can't
2492 * determine the ID of the NWd VM that invoked the retrieve
2493 * request interface call. It relies on the hypervisor to
2494 * validate the caller's ID against that provided in the
2495 * `receivers` list of the retrieve response.
2496 * In case there is only one borrower from the NWd in the
2497 * transaction descriptor, record that in the `receiver_id` for
2498 * later use, and validate in the retrieve request message.
J-Alves3fa82aa2023-09-20 18:19:21 +01002499		 * This limitation is due to the fact that the SPMC can't
2500		 * determine which index in the memory share state structures to update.
J-Alvesb5084cf2022-07-06 14:20:12 +01002501 */
2502 if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
2503 uint32_t other_world_count = 0;
2504
2505 for (uint32_t i = 0; i < memory_region->receiver_count;
2506 i++) {
2507 receiver_id =
2508 					memory_region->receivers[i]
2509 .receiver_permissions.receiver;
2510 if (!vm_id_is_current_world(receiver_id)) {
2511 other_world_count++;
2512 }
2513 }
2514 if (other_world_count > 1) {
2515 dlog_verbose(
2516 					"Only one receiver from the other "
2517 					"world is supported.\n");
2518 return ffa_error(FFA_NOT_SUPPORTED);
2519 }
2520 }
2521
2522 /*
2523 * Validate retrieve request, according to what was sent by the
2524 * sender. Function will output the `receiver_index` from the
J-Alves3fa82aa2023-09-20 18:19:21 +01002525 * provided memory region.
J-Alvesb5084cf2022-07-06 14:20:12 +01002526 */
J-Alves089004f2022-07-13 14:25:44 +01002527 ret = ffa_memory_retrieve_validate(
2528 receiver_id, retrieve_request, memory_region,
2529 &receiver_index, share_state->share_func);
2530 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002531 goto out;
2532 }
2533
2534 if (share_state->retrieved_fragment_count[receiver_index] !=
2535 0U) {
2536 dlog_verbose(
2537 "Memory with handle %#x already retrieved.\n",
2538 handle);
2539 ret = ffa_error(FFA_DENIED);
2540 goto out;
2541 }
2542
J-Alves3fa82aa2023-09-20 18:19:21 +01002543 /*
2544 		 * Validate the requested permissions against those sent
2545 		 * by the sender.
2546 		 * Outputs the permissions to map for the retriever in its
2547 		 * stage-2 page tables.
2548 */
J-Alvesa9cd7e32022-07-01 13:49:33 +01002549 ret = ffa_memory_retrieve_validate_memory_access_list(
2550 memory_region, retrieve_request, receiver_id,
J-Alvesdcad8992023-09-15 14:10:35 +01002551 &permissions, share_state->share_func);
J-Alves614d9f42022-06-28 14:03:10 +01002552 if (ret.func != FFA_SUCCESS_32) {
2553 goto out;
2554 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002555
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002556 memory_to_mode = ffa_memory_permissions_to_mode(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002557 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002558
J-Alvesa9cd7e32022-07-01 13:49:33 +01002559 ret = ffa_retrieve_check_update(
2560 to_locked, memory_region->sender,
2561 share_state->fragments,
2562 share_state->fragment_constituent_counts,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002563 share_state->fragment_count, memory_to_mode,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002564 share_state->share_func, false, page_pool);
2565
2566 if (ret.func != FFA_SUCCESS_32) {
2567 goto out;
2568 }
2569
2570 share_state->retrieved_fragment_count[receiver_index] = 1;
2571 is_send_complete =
2572 share_state->retrieved_fragment_count[receiver_index] ==
2573 share_state->fragment_count;
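		/*
		 * For a single-fragment transaction this already completes
		 * the retrieval; for multi-fragment transactions the
		 * remaining fragments are fetched through FFA_MEM_FRAG_RX
		 * (handled by ffa_memory_retrieve_continue() below).
		 */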
J-Alves3c5b2072022-11-21 12:45:40 +00002574
2575 share_state->clear_after_relinquish =
2576 (retrieve_request->flags &
2577 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) != 0U;
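		/*
		 * The zero-after-relinquish request is only recorded here;
		 * it is honoured later in ffa_memory_relinquish(), once the
		 * other borrowers have relinquished the region.
		 */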
2578
J-Alvesa9cd7e32022-07-01 13:49:33 +01002579 } else {
2580 if (share_state->hypervisor_fragment_count != 0U) {
2581 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002582 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002583 "the hypervisor.\n",
2584 handle);
2585 ret = ffa_error(FFA_DENIED);
2586 goto out;
2587 }
2588
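		/*
		 * Unlike the borrower path above, the hypervisor retrieve
		 * only hands back the transaction descriptor and performs no
		 * stage-2 mapping update, typically so the hypervisor can
		 * manage the region on behalf of NWd endpoints.
		 */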
2589 share_state->hypervisor_fragment_count = 1;
2590
2591 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002592 }
2593
J-Alvesb5084cf2022-07-06 14:20:12 +01002594 /* VMs acquire the RX buffer from SPMC. */
2595 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2596
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002597 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002598 * Copy response to RX buffer of caller and deliver the message.
2599 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002600 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002601 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002602 composite = ffa_memory_region_get_composite(memory_region, 0);
2603 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002604 * Constituents which we received in the first fragment should
2605 * always fit in the first fragment we are sending, because the
2606 * header is the same size in both cases and we have a fixed
2607 * message buffer size. So `ffa_retrieved_memory_region_init`
2608 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002609 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002610
2611 /*
2612 * Set the security state in the memory retrieve response attributes
2613 * if specified by the target mode.
2614 */
2615 attributes = plat_ffa_memory_security_mode(
2616 memory_region->attributes, share_state->sender_orig_mode);
2617
Andrew Walbranca808b12020-05-15 17:22:28 +01002618 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01002619 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002620 HF_MAILBOX_SIZE, memory_region->sender, attributes,
2621 memory_region->flags, handle, receiver_id, permissions,
2622 composite->page_count, composite->constituent_count,
2623 share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01002624 share_state->fragment_constituent_counts[0], &total_length,
2625 &fragment_length));
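	/*
	 * The retrieve response has been written directly into the caller's
	 * RX buffer, in the layout matching the caller's FF-A version;
	 * `total_length` and `fragment_length` are reported back via the
	 * FFA_MEM_RETRIEVE_RESP registers below.
	 */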
J-Alvesb5084cf2022-07-06 14:20:12 +01002626
Andrew Walbranca808b12020-05-15 17:22:28 +01002627 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002628 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002629 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002630 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002631
J-Alvesa9cd7e32022-07-01 13:49:33 +01002632 if (is_send_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002633 ffa_memory_retrieve_complete(share_states, share_state,
2634 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002635 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002636 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002637 .arg1 = total_length,
2638 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002639out:
2640 share_states_unlock(&share_states);
2641 dump_share_states();
2642 return ret;
2643}
2644
J-Alves5da37d92022-10-24 16:33:48 +01002645/**
2646 * Determine expected fragment offset according to the FF-A version of
2647 * the caller.
2648 */
2649static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
2650 struct ffa_memory_region *memory_region,
2651 uint32_t retrieved_constituents_count, uint32_t ffa_version)
2652{
2653 uint32_t expected_fragment_offset;
2654 uint32_t composite_constituents_offset;
2655
2656 if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
2657 /*
2658 * Hafnium operates memory regions in FF-A v1.1 format, so we
2659 * can retrieve the constituents offset from descriptor.
2660 */
2661 composite_constituents_offset =
2662 ffa_composite_constituent_offset(memory_region, 0);
2663 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2664 /*
2665 * If retriever is FF-A v1.0, determine the composite offset
2666 * as it is expected to have been configured in the
2667 * retrieve response.
2668 */
2669 composite_constituents_offset =
2670 sizeof(struct ffa_memory_region_v1_0) +
2671 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
2672 sizeof(struct ffa_memory_access) +
2673 sizeof(struct ffa_composite_memory_region);
2674 } else {
2675 panic("%s received an invalid FF-A version.\n", __func__);
2676 }
2677
2678 expected_fragment_offset =
2679 composite_constituents_offset +
2680 retrieved_constituents_count *
2681 sizeof(struct ffa_memory_region_constituent) -
2682 sizeof(struct ffa_memory_access) *
2683 (memory_region->receiver_count - 1);
2684
2685 return expected_fragment_offset;
2686}
2687
Andrew Walbranca808b12020-05-15 17:22:28 +01002688struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2689 ffa_memory_handle_t handle,
2690 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01002691 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002692 struct mpool *page_pool)
2693{
2694 struct ffa_memory_region *memory_region;
2695 struct share_states_locked share_states;
2696 struct ffa_memory_share_state *share_state;
2697 struct ffa_value ret;
2698 uint32_t fragment_index;
2699 uint32_t retrieved_constituents_count;
2700 uint32_t i;
2701 uint32_t expected_fragment_offset;
2702 uint32_t remaining_constituent_count;
2703 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002704 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01002705 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01002706
2707 dump_share_states();
2708
2709 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002710 share_state = get_share_state(share_states, handle);
2711 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002712 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2713 handle);
2714 ret = ffa_error(FFA_INVALID_PARAMETERS);
2715 goto out;
2716 }
2717
2718 memory_region = share_state->memory_region;
2719 CHECK(memory_region != NULL);
2720
Andrew Walbranca808b12020-05-15 17:22:28 +01002721 if (!share_state->sending_complete) {
2722 dlog_verbose(
2723 "Memory with handle %#x not fully sent, can't "
2724 "retrieve.\n",
2725 handle);
2726 ret = ffa_error(FFA_INVALID_PARAMETERS);
2727 goto out;
2728 }
2729
J-Alves59ed0042022-07-28 18:26:41 +01002730 /*
2731 	 * If a retrieve request from the hypervisor has been initiated for the
2732 	 * given share_state, continue it; otherwise assume this is the
2733 	 * continuation of a retrieve request from an NWd VM.
2734 */
2735 continue_ffa_hyp_mem_retrieve_req =
2736 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
2737 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01002738 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01002739
J-Alves59ed0042022-07-28 18:26:41 +01002740 if (!continue_ffa_hyp_mem_retrieve_req) {
2741 receiver_index = ffa_memory_region_get_receiver(
2742 memory_region, to_locked.vm->id);
2743
2744 if (receiver_index == memory_region->receiver_count) {
2745 dlog_verbose(
2746 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
2747 				"borrower in memory sharing transaction (%x).\n",
2748 to_locked.vm->id, handle);
2749 ret = ffa_error(FFA_INVALID_PARAMETERS);
2750 goto out;
2751 }
2752
2753 if (share_state->retrieved_fragment_count[receiver_index] ==
2754 0 ||
2755 share_state->retrieved_fragment_count[receiver_index] >=
2756 share_state->fragment_count) {
2757 dlog_verbose(
2758 "Retrieval of memory with handle %#x not yet "
2759 "started or already completed (%d/%d fragments "
2760 "retrieved).\n",
2761 handle,
2762 share_state->retrieved_fragment_count
2763 [receiver_index],
2764 share_state->fragment_count);
2765 ret = ffa_error(FFA_INVALID_PARAMETERS);
2766 goto out;
2767 }
2768
2769 fragment_index =
2770 share_state->retrieved_fragment_count[receiver_index];
2771 } else {
2772 if (share_state->hypervisor_fragment_count == 0 ||
2773 share_state->hypervisor_fragment_count >=
2774 share_state->fragment_count) {
2775 dlog_verbose(
2776 				"Retrieval of memory with handle %x not "
2777 				"started by the hypervisor.\n",
2778 handle);
2779 ret = ffa_error(FFA_INVALID_PARAMETERS);
2780 goto out;
2781 }
2782
2783 if (memory_region->sender != sender_vm_id) {
2784 dlog_verbose(
2785 "Sender ID (%x) is not as expected for memory "
2786 "handle %x\n",
2787 sender_vm_id, handle);
2788 ret = ffa_error(FFA_INVALID_PARAMETERS);
2789 goto out;
2790 }
2791
2792 fragment_index = share_state->hypervisor_fragment_count;
2793
2794 receiver_index = 0;
2795 }
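	/*
	 * At this point `fragment_index` selects the next fragment to hand
	 * out; `receiver_index` is only meaningful for the borrower path,
	 * as the hypervisor path tracks its progress in
	 * `hypervisor_fragment_count`.
	 */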
Andrew Walbranca808b12020-05-15 17:22:28 +01002796
2797 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002798 * Check that the given fragment offset is correct by counting
2799 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01002800 */
2801 retrieved_constituents_count = 0;
2802 for (i = 0; i < fragment_index; ++i) {
2803 retrieved_constituents_count +=
2804 share_state->fragment_constituent_counts[i];
2805 }
J-Alvesc7484f12022-05-13 12:41:14 +01002806
2807 CHECK(memory_region->receiver_count > 0);
2808
Andrew Walbranca808b12020-05-15 17:22:28 +01002809 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01002810 ffa_memory_retrieve_expected_offset_per_ffa_version(
2811 memory_region, retrieved_constituents_count,
2812 to_locked.vm->ffa_version);
2813
Andrew Walbranca808b12020-05-15 17:22:28 +01002814 if (fragment_offset != expected_fragment_offset) {
2815 dlog_verbose("Fragment offset was %d but expected %d.\n",
2816 fragment_offset, expected_fragment_offset);
2817 ret = ffa_error(FFA_INVALID_PARAMETERS);
2818 goto out;
2819 }
2820
J-Alves59ed0042022-07-28 18:26:41 +01002821 /* VMs acquire the RX buffer from SPMC. */
2822 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2823
Andrew Walbranca808b12020-05-15 17:22:28 +01002824 remaining_constituent_count = ffa_memory_fragment_init(
2825 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2826 share_state->fragments[fragment_index],
2827 share_state->fragment_constituent_counts[fragment_index],
2828 &fragment_length);
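	/*
	 * Each stored fragment originally arrived through a message buffer
	 * of at most HF_MAILBOX_SIZE, so re-emitting it into the RX buffer
	 * is expected to never leave constituents over.
	 */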
2829 CHECK(remaining_constituent_count == 0);
2830 to_locked.vm->mailbox.recv_size = fragment_length;
2831 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2832 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002833 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01002834
J-Alves59ed0042022-07-28 18:26:41 +01002835 if (!continue_ffa_hyp_mem_retrieve_req) {
2836 share_state->retrieved_fragment_count[receiver_index]++;
2837 if (share_state->retrieved_fragment_count[receiver_index] ==
2838 share_state->fragment_count) {
2839 ffa_memory_retrieve_complete(share_states, share_state,
2840 page_pool);
2841 }
2842 } else {
2843 share_state->hypervisor_fragment_count++;
2844
2845 ffa_memory_retrieve_complete_from_hyp(share_state);
2846 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002847 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2848 .arg1 = (uint32_t)handle,
2849 .arg2 = (uint32_t)(handle >> 32),
2850 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002851
2852out:
2853 share_states_unlock(&share_states);
2854 dump_share_states();
2855 return ret;
2856}
2857
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002858struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002859 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002860 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002861{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002862 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002863 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002864 struct ffa_memory_share_state *share_state;
2865 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002866 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002867 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01002868 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00002869 bool receivers_relinquished_memory;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002870
Andrew Walbrana65a1322020-04-06 19:32:32 +01002871 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002872 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002873 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01002874 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002875 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002876 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002877 }
2878
Andrew Walbrana65a1322020-04-06 19:32:32 +01002879 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002880 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002881 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01002882 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002883 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002884 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002885 }
2886
2887 dump_share_states();
2888
2889 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002890 share_state = get_share_state(share_states, handle);
2891 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002892 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002893 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002894 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002895 goto out;
2896 }
2897
Andrew Walbranca808b12020-05-15 17:22:28 +01002898 if (!share_state->sending_complete) {
2899 dlog_verbose(
2900 "Memory with handle %#x not fully sent, can't "
2901 "relinquish.\n",
2902 handle);
2903 ret = ffa_error(FFA_INVALID_PARAMETERS);
2904 goto out;
2905 }
2906
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002907 memory_region = share_state->memory_region;
2908 CHECK(memory_region != NULL);
2909
J-Alves8eb19162022-04-28 10:56:48 +01002910 receiver_index = ffa_memory_region_get_receiver(memory_region,
2911 from_locked.vm->id);
2912
2913 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002914 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002915 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01002916 			"with handle %#x but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01002917 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002918 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002919 goto out;
2920 }
2921
J-Alves8eb19162022-04-28 10:56:48 +01002922 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01002923 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002924 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002925 "Memory with handle %#x not yet fully "
2926 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01002927 "receiver %x can't relinquish.\n",
2928 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002929 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002930 goto out;
2931 }
2932
J-Alves3c5b2072022-11-21 12:45:40 +00002933 /*
2934 	 * Clear only if no other borrower still has the region retrieved, and
2935 	 * clearing was requested in this call or in a prior retrieve request.
2936 */
2937 receivers_relinquished_memory = true;
2938
2939 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2940 struct ffa_memory_access *receiver =
2941 &memory_region->receivers[i];
2942
2943 if (receiver->receiver_permissions.receiver ==
2944 from_locked.vm->id) {
2945 continue;
2946 }
2947
2948 if (share_state->retrieved_fragment_count[i] != 0U) {
2949 receivers_relinquished_memory = false;
2950 break;
2951 }
2952 }
2953
2954 clear = receivers_relinquished_memory &&
2955 (share_state->clear_after_relinquish ||
2956 (relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2957 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002958
2959 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002960 * Clear is not allowed for memory that was shared, as the
2961 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002962 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002963 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002964 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002965 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002966 goto out;
2967 }
2968
Andrew Walbranca808b12020-05-15 17:22:28 +01002969 ret = ffa_relinquish_check_update(
J-Alves3c5b2072022-11-21 12:45:40 +00002970 from_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002971 share_state->fragment_constituent_counts,
2972 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002973
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002974 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002975 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002976 * Mark memory handle as not retrieved, so it can be
2977 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002978 */
J-Alves8eb19162022-04-28 10:56:48 +01002979 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002980 }
2981
2982out:
2983 share_states_unlock(&share_states);
2984 dump_share_states();
2985 return ret;
2986}
2987
2988/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01002989 * Validates that the reclaim transition is allowed for the given
2990 * handle, updates the page table of the reclaiming VM, and frees the
2991 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002992 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002993struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01002994 ffa_memory_handle_t handle,
2995 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002996 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002997{
2998 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002999 struct ffa_memory_share_state *share_state;
3000 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003001 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003002
3003 dump_share_states();
3004
3005 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003006
Karl Meakin4a2854a2023-06-30 16:26:52 +01003007 share_state = get_share_state(share_states, handle);
3008 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003009 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003010 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003011 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003012 goto out;
3013 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003014 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003015
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003016 CHECK(memory_region != NULL);
3017
J-Alvesa9cd7e32022-07-01 13:49:33 +01003018 if (vm_id_is_current_world(to_locked.vm->id) &&
3019 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003020 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003021 "VM %#x attempted to reclaim memory handle %#x "
3022 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003023 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003024 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003025 goto out;
3026 }
3027
Andrew Walbranca808b12020-05-15 17:22:28 +01003028 if (!share_state->sending_complete) {
3029 dlog_verbose(
3030 "Memory with handle %#x not fully sent, can't "
3031 "reclaim.\n",
3032 handle);
3033 ret = ffa_error(FFA_INVALID_PARAMETERS);
3034 goto out;
3035 }
3036
J-Alves752236c2022-04-28 11:07:47 +01003037 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3038 if (share_state->retrieved_fragment_count[i] != 0) {
3039 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003040 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00003041 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003042 				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003043 handle,
3044 memory_region->receivers[i]
3045 .receiver_permissions.receiver);
3046 ret = ffa_error(FFA_DENIED);
3047 goto out;
3048 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003049 }
3050
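	/*
	 * All borrowers have relinquished (or never retrieved) the region:
	 * restore it to the sender using the sender's original memory mode
	 * and, on success, free the associated share state.
	 */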
Andrew Walbranca808b12020-05-15 17:22:28 +01003051 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00003052 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003053 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003054 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01003055 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003056
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003057 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003058 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003059 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003060 }
3061
3062out:
3063 share_states_unlock(&share_states);
3064 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003065}