/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
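
/*
 * Illustrative sketch only, assuming `plat_ffa_memory_handle_make(i)` encodes
 * the share state index alongside the allocator bits (as the allocation and
 * lookup code below relies on): the round trip would then hold for handles
 * allocated by this world.
 *
 *	ffa_memory_handle_t h = plat_ffa_memory_handle_make(5);
 *	assert(ffa_memory_handle_get_index(h) == 5);
 */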
47
48/**
Karl Meakin52cdfe72023-06-30 14:49:10 +010049 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
50 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
51 * otherwise uses the provided handle which is assumed to be globally unique.
Andrew Walbranca808b12020-05-15 17:22:28 +010052 *
Karl Meakin52cdfe72023-06-30 14:49:10 +010053 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
54 * `NULL` if none are available.
Andrew Walbranca808b12020-05-15 17:22:28 +010055 */
Karl Meakin52cdfe72023-06-30 14:49:10 +010056struct ffa_memory_share_state *allocate_share_state(
57 struct share_states_locked share_states, uint32_t share_func,
58 struct ffa_memory_region *memory_region, uint32_t fragment_length,
59 ffa_memory_handle_t handle)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000060{
Daniel Boulbya2f8c662021-11-26 17:52:53 +000061 assert(share_states.share_states != NULL);
62 assert(memory_region != NULL);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000063
Karl Meakin52cdfe72023-06-30 14:49:10 +010064 for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
Andrew Walbranca808b12020-05-15 17:22:28 +010065 if (share_states.share_states[i].share_func == 0) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010066 struct ffa_memory_share_state *allocated_state =
Andrew Walbranca808b12020-05-15 17:22:28 +010067 &share_states.share_states[i];
68 struct ffa_composite_memory_region *composite =
69 ffa_memory_region_get_composite(memory_region,
70 0);
71
72 if (handle == FFA_MEMORY_HANDLE_INVALID) {
J-Alvesee68c542020-10-29 17:48:20 +000073 memory_region->handle =
Olivier Deprez55a189e2021-06-09 15:45:27 +020074 plat_ffa_memory_handle_make(i);
Andrew Walbranca808b12020-05-15 17:22:28 +010075 } else {
J-Alvesee68c542020-10-29 17:48:20 +000076 memory_region->handle = handle;
Andrew Walbranca808b12020-05-15 17:22:28 +010077 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000078 allocated_state->share_func = share_func;
79 allocated_state->memory_region = memory_region;
Andrew Walbranca808b12020-05-15 17:22:28 +010080 allocated_state->fragment_count = 1;
81 allocated_state->fragments[0] = composite->constituents;
82 allocated_state->fragment_constituent_counts[0] =
83 (fragment_length -
84 ffa_composite_constituent_offset(memory_region,
85 0)) /
86 sizeof(struct ffa_memory_region_constituent);
87 allocated_state->sending_complete = false;
Karl Meakin52cdfe72023-06-30 14:49:10 +010088 for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
89 ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +010090 allocated_state->retrieved_fragment_count[j] =
91 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000092 }
Karl Meakin52cdfe72023-06-30 14:49:10 +010093 return allocated_state;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000094 }
95 }
96
Karl Meakin52cdfe72023-06-30 14:49:10 +010097 return NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000098}
99
100/** Locks the share states lock. */
101struct share_states_locked share_states_lock(void)
102{
103 sl_lock(&share_states_lock_instance);
104
105 return (struct share_states_locked){.share_states = share_states};
106}
107
108/** Unlocks the share states lock. */
J-Alves66652252022-07-06 09:49:51 +0100109void share_states_unlock(struct share_states_locked *share_states)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000110{
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000111 assert(share_states->share_states != NULL);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000112 share_states->share_states = NULL;
113 sl_unlock(&share_states_lock_instance);
114}
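
/*
 * Illustrative usage sketch of the locking discipline above (hypothetical
 * caller, shown only to clarify the expected call order):
 *
 *	struct share_states_locked states = share_states_lock();
 *	struct ffa_memory_share_state *s = allocate_share_state(
 *		states, FFA_MEM_SHARE_32, memory_region, fragment_length,
 *		FFA_MEMORY_HANDLE_INVALID);
 *	if (s == NULL) {
 *		// No free slot: the caller would report FFA_NO_MEMORY.
 *	}
 *	share_states_unlock(&states);
 */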

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_get_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
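
/*
 * Worked example for the calculation above (hypothetical numbers, assuming
 * the usual 16-byte `ffa_memory_region_constituent` layout): with a composite
 * constituent offset of 48 bytes and two received fragments holding 4 and 2
 * constituents, the next fragment would be expected at offset
 * 48 + (4 + 2) * 16 = 144 bytes into the transaction descriptor.
 */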

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to "
	     "%u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     " sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}
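
/*
 * Quick reference for the conversion above, derived directly from the switch
 * statements (MM_MODE_* are Hafnium's stage-2 mode bits):
 *
 *	FFA_DATA_ACCESS_RO                    -> MM_MODE_R
 *	FFA_DATA_ACCESS_RW                    -> MM_MODE_R | MM_MODE_W
 *	FFA_DATA_ACCESS_NOT_SPECIFIED         -> R/W bits from default_mode
 *	FFA_INSTRUCTION_ACCESS_X              -> adds MM_MODE_X
 *	FFA_INSTRUCTION_ACCESS_NX             -> leaves MM_MODE_X clear
 *	FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED  -> MM_MODE_X from default_mode
 */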

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode.
			 */
			if (i == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}
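
/*
 * Summary of the sender-side transitions selected above, taken directly from
 * the switch statement: starting from the sender's original mode with the
 * state bits cleared,
 *
 *	FFA_MEM_DONATE_32 adds MM_MODE_INVALID | MM_MODE_UNOWNED
 *	FFA_MEM_LEND_32   adds MM_MODE_INVALID
 *	FFA_MEM_SHARE_32  adds MM_MODE_SHARED
 *
 * roughly corresponding to the !O-NA, O-NA and O-SA owner states of the FF-A
 * memory sharing model.
 */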

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, `orig_to_mode` is expected to overlap with
		 * `state_mask`, as a result of `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from a virtual FF-A instance:
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls into system's PA
			 * range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
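
/*
 * Illustrative two-phase usage of the helper above (hypothetical caller,
 * mirroring how the send/retrieve/relinquish paths below use it): prepare
 * without committing first, so a failure leaves the page table untouched,
 * then commit, which cannot fail.
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					   fragment_count, mode, page_pool,
 *					   false)) {
 *		return ffa_error(FFA_NO_MEMORY);
 *	}
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					    fragment_count, mode, page_pool,
 *					    true));
 */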

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 * global mapping of the whole range. Such an approach will limit
	 * the changes to stage-1 tables and will allow only local
	 * invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
				   ipaddr_t in_begin, ipaddr_t in_end)
{
	return (ipa_addr(begin) >= ipa_addr(in_begin) &&
		ipa_addr(begin) < ipa_addr(in_end)) ||
	       (ipa_addr(end) <= ipa_addr(in_end) &&
		ipa_addr(end) > ipa_addr(in_begin));
}

/**
 * Receives a memory range and looks for overlaps with the remaining
 * constituents of the memory share/lend/donate operation. Assumes constituents
 * are passed in order, to avoid having to loop over all the elements on each
 * call. The function only compares the received memory range with those that
 * follow it within the same fragment, and with subsequent fragments of the
 * same operation.
 */
static bool ffa_memory_check_overlap(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts,
	const uint32_t fragment_count, const uint32_t current_fragment,
	const uint32_t current_constituent)
{
	uint32_t i = current_fragment;
	uint32_t j = current_constituent;
	ipaddr_t current_begin = ipa_init(fragments[i][j].address);
	const uint32_t current_page_count = fragments[i][j].page_count;
	size_t current_size = current_page_count * PAGE_SIZE;
	ipaddr_t current_end = ipa_add(current_begin, current_size - 1);

	if (current_size == 0 ||
	    current_size > UINT64_MAX - ipa_addr(current_begin)) {
		dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
			     current_begin, current_page_count);
		return false;
	}

	for (; i < fragment_count; i++) {
		j = (i == current_fragment) ? j + 1 : 0;

		for (; j < fragment_constituent_counts[i]; j++) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			const uint32_t page_count = fragments[i][j].page_count;
			size_t size = page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size - 1);

			if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
				dlog_verbose(
					"Invalid page count. Addr: %x "
					"page_count: %x\n",
					begin, page_count);
				return false;
			}

			/*
			 * Check if the current range is within begin and end,
			 * as well as the reverse. This should help optimize
			 * the loop and reduce the number of iterations.
			 */
			if (is_memory_range_within(begin, end, current_begin,
						   current_end) ||
			    is_memory_range_within(current_begin, current_end,
						   begin, end)) {
				dlog_verbose(
					"Overlapping memory ranges: %#x - %#x "
					"with %#x - %#x\n",
					ipa_addr(begin), ipa_addr(end),
					ipa_addr(current_begin),
					ipa_addr(current_end));
				return true;
			}
		}
	}

	return false;
}
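
/*
 * Worked example (hypothetical addresses, assuming 4 KiB pages): a
 * constituent at 0x1000 spanning 2 pages covers [0x1000, 0x2fff], so a later
 * constituent starting at 0x2000 would be reported as overlapping, while one
 * starting at 0x3000 would not.
 */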

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t composite_total_page_count, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
	uint32_t i;
	uint32_t j;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;
	uint32_t constituents_total_page_count = 0;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			constituents_total_page_count +=
				fragments[i][j].page_count;
			if (ffa_memory_check_overlap(
				    fragments, fragment_constituent_counts,
				    fragment_count, i, j)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}
		}
	}

	if (constituents_total_page_count != composite_total_page_count) {
		dlog_verbose(
			"Composite page count differs from calculated page "
			"count from constituents.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
Jose Marinho09b1db82019-08-08 09:16:59 +0100928 */
J-Alves363f5722022-04-25 17:37:37 +0100929 ret = ffa_send_check_transition(from_locked, share_func, receivers,
930 receivers_count, &orig_from_mode,
931 fragments, fragment_constituent_counts,
Andrew Walbranca808b12020-05-15 17:22:28 +0100932 fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100933 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +0100934 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +0100935 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +0100936 }
937
Andrew Walbran37c574e2020-06-03 11:45:46 +0100938 if (orig_from_mode_ret != NULL) {
939 *orig_from_mode_ret = orig_from_mode;
940 }
941
Jose Marinho09b1db82019-08-08 09:16:59 +0100942 /*
943 * Create a local pool so any freed memory can't be used by another
944 * thread. This is to ensure the original mapping can be restored if the
945 * clear fails.
946 */
Andrew Walbran475c1452020-02-07 13:22:22 +0000947 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +0100948
949 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000950 * First reserve all required memory for the new page table entries
951 * without committing, to make sure the entire operation will succeed
952 * without exhausting the page pool.
Jose Marinho09b1db82019-08-08 09:16:59 +0100953 */
Andrew Walbranca808b12020-05-15 17:22:28 +0100954 if (!ffa_region_group_identity_map(
955 from_locked, fragments, fragment_constituent_counts,
956 fragment_count, from_mode, page_pool, false)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100957 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100958 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +0100959 goto out;
960 }
961
962 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000963 * Update the mapping for the sender. This won't allocate because the
964 * transaction was already prepared above, but may free pages in the
965 * case that a whole block is being unmapped that was previously
966 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +0100967 */
Andrew Walbranca808b12020-05-15 17:22:28 +0100968 CHECK(ffa_region_group_identity_map(
969 from_locked, fragments, fragment_constituent_counts,
970 fragment_count, from_mode, &local_page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +0100971
972 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +0000973 if (clear &&
974 !ffa_clear_memory_constituents(
975 plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
976 fragment_constituent_counts, fragment_count, page_pool)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100977 /*
978 * On failure, roll back by returning memory to the sender. This
979 * may allocate pages which were previously freed into
980 * `local_page_pool` by the call above, but will never allocate
981 * more pages than that so can never fail.
982 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100983 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +0100984 from_locked, fragments, fragment_constituent_counts,
985 fragment_count, orig_from_mode, &local_page_pool,
986 true));
Jose Marinho09b1db82019-08-08 09:16:59 +0100987
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100988 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +0100989 goto out;
990 }
991
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100992 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000993
994out:
995 mpool_fini(&local_page_pool);
996
997 /*
998 * Tidy up the page table by reclaiming failed mappings (if there was an
999 * error) or merging entries into blocks where possible (on success).
1000 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001001 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001002
1003 return ret;
1004}
1005
1006/**
1007 * Validates and maps memory shared from one VM to another.
1008 *
1009 * This function requires the calling context to hold the <to> lock.
1010 *
1011 * Returns:
1012 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001013 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001014 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001015 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001016 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001017 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001018 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001019struct ffa_value ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00001020 struct vm_locked to_locked, ffa_vm_id_t from_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001021 struct ffa_memory_region_constituent **fragments,
1022 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1023 uint32_t memory_to_attributes, uint32_t share_func, bool clear,
1024 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001025{
Andrew Walbranca808b12020-05-15 17:22:28 +01001026 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001027 uint32_t to_mode;
1028 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001029 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001030
1031 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001032 * Make sure constituents are properly aligned to a 64-bit boundary. If
1033 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001034 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001035 for (i = 0; i < fragment_count; ++i) {
1036 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001037 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001038 return ffa_error(FFA_INVALID_PARAMETERS);
1039 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001040 }
1041
1042 /*
1043 * Check if the state transition is lawful for the recipient, and ensure
1044 * that all constituents of the memory region being retrieved are at the
1045 * same state.
1046 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001047 ret = ffa_retrieve_check_transition(
1048 to_locked, share_func, fragments, fragment_constituent_counts,
1049 fragment_count, memory_to_attributes, &to_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001050 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001051 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001052 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001053 }
1054
1055 /*
1056 * Create a local pool so any freed memory can't be used by another
1057 * thread. This is to ensure the original mapping can be restored if the
1058 * clear fails.
1059 */
1060 mpool_init_with_fallback(&local_page_pool, page_pool);
1061
1062 /*
1063 * First reserve all required memory for the new page table entries in
1064 * the recipient page tables without committing, to make sure the entire
1065 * operation will succeed without exhausting the page pool.
1066 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001067 if (!ffa_region_group_identity_map(
1068 to_locked, fragments, fragment_constituent_counts,
1069 fragment_count, to_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001070 /* TODO: partial defrag of failed range. */
1071 dlog_verbose(
1072 "Insufficient memory to update recipient page "
1073 "table.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001074 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001075 goto out;
1076 }
1077
1078 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001079 if (clear &&
1080 !ffa_clear_memory_constituents(
1081 plat_ffa_owner_world_mode(from_id), fragments,
1082 fragment_constituent_counts, fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001083 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001084 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001085 goto out;
1086 }
1087
Jose Marinho09b1db82019-08-08 09:16:59 +01001088 /*
1089 * Complete the transfer by mapping the memory into the recipient. This
1090 * won't allocate because the transaction was already prepared above, so
1091 * it doesn't need to use the `local_page_pool`.
1092 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001093 CHECK(ffa_region_group_identity_map(
1094 to_locked, fragments, fragment_constituent_counts,
1095 fragment_count, to_mode, page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +01001096
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001097 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001098
1099out:
1100 mpool_fini(&local_page_pool);
1101
1102 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001103 * Tidy up the page table by reclaiming failed mappings (if there was an
1104 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001105 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001106 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001107
1108 return ret;
1109}
1110
Andrew Walbran996d1d12020-05-27 14:08:43 +01001111static struct ffa_value ffa_relinquish_check_update(
J-Alves3c5b2072022-11-21 12:45:40 +00001112 struct vm_locked from_locked, ffa_vm_id_t owner_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001113 struct ffa_memory_region_constituent **fragments,
1114 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1115 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001116{
1117 uint32_t orig_from_mode;
1118 uint32_t from_mode;
1119 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001120 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001121
Andrew Walbranca808b12020-05-15 17:22:28 +01001122 ret = ffa_relinquish_check_transition(
1123 from_locked, &orig_from_mode, fragments,
1124 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001125 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001126 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001127 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001128 }
1129
1130 /*
1131 * Create a local pool so any freed memory can't be used by another
1132 * thread. This is to ensure the original mapping can be restored if the
1133 * clear fails.
1134 */
1135 mpool_init_with_fallback(&local_page_pool, page_pool);
1136
1137 /*
1138 * First reserve all required memory for the new page table entries
1139 * without committing, to make sure the entire operation will succeed
1140 * without exhausting the page pool.
1141 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001142 if (!ffa_region_group_identity_map(
1143 from_locked, fragments, fragment_constituent_counts,
1144 fragment_count, from_mode, page_pool, false)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001145 /* TODO: partial defrag of failed range. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001146 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001147 goto out;
1148 }
1149
1150 /*
1151 * Update the mapping for the sender. This won't allocate because the
1152 * transaction was already prepared above, but may free pages in the
1153 * case that a whole block is being unmapped that was previously
1154 * partially mapped.
1155 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001156 CHECK(ffa_region_group_identity_map(
1157 from_locked, fragments, fragment_constituent_counts,
1158 fragment_count, from_mode, &local_page_pool, true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001159
1160 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001161 if (clear &&
1162 !ffa_clear_memory_constituents(
J-Alves3c5b2072022-11-21 12:45:40 +00001163 plat_ffa_owner_world_mode(owner_id), fragments,
J-Alves7db32002021-12-14 14:44:50 +00001164 fragment_constituent_counts, fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001165 /*
1166 * On failure, roll back by returning memory to the sender. This
1167 * may allocate pages which were previously freed into
1168 * `local_page_pool` by the call above, but will never allocate
1169 * more pages than that so can never fail.
1170 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001171 CHECK(ffa_region_group_identity_map(
Andrew Walbranca808b12020-05-15 17:22:28 +01001172 from_locked, fragments, fragment_constituent_counts,
1173 fragment_count, orig_from_mode, &local_page_pool,
1174 true));
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001175
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001176 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001177 goto out;
1178 }
1179
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001180 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001181
1182out:
1183 mpool_fini(&local_page_pool);
1184
1185 /*
1186 * Tidy up the page table by reclaiming failed mappings (if there was an
1187 * error) or merging entries into blocks where possible (on success).
1188 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001189 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001190
1191 return ret;
1192}
1193
1194/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001195 * Complete a memory sending operation by checking that it is valid, updating
1196 * the sender page table, and then either marking the share state as having
1197 * completed sending (on success) or freeing it (on failure).
1198 *
1199 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1200 */
J-Alvesfdd29272022-07-19 13:16:31 +01001201struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001202 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001203 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1204 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001205{
1206 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001207 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01001208 struct ffa_value ret;
1209
1210 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001211 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001212 assert(memory_region != NULL);
1213 composite = ffa_memory_region_get_composite(memory_region, 0);
1214 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001215
1216 /* Check that state is valid in sender page table and update. */
1217 ret = ffa_send_check_update(
1218 from_locked, share_state->fragments,
1219 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001220 share_state->fragment_count, composite->page_count,
1221 share_state->share_func, memory_region->receivers,
1222 memory_region->receiver_count, page_pool,
1223 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001224 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001225 if (ret.func != FFA_SUCCESS_32) {
1226 /*
1227 * Free share state, it failed to send so it can't be retrieved.
1228 */
1229 dlog_verbose("Complete failed, freeing share state.\n");
1230 share_state_free(share_states, share_state, page_pool);
1231 return ret;
1232 }
1233
1234 share_state->sending_complete = true;
1235 dlog_verbose("Marked sending complete.\n");
1236
J-Alvesee68c542020-10-29 17:48:20 +00001237 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001238}
1239
1240/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001241 * Check that the memory attributes match Hafnium expectations:
1242 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1243 * Write-Allocate Cacheable.
1244 */
1245static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001246 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001247{
1248 enum ffa_memory_type memory_type;
1249 enum ffa_memory_cacheability cacheability;
1250 enum ffa_memory_shareability shareability;
1251
1252 memory_type = ffa_get_memory_type_attr(attributes);
1253 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1254 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1255 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001256 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001257 }
1258
1259 cacheability = ffa_get_memory_cacheability_attr(attributes);
1260 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1261 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1262 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001263 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001264 }
1265
1266 shareability = ffa_get_memory_shareability_attr(attributes);
1267 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
		dlog_verbose("Invalid shareability %#x, expected %#x.\n",
1269 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001270 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001271 }
1272
1273 return (struct ffa_value){.func = FFA_SUCCESS_32};
1274}
1275
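/*
 * Illustrative sketch, not part of the build: attributes that satisfy the
 * check above, built with setter helpers that are assumed to mirror the
 * getters used in ffa_memory_attributes_validate().
 *
 *	ffa_memory_attributes_t attributes = 0;
 *
 *	ffa_set_memory_type_attr(&attributes, FFA_MEMORY_NORMAL_MEM);
 *	ffa_set_memory_cacheability_attr(&attributes,
 *					 FFA_MEMORY_CACHE_WRITE_BACK);
 *	ffa_set_memory_shareability_attr(&attributes,
 *					 FFA_MEMORY_INNER_SHAREABLE);
 *
 *	CHECK(ffa_memory_attributes_validate(attributes).func ==
 *	      FFA_SUCCESS_32);
 */
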
1276/**
 * Check that the given `memory_region` represents a valid memory send request
 * of the given `share_func` type, and update the stored receiver permissions
 * where necessary (e.g. setting NX for memory that is being shared).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001280 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001281 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001282 * not.
1283 */
J-Alves66652252022-07-06 09:49:51 +01001284struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001285 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1286 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001287 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001288{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001289 struct ffa_composite_memory_region *composite;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001290 uint64_t receivers_end;
1291 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001292 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001293 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001294 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001295 enum ffa_data_access data_access;
1296 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001297 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001298 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001299 const size_t minimum_first_fragment_length =
1300 (sizeof(struct ffa_memory_region) +
1301 sizeof(struct ffa_memory_access) +
1302 sizeof(struct ffa_composite_memory_region));
1303
1304 if (fragment_length < minimum_first_fragment_length) {
1305 dlog_verbose("Fragment length %u too short (min %u).\n",
1306 (size_t)fragment_length,
1307 minimum_first_fragment_length);
1308 return ffa_error(FFA_INVALID_PARAMETERS);
1309 }
1310
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001311 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1312 "struct ffa_memory_region_constituent must be 16 bytes");
1313 if (!is_aligned(fragment_length,
1314 sizeof(struct ffa_memory_region_constituent)) ||
1315 !is_aligned(memory_share_length,
1316 sizeof(struct ffa_memory_region_constituent))) {
1317 dlog_verbose(
1318 "Fragment length %u or total length %u"
1319 " is not 16-byte aligned.\n",
1320 fragment_length, memory_share_length);
1321 return ffa_error(FFA_INVALID_PARAMETERS);
1322 }
1323
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001324 if (fragment_length > memory_share_length) {
1325 dlog_verbose(
1326 "Fragment length %u greater than total length %u.\n",
1327 (size_t)fragment_length, (size_t)memory_share_length);
1328 return ffa_error(FFA_INVALID_PARAMETERS);
1329 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001330
J-Alves0b6653d2022-04-22 13:17:38 +01001331 assert(memory_region->receivers_offset ==
1332 offsetof(struct ffa_memory_region, receivers));
1333 assert(memory_region->memory_access_desc_size ==
1334 sizeof(struct ffa_memory_access));
1335
J-Alves95df0ef2022-12-07 10:09:48 +00001336 /* The sender must match the caller. */
1337 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1338 vm_id_is_current_world(memory_region->sender)) ||
1339 (vm_id_is_current_world(from_locked.vm->id) &&
1340 memory_region->sender != from_locked.vm->id)) {
1341 dlog_verbose("Invalid memory sender ID.\n");
1342 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001343 }
1344
	if (memory_region->receiver_count == 0U) {
1346 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001347 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001348 }
1349
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001350 /*
1351 * Ensure that the composite header is within the memory bounds and
1352 * doesn't overlap the first part of the message. Cast to uint64_t
1353 * to prevent overflow.
1354 */
1355 receivers_end = ((uint64_t)sizeof(struct ffa_memory_access) *
1356 (uint64_t)memory_region->receiver_count) +
1357 sizeof(struct ffa_memory_region);
1358 min_length = receivers_end +
1359 sizeof(struct ffa_composite_memory_region) +
1360 sizeof(struct ffa_memory_region_constituent);
1361 if (min_length > memory_share_length) {
1362 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1363 (size_t)memory_share_length, (size_t)min_length);
1364 return ffa_error(FFA_INVALID_PARAMETERS);
1365 }
1366
1367 composite_memory_region_offset =
1368 memory_region->receivers[0].composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001369
1370 /*
	 * Check that the composite memory region descriptor comes after the
	 * access descriptors, is 16-byte aligned, and fits in the first
	 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001374 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001375 if ((composite_memory_region_offset < receivers_end) ||
1376 (composite_memory_region_offset % 16 != 0) ||
1377 (composite_memory_region_offset >
1378 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1379 dlog_verbose(
1380 "Invalid composite memory region descriptor offset "
1381 "%u.\n",
1382 (size_t)composite_memory_region_offset);
1383 return ffa_error(FFA_INVALID_PARAMETERS);
1384 }
1385
1386 /*
1387 * Compute the start of the constituent regions. Already checked
1388 * to be not more than fragment_length and thus not more than
1389 * memory_share_length.
1390 */
1391 constituents_start = composite_memory_region_offset +
1392 sizeof(struct ffa_composite_memory_region);
1393 constituents_length = memory_share_length - constituents_start;
1394
1395 /*
1396 * Check that the number of constituents is consistent with the length
1397 * of the constituent region.
1398 */
1399 composite = ffa_memory_region_get_composite(memory_region, 0);
1400 if ((constituents_length %
1401 sizeof(struct ffa_memory_region_constituent) !=
1402 0) ||
1403 ((constituents_length /
1404 sizeof(struct ffa_memory_region_constituent)) !=
1405 composite->constituent_count)) {
1406 dlog_verbose("Invalid length %u or composite offset %u.\n",
1407 (size_t)memory_share_length,
1408 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001409 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001410 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001411 if (fragment_length < memory_share_length &&
1412 fragment_length < HF_MAILBOX_SIZE) {
1413 dlog_warning(
1414 "Initial fragment length %d smaller than mailbox "
1415 "size.\n",
1416 fragment_length);
1417 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001418
Andrew Walbrana65a1322020-04-06 19:32:32 +01001419 /*
1420 * Clear is not allowed for memory sharing, as the sender still has
1421 * access to the memory.
1422 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001423 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1424 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001425 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001426 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001427 }
1428
1429 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001430 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001431 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001432 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001433 }
1434
J-Alves363f5722022-04-25 17:37:37 +01001435 /* Check that the permissions are valid, for each specified receiver. */
1436 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1437 ffa_memory_access_permissions_t permissions =
1438 memory_region->receivers[i]
1439 .receiver_permissions.permissions;
1440 ffa_vm_id_t receiver_id =
1441 memory_region->receivers[i]
1442 .receiver_permissions.receiver;
1443
1444 if (memory_region->sender == receiver_id) {
1445 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001446 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001447 }
Federico Recanati85090c42021-12-15 13:17:54 +01001448
J-Alves363f5722022-04-25 17:37:37 +01001449 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1450 j++) {
1451 if (receiver_id ==
1452 memory_region->receivers[j]
1453 .receiver_permissions.receiver) {
1454 dlog_verbose(
					"Repeated receiver (%x) in memory send "
1456 "operation.\n",
1457 memory_region->receivers[j]
1458 .receiver_permissions.receiver);
1459 return ffa_error(FFA_INVALID_PARAMETERS);
1460 }
1461 }
1462
1463 if (composite_memory_region_offset !=
1464 memory_region->receivers[i]
1465 .composite_memory_region_offset) {
1466 dlog_verbose(
1467 "All ffa_memory_access should point to the "
1468 "same composite memory region offset.\n");
1469 return ffa_error(FFA_INVALID_PARAMETERS);
1470 }
1471
1472 data_access = ffa_get_data_access_attr(permissions);
1473 instruction_access =
1474 ffa_get_instruction_access_attr(permissions);
1475 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1476 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1477 dlog_verbose(
1478 "Reserved value for receiver permissions "
1479 "%#x.\n",
1480 permissions);
1481 return ffa_error(FFA_INVALID_PARAMETERS);
1482 }
1483 if (instruction_access !=
1484 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1485 dlog_verbose(
1486 "Invalid instruction access permissions %#x "
1487 "for sending memory.\n",
1488 permissions);
1489 return ffa_error(FFA_INVALID_PARAMETERS);
1490 }
1491 if (share_func == FFA_MEM_SHARE_32) {
1492 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1493 dlog_verbose(
1494 "Invalid data access permissions %#x "
1495 "for sharing memory.\n",
1496 permissions);
1497 return ffa_error(FFA_INVALID_PARAMETERS);
1498 }
1499 /*
1500 * According to section 10.10.3 of the FF-A v1.1 EAC0
1501 * spec, NX is required for share operations (but must
1502 * not be specified by the sender) so set it in the
1503 * copy that we store, ready to be returned to the
1504 * retriever.
1505 */
J-Alvesb19731a2022-06-20 17:30:33 +01001506 if (vm_id_is_current_world(receiver_id)) {
1507 ffa_set_instruction_access_attr(
1508 &permissions,
1509 FFA_INSTRUCTION_ACCESS_NX);
1510 memory_region->receivers[i]
1511 .receiver_permissions.permissions =
1512 permissions;
1513 }
J-Alves363f5722022-04-25 17:37:37 +01001514 }
1515 if (share_func == FFA_MEM_LEND_32 &&
1516 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1517 dlog_verbose(
1518 "Invalid data access permissions %#x for "
1519 "lending memory.\n",
1520 permissions);
1521 return ffa_error(FFA_INVALID_PARAMETERS);
1522 }
1523
1524 if (share_func == FFA_MEM_DONATE_32 &&
1525 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1526 dlog_verbose(
1527 "Invalid data access permissions %#x for "
1528 "donating memory.\n",
1529 permissions);
1530 return ffa_error(FFA_INVALID_PARAMETERS);
1531 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001532 }
1533
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001534 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1535 security_state =
1536 ffa_get_memory_security_attr(memory_region->attributes);
1537 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1538 dlog_verbose(
1539 "Invalid security state for memory share operation.\n");
1540 return ffa_error(FFA_INVALID_PARAMETERS);
1541 }
1542
Federico Recanatid937f5e2021-12-20 17:38:23 +01001543 /*
	 * For a memory donate, or a lend with a single borrower, the memory
	 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001546 */
J-Alves807794e2022-06-16 13:42:47 +01001547 if (share_func == FFA_MEM_DONATE_32 ||
1548 (share_func == FFA_MEM_LEND_32 &&
1549 memory_region->receiver_count == 1)) {
1550 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1551 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1552 dlog_verbose(
1553 "Memory type shall not be specified by "
1554 "sender.\n");
1555 return ffa_error(FFA_INVALID_PARAMETERS);
1556 }
1557 } else {
1558 /*
1559 * Check that sender's memory attributes match Hafnium
1560 * expectations: Normal Memory, Inner shareable, Write-Back
1561 * Read-Allocate Write-Allocate Cacheable.
1562 */
1563 ret = ffa_memory_attributes_validate(memory_region->attributes);
1564 if (ret.func != FFA_SUCCESS_32) {
1565 return ret;
1566 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001567 }
1568
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001569 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001570}
1571
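/*
 * Illustrative layout, not part of the build: a minimal single-fragment
 * transaction descriptor that passes ffa_memory_send_validate() for one
 * receiver and one constituent. Offsets are expressed with sizeof so they
 * track the structure definitions rather than hard-coded byte counts:
 *
 *	0:
 *		struct ffa_memory_region (sender, attributes, flags, handle,
 *		tag, receiver_count = 1, ...)
 *	sizeof(struct ffa_memory_region):
 *		struct ffa_memory_access for receiver 0, whose
 *		composite_memory_region_offset points immediately after it
 *	sizeof(struct ffa_memory_region) + sizeof(struct ffa_memory_access):
 *		struct ffa_composite_memory_region (page_count,
 *		constituent_count = 1)
 *	the above + sizeof(struct ffa_composite_memory_region):
 *		struct ffa_memory_region_constituent (address, page_count)
 *
 * With this layout, memory_share_length == fragment_length ==
 * constituents offset + sizeof(struct ffa_memory_region_constituent), which
 * satisfies the minimum-length, 16-byte-alignment and composite-offset checks
 * above, since each of these structures is a multiple of 16 bytes in size.
 */
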
1572/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001573 * Gets the share state for continuing an operation to donate, lend or share
1574 * memory, and checks that it is a valid request.
1575 *
1576 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1577 * not.
1578 */
J-Alvesfdd29272022-07-19 13:16:31 +01001579struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001580 struct share_states_locked share_states, ffa_memory_handle_t handle,
1581 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1582 struct mpool *page_pool)
1583{
1584 struct ffa_memory_share_state *share_state;
1585 struct ffa_memory_region *memory_region;
1586
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001587 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001588
1589 /*
1590 * Look up the share state by handle and make sure that the VM ID
1591 * matches.
1592 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01001593 share_state = get_share_state(share_states, handle);
1594 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001595 dlog_verbose(
1596 "Invalid handle %#x for memory send continuation.\n",
1597 handle);
1598 return ffa_error(FFA_INVALID_PARAMETERS);
1599 }
1600 memory_region = share_state->memory_region;
1601
J-Alvesfdd29272022-07-19 13:16:31 +01001602 if (vm_id_is_current_world(from_vm_id) &&
1603 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001604 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1605 return ffa_error(FFA_INVALID_PARAMETERS);
1606 }
1607
1608 if (share_state->sending_complete) {
1609 dlog_verbose(
1610 "Sending of memory handle %#x is already complete.\n",
1611 handle);
1612 return ffa_error(FFA_INVALID_PARAMETERS);
1613 }
1614
1615 if (share_state->fragment_count == MAX_FRAGMENTS) {
1616 /*
1617 * Log a warning as this is a sign that MAX_FRAGMENTS should
1618 * probably be increased.
1619 */
1620 dlog_warning(
1621 "Too many fragments for memory share with handle %#x; "
1622 "only %d supported.\n",
1623 handle, MAX_FRAGMENTS);
1624 /* Free share state, as it's not possible to complete it. */
1625 share_state_free(share_states, share_state, page_pool);
1626 return ffa_error(FFA_NO_MEMORY);
1627 }
1628
1629 *share_state_ret = share_state;
1630
1631 return (struct ffa_value){.func = FFA_SUCCESS_32};
1632}
1633
1634/**
J-Alves95df0ef2022-12-07 10:09:48 +00001635 * Checks if there is at least one receiver from the other world.
1636 */
J-Alvesfdd29272022-07-19 13:16:31 +01001637bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001638 struct ffa_memory_region *memory_region)
1639{
1640 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
1641 ffa_vm_id_t receiver = memory_region->receivers[i]
1642 .receiver_permissions.receiver;
1643 if (!vm_id_is_current_world(receiver)) {
1644 return true;
1645 }
1646 }
1647 return false;
1648}
1649
1650/**
J-Alves9da280b2022-12-21 14:55:39 +00001651 * Validates a call to donate, lend or share memory in which Hafnium is the
1652 * designated allocator of the memory handle. In practice, this also means
1653 * Hafnium is responsible for managing the state structures for the transaction.
1654 * If Hafnium is the SPMC, it should allocate the memory handle when either the
1655 * sender is an SP or there is at least one borrower that is an SP.
1656 * If Hafnium is the hypervisor, it should allocate the memory handle when
 * the operation involves only NWd VMs.
1658 *
1659 * If validation goes well, Hafnium updates the stage-2 page tables of the
1660 * sender. Validation consists of checking if the message length and number of
1661 * memory region constituents match, and if the transition is valid for the
1662 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001663 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001664 * Assumes that the caller has already found and locked the sender VM and copied
1665 * the memory region descriptor from the sender's TX buffer to a freshly
1666 * allocated page from Hafnium's internal pool. The caller must have also
1667 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001668 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001669 * This function takes ownership of the `memory_region` passed in and will free
1670 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001671 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001672struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001673 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001674 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001675 uint32_t fragment_length, uint32_t share_func,
1676 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001677{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001678 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001679 struct share_states_locked share_states;
1680 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001681
1682 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001683 * If there is an error validating the `memory_region` then we need to
1684 * free it because we own it but we won't be storing it in a share state
1685 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001686 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001687 ret = ffa_memory_send_validate(from_locked, memory_region,
1688 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001689 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001690 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001691 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001692 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001693 }
1694
Andrew Walbrana65a1322020-04-06 19:32:32 +01001695 /* Set flag for share function, ready to be retrieved later. */
1696 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001697 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001698 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001699 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001700 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001701 case FFA_MEM_LEND_32:
1702 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001703 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001704 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001705 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001706 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001707 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001708 }
1709
Andrew Walbranca808b12020-05-15 17:22:28 +01001710 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001711 /*
1712 * Allocate a share state before updating the page table. Otherwise if
1713 * updating the page table succeeded but allocating the share state
1714 * failed then it would leave the memory in a state where nobody could
1715 * get it back.
1716 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01001717 share_state = allocate_share_state(share_states, share_func,
1718 memory_region, fragment_length,
1719 FFA_MEMORY_HANDLE_INVALID);
1720 if (!share_state) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001721 dlog_verbose("Failed to allocate share state.\n");
1722 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001723 ret = ffa_error(FFA_NO_MEMORY);
1724 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001725 }
1726
Andrew Walbranca808b12020-05-15 17:22:28 +01001727 if (fragment_length == memory_share_length) {
1728 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001729 ret = ffa_memory_send_complete(
1730 from_locked, share_states, share_state, page_pool,
1731 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001732 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01001733 /*
1734 * Use sender ID from 'memory_region' assuming
1735 * that at this point it has been validated:
1736 * - MBZ at virtual FF-A instance.
1737 */
1738 ffa_vm_id_t sender_to_ret =
1739 (from_locked.vm->id == HF_OTHER_WORLD_ID)
1740 ? memory_region->sender
1741 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01001742 ret = (struct ffa_value){
1743 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001744 .arg1 = (uint32_t)memory_region->handle,
1745 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01001746 .arg3 = fragment_length,
1747 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01001748 }
1749
1750out:
1751 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001752 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001753 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001754}
1755
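/*
 * Illustrative call flow, not part of the build, for a fragmented send as
 * handled by ffa_memory_send() above and ffa_memory_send_continue() below:
 *
 *	1. The sender invokes FFA_MEM_SHARE/LEND/DONATE with the first
 *	   fragment and fragment_length < memory_share_length.
 *	2. ffa_memory_send() allocates a share state and replies with
 *	   FFA_MEM_FRAG_RX, carrying the handle in arg1/arg2 and the length
 *	   received so far in arg3.
 *	3. The sender copies the next fragment of constituents into its TX
 *	   buffer and invokes FFA_MEM_FRAG_TX with the handle.
 *	4. ffa_memory_send_continue() appends the fragment; once all
 *	   constituents have arrived it completes the operation via
 *	   ffa_memory_send_complete(), otherwise it replies with another
 *	   FFA_MEM_FRAG_RX giving the next expected offset.
 */
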
1756/**
 * Continues an operation to donate, lend or share memory to a VM from the
 * current world. If this is the last fragment then checks that the transition
 * is valid for the type of memory sending operation and updates the stage-2
 * page tables of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001761 *
1762 * Assumes that the caller has already found and locked the sender VM and copied
1763 * the memory region descriptor from the sender's TX buffer to a freshly
1764 * allocated page from Hafnium's internal pool.
1765 *
1766 * This function takes ownership of the `fragment` passed in; it must not be
1767 * freed by the caller.
1768 */
1769struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1770 void *fragment,
1771 uint32_t fragment_length,
1772 ffa_memory_handle_t handle,
1773 struct mpool *page_pool)
1774{
1775 struct share_states_locked share_states = share_states_lock();
1776 struct ffa_memory_share_state *share_state;
1777 struct ffa_value ret;
1778 struct ffa_memory_region *memory_region;
1779
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001780 CHECK(is_aligned(fragment,
1781 alignof(struct ffa_memory_region_constituent)));
1782 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
1783 0) {
1784 dlog_verbose("Fragment length %u misaligned.\n",
1785 fragment_length);
1786 ret = ffa_error(FFA_INVALID_PARAMETERS);
1787 goto out_free_fragment;
1788 }
1789
Andrew Walbranca808b12020-05-15 17:22:28 +01001790 ret = ffa_memory_send_continue_validate(share_states, handle,
1791 &share_state,
1792 from_locked.vm->id, page_pool);
1793 if (ret.func != FFA_SUCCESS_32) {
1794 goto out_free_fragment;
1795 }
1796 memory_region = share_state->memory_region;
1797
J-Alves95df0ef2022-12-07 10:09:48 +00001798 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001799 dlog_error(
			"Got hypervisor-allocated handle for memory send to "
			"other world. This should never happen, and indicates "
			"a bug in EL3 code.\n");
1804 ret = ffa_error(FFA_INVALID_PARAMETERS);
1805 goto out_free_fragment;
1806 }
1807
1808 /* Add this fragment. */
1809 share_state->fragments[share_state->fragment_count] = fragment;
1810 share_state->fragment_constituent_counts[share_state->fragment_count] =
1811 fragment_length / sizeof(struct ffa_memory_region_constituent);
1812 share_state->fragment_count++;
1813
1814 /* Check whether the memory send operation is now ready to complete. */
1815 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001816 ret = ffa_memory_send_complete(
1817 from_locked, share_states, share_state, page_pool,
1818 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001819 } else {
1820 ret = (struct ffa_value){
1821 .func = FFA_MEM_FRAG_RX_32,
1822 .arg1 = (uint32_t)handle,
1823 .arg2 = (uint32_t)(handle >> 32),
1824 .arg3 = share_state_next_fragment_offset(share_states,
1825 share_state)};
1826 }
1827 goto out;
1828
1829out_free_fragment:
1830 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001831
1832out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001833 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001834 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001835}
1836
Andrew Walbranca808b12020-05-15 17:22:28 +01001837/** Clean up after the receiver has finished retrieving a memory region. */
1838static void ffa_memory_retrieve_complete(
1839 struct share_states_locked share_states,
1840 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1841{
1842 if (share_state->share_func == FFA_MEM_DONATE_32) {
1843 /*
1844 * Memory that has been donated can't be relinquished,
1845 * so no need to keep the share state around.
1846 */
1847 share_state_free(share_states, share_state, page_pool);
1848 dlog_verbose("Freed share state for donate.\n");
1849 }
1850}
1851
J-Alves2d8457f2022-10-05 11:06:41 +01001852/**
1853 * Initialises the given memory region descriptor to be used for an
1854 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
1855 * fragment.
 * The memory region descriptor is initialised according to the retriever's
 * FF-A version.
1858 *
1859 * Returns true on success, or false if the given constituents won't all fit in
1860 * the first fragment.
1861 */
1862static bool ffa_retrieved_memory_region_init(
1863 void *response, uint32_t ffa_version, size_t response_max_size,
1864 ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
1865 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
1866 ffa_vm_id_t receiver_id, ffa_memory_access_permissions_t permissions,
1867 uint32_t page_count, uint32_t total_constituent_count,
1868 const struct ffa_memory_region_constituent constituents[],
1869 uint32_t fragment_constituent_count, uint32_t *total_length,
1870 uint32_t *fragment_length)
1871{
1872 struct ffa_composite_memory_region *composite_memory_region;
1873 struct ffa_memory_access *receiver;
1874 uint32_t i;
1875 uint32_t constituents_offset;
1876 uint32_t receiver_count;
1877
1878 assert(response != NULL);
1879
1880 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1881 struct ffa_memory_region_v1_0 *retrieve_response =
1882 (struct ffa_memory_region_v1_0 *)response;
1883
J-Alves5da37d92022-10-24 16:33:48 +01001884 ffa_memory_region_init_header_v1_0(
1885 retrieve_response, sender, attributes, flags, handle, 0,
1886 RECEIVERS_COUNT_IN_RETRIEVE_RESP);
J-Alves2d8457f2022-10-05 11:06:41 +01001887
1888 receiver = &retrieve_response->receivers[0];
1889 receiver_count = retrieve_response->receiver_count;
1890
1891 receiver->composite_memory_region_offset =
1892 sizeof(struct ffa_memory_region_v1_0) +
1893 receiver_count * sizeof(struct ffa_memory_access);
1894
1895 composite_memory_region = ffa_memory_region_get_composite_v1_0(
1896 retrieve_response, 0);
1897 } else {
1898 /* Default to FF-A v1.1 version. */
1899 struct ffa_memory_region *retrieve_response =
1900 (struct ffa_memory_region *)response;
1901
1902 ffa_memory_region_init_header(retrieve_response, sender,
1903 attributes, flags, handle, 0, 1);
1904
1905 receiver = &retrieve_response->receivers[0];
1906 receiver_count = retrieve_response->receiver_count;
1907
1908 /*
		 * Note that `sizeof(struct ffa_memory_region)` and
		 * `sizeof(struct ffa_memory_access)` must both be multiples of
		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
		 * guaranteed that the offset we calculate here is aligned to a
		 * 64-bit boundary and so 64-bit values can be copied without
		 * alignment faults.
1915 */
1916 receiver->composite_memory_region_offset =
1917 sizeof(struct ffa_memory_region) +
1918 receiver_count * sizeof(struct ffa_memory_access);
1919
1920 composite_memory_region =
1921 ffa_memory_region_get_composite(retrieve_response, 0);
1922 }
1923
1924 assert(receiver != NULL);
1925 assert(composite_memory_region != NULL);
1926
1927 /*
	 * Initialised here because memory retrieve responses currently expect
	 * exactly one borrower to be specified.
1930 */
1931 ffa_memory_access_init_permissions(receiver, receiver_id, 0, 0, flags);
1932 receiver->receiver_permissions.permissions = permissions;
1933
1934 composite_memory_region->page_count = page_count;
1935 composite_memory_region->constituent_count = total_constituent_count;
1936 composite_memory_region->reserved_0 = 0;
1937
1938 constituents_offset = receiver->composite_memory_region_offset +
1939 sizeof(struct ffa_composite_memory_region);
1940 if (constituents_offset +
1941 fragment_constituent_count *
1942 sizeof(struct ffa_memory_region_constituent) >
1943 response_max_size) {
1944 return false;
1945 }
1946
1947 for (i = 0; i < fragment_constituent_count; ++i) {
1948 composite_memory_region->constituents[i] = constituents[i];
1949 }
1950
1951 if (total_length != NULL) {
1952 *total_length =
1953 constituents_offset +
1954 composite_memory_region->constituent_count *
1955 sizeof(struct ffa_memory_region_constituent);
1956 }
1957 if (fragment_length != NULL) {
1958 *fragment_length =
1959 constituents_offset +
1960 fragment_constituent_count *
1961 sizeof(struct ffa_memory_region_constituent);
1962 }
1963
1964 return true;
1965}
1966
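/*
 * Worked example, illustrative only, of the lengths computed above for a
 * retrieve response with one receiver whose single-constituent region fits
 * entirely in the first fragment:
 *
 *	constituents_offset = receiver->composite_memory_region_offset +
 *			      sizeof(struct ffa_composite_memory_region);
 *	total_length = fragment_length =
 *		constituents_offset +
 *		1 * sizeof(struct ffa_memory_region_constituent);
 */
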
J-Alves96de29f2022-04-26 16:05:24 +01001967/*
 * Finds the given receiver in the 'struct ffa_memory_region' receivers array
 * and returns its index. If the receiver's ID doesn't exist in the array,
 * returns the region's 'receiver_count'.
1971 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001972uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
1973 ffa_vm_id_t receiver)
J-Alves96de29f2022-04-26 16:05:24 +01001974{
1975 struct ffa_memory_access *receivers;
1976 uint32_t i;
1977
1978 assert(memory_region != NULL);
1979
1980 receivers = memory_region->receivers;
1981
1982 for (i = 0U; i < memory_region->receiver_count; i++) {
1983 if (receivers[i].receiver_permissions.receiver == receiver) {
1984 break;
1985 }
1986 }
1987
1988 return i;
1989}
1990
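/*
 * Typical usage, illustrative only: look up the caller in the lender's
 * descriptor and treat "not found" as an error, as done in
 * ffa_memory_retrieve_validate() below.
 *
 *	uint32_t index =
 *		ffa_memory_region_get_receiver(memory_region, receiver_id);
 *
 *	if (index == memory_region->receiver_count) {
 *		return ffa_error(FFA_INVALID_PARAMETERS);
 *	}
 */
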
1991/**
 * Validates the permissions requested by the retriever against those specified
 * by the lender of the memory share operation. Optionally, through the
 * `permissions` argument, returns the permissions to be used for the S2
 * mapping.
1995 * Returns true if permissions are valid, false otherwise.
1996 */
1997static bool ffa_memory_retrieve_is_memory_access_valid(
1998 enum ffa_data_access sent_data_access,
1999 enum ffa_data_access requested_data_access,
2000 enum ffa_instruction_access sent_instruction_access,
2001 enum ffa_instruction_access requested_instruction_access,
2002 ffa_memory_access_permissions_t *permissions)
2003{
2004 switch (sent_data_access) {
2005 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2006 case FFA_DATA_ACCESS_RW:
2007 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2008 requested_data_access == FFA_DATA_ACCESS_RW) {
2009 if (permissions != NULL) {
2010 ffa_set_data_access_attr(permissions,
2011 FFA_DATA_ACCESS_RW);
2012 }
2013 break;
2014 }
2015 /* Intentional fall-through. */
2016 case FFA_DATA_ACCESS_RO:
2017 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2018 requested_data_access == FFA_DATA_ACCESS_RO) {
2019 if (permissions != NULL) {
2020 ffa_set_data_access_attr(permissions,
2021 FFA_DATA_ACCESS_RO);
2022 }
2023 break;
2024 }
2025 dlog_verbose(
2026 "Invalid data access requested; sender specified "
2027 "permissions %#x but receiver requested %#x.\n",
2028 sent_data_access, requested_data_access);
2029 return false;
2030 case FFA_DATA_ACCESS_RESERVED:
2031 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2032 "checked before this point.");
2033 }
2034
2035 switch (sent_instruction_access) {
2036 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2037 case FFA_INSTRUCTION_ACCESS_X:
2038 if (requested_instruction_access ==
2039 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2040 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
2041 if (permissions != NULL) {
2042 ffa_set_instruction_access_attr(
2043 permissions, FFA_INSTRUCTION_ACCESS_X);
2044 }
2045 break;
2046 }
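		/* Intentional fall-through. */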
2047 case FFA_INSTRUCTION_ACCESS_NX:
2048 if (requested_instruction_access ==
2049 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2050 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2051 if (permissions != NULL) {
2052 ffa_set_instruction_access_attr(
2053 permissions, FFA_INSTRUCTION_ACCESS_NX);
2054 }
2055 break;
2056 }
2057 dlog_verbose(
2058 "Invalid instruction access requested; sender "
2059 "specified permissions %#x but receiver requested "
2060 "%#x.\n",
2061 sent_instruction_access, requested_instruction_access);
2062 return false;
2063 case FFA_INSTRUCTION_ACCESS_RESERVED:
2064 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2065 "be checked before this point.");
2066 }
2067
2068 return true;
2069}
2070
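/*
 * Summary, derived from the checks above, of how the data access requested by
 * the retriever combines with what the lender sent; "denied" combinations
 * fail with FFA_DENIED:
 *
 *	sent \ requested   NOT_SPECIFIED   RO   RW
 *	NOT_SPECIFIED      RW              RO   RW
 *	RW                 RW              RO   RW
 *	RO                 RO              RO   denied
 *
 * Instruction access follows the same pattern with X/NX in place of RW/RO.
 */
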
2071/**
2072 * Validate the receivers' permissions in the retrieve request against those
2073 * specified by the lender.
 * In the `permissions` argument, returns the permissions to set at S2 for the
 * caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002076 * The function looks into the flag to bypass multiple borrower checks:
2077 * - If not set returns FFA_SUCCESS if all specified permissions are valid.
2078 * - If set returns FFA_SUCCESS if the descriptor contains the permissions
2079 * to the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2080 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002081 */
2082static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2083 struct ffa_memory_region *memory_region,
2084 struct ffa_memory_region *retrieve_request, ffa_vm_id_t to_vm_id,
2085 ffa_memory_access_permissions_t *permissions)
2086{
2087 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002088 bool bypass_multi_receiver_check =
2089 (retrieve_request->flags &
2090 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alves96de29f2022-04-26 16:05:24 +01002091
2092 assert(permissions != NULL);
2093
J-Alves3456e032023-07-20 12:20:05 +01002094 if (!bypass_multi_receiver_check) {
2095 if (retrieve_request->receiver_count !=
2096 memory_region->receiver_count) {
2097 dlog_verbose(
2098 "Retrieve request should contain same list of "
2099 "borrowers, as specified by the lender.\n");
2100 return ffa_error(FFA_INVALID_PARAMETERS);
2101 }
2102 } else {
2103 if (retrieve_request->receiver_count != 1) {
2104 dlog_verbose(
				"Bypass multiple borrower check is set; "
				"receiver list must have exactly 1 entry "
				"(got %x).\n",
				retrieve_request->receiver_count);
2108 return ffa_error(FFA_INVALID_PARAMETERS);
2109 }
J-Alves96de29f2022-04-26 16:05:24 +01002110 }
2111
2112 retrieve_receiver_index = retrieve_request->receiver_count;
2113
2114 /* Should be populated with the permissions of the retriever. */
2115 *permissions = 0;
2116
2117 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2118 ffa_memory_access_permissions_t sent_permissions;
2119 struct ffa_memory_access *current_receiver =
2120 &retrieve_request->receivers[i];
2121 ffa_memory_access_permissions_t requested_permissions =
2122 current_receiver->receiver_permissions.permissions;
2123 ffa_vm_id_t current_receiver_id =
2124 current_receiver->receiver_permissions.receiver;
2125 bool found_to_id = current_receiver_id == to_vm_id;
2126
J-Alves3456e032023-07-20 12:20:05 +01002127 if (bypass_multi_receiver_check && !found_to_id) {
2128 dlog_verbose(
2129 "Bypass multiple borrower check for id %x.\n",
2130 current_receiver_id);
2131 continue;
2132 }
2133
J-Alves96de29f2022-04-26 16:05:24 +01002134 /*
2135 * Find the current receiver in the transaction descriptor from
2136 * sender.
2137 */
2138 uint32_t mem_region_receiver_index =
2139 ffa_memory_region_get_receiver(memory_region,
2140 current_receiver_id);
2141
2142 if (mem_region_receiver_index ==
2143 memory_region->receiver_count) {
2144 dlog_verbose("%s: receiver %x not found\n", __func__,
2145 current_receiver_id);
2146 return ffa_error(FFA_DENIED);
2147 }
2148
2149 sent_permissions =
2150 memory_region->receivers[mem_region_receiver_index]
2151 .receiver_permissions.permissions;
2152
2153 if (found_to_id) {
2154 retrieve_receiver_index = i;
2155 }
2156
2157 /*
		 * The index of the caller was recorded above while traversing
		 * the list of receivers; the caller must be present in the
		 * list, which is checked after this loop.
2160 */
2161
2162 if (current_receiver->composite_memory_region_offset != 0U) {
2163 dlog_verbose(
2164 "Retriever specified address ranges not "
2165 "supported (got offset %d).\n",
2166 current_receiver
2167 ->composite_memory_region_offset);
2168 return ffa_error(FFA_INVALID_PARAMETERS);
2169 }
2170
2171 /*
2172 * Check permissions from sender against permissions requested
2173 * by receiver.
2174 */
2175 if (!ffa_memory_retrieve_is_memory_access_valid(
2176 ffa_get_data_access_attr(sent_permissions),
2177 ffa_get_data_access_attr(requested_permissions),
2178 ffa_get_instruction_access_attr(sent_permissions),
2179 ffa_get_instruction_access_attr(
2180 requested_permissions),
2181 found_to_id ? permissions : NULL)) {
2182 return ffa_error(FFA_DENIED);
2183 }
2184
2185 /*
2186 * Can't request PM to clear memory if only provided with RO
2187 * permissions.
2188 */
2189 if (found_to_id &&
2190 (ffa_get_data_access_attr(*permissions) ==
2191 FFA_DATA_ACCESS_RO) &&
2192 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2193 0U) {
2194 dlog_verbose(
				"Receiver has RO permissions and cannot "
				"request clear.\n");
2197 return ffa_error(FFA_DENIED);
2198 }
2199 }
2200
2201 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2202 dlog_verbose(
2203 "Retrieve request does not contain caller's (%x) "
2204 "permissions\n",
2205 to_vm_id);
2206 return ffa_error(FFA_INVALID_PARAMETERS);
2207 }
2208
2209 return (struct ffa_value){.func = FFA_SUCCESS_32};
2210}
2211
J-Alvesa9cd7e32022-07-01 13:49:33 +01002212/*
2213 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2214 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2215 * of a pending memory sharing operation whose allocator is the SPM, for
2216 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2217 * the memory region descriptor of the retrieve request must be zeroed with the
2218 * exception of the sender ID and handle.
2219 */
2220bool is_ffa_memory_retrieve_borrower_request(struct ffa_memory_region *request,
2221 struct vm_locked to_locked)
2222{
2223 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2224 request->attributes == 0U && request->flags == 0U &&
2225 request->tag == 0U && request->receiver_count == 0U &&
2226 plat_ffa_memory_handle_allocated_by_current_world(
2227 request->handle);
2228}
2229
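/*
 * Illustrative sketch, not part of the build, of the hypervisor retrieve
 * request recognised by the check above: every field zeroed except the sender
 * and an SPMC-allocated handle.
 *
 *	struct ffa_memory_region request = {
 *		.sender = <lender ID>,
 *		.handle = <handle allocated by the SPMC>,
 *		.attributes = 0,
 *		.flags = 0,
 *		.tag = 0,
 *		.receiver_count = 0,
 *	};
 */
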
2230/*
2231 * Helper to reset count of fragments retrieved by the hypervisor.
2232 */
2233static void ffa_memory_retrieve_complete_from_hyp(
2234 struct ffa_memory_share_state *share_state)
2235{
2236 if (share_state->hypervisor_fragment_count ==
2237 share_state->fragment_count) {
2238 share_state->hypervisor_fragment_count = 0;
2239 }
2240}
2241
J-Alves089004f2022-07-13 14:25:44 +01002242/**
2243 * Validate that the memory region descriptor provided by the borrower on
2244 * FFA_MEM_RETRIEVE_REQ, against saved memory region provided by lender at the
2245 * memory sharing call.
2246 */
2247static struct ffa_value ffa_memory_retrieve_validate(
2248 ffa_vm_id_t receiver_id, struct ffa_memory_region *retrieve_request,
2249 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2250 uint32_t share_func)
2251{
2252 ffa_memory_region_flags_t transaction_type =
2253 retrieve_request->flags &
2254 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002255 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01002256
2257 assert(retrieve_request != NULL);
2258 assert(memory_region != NULL);
2259 assert(receiver_index != NULL);
2260 assert(retrieve_request->sender == memory_region->sender);
2261
2262 /*
2263 * Check that the transaction type expected by the receiver is
2264 * correct, if it has been specified.
2265 */
2266 if (transaction_type !=
2267 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2268 transaction_type != (memory_region->flags &
2269 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2270 dlog_verbose(
2271 "Incorrect transaction type %#x for "
2272 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2273 transaction_type,
2274 memory_region->flags &
2275 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2276 retrieve_request->handle);
2277 return ffa_error(FFA_INVALID_PARAMETERS);
2278 }
2279
2280 if (retrieve_request->tag != memory_region->tag) {
2281 dlog_verbose(
2282 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2283 "%d for handle %#x.\n",
2284 retrieve_request->tag, memory_region->tag,
2285 retrieve_request->handle);
2286 return ffa_error(FFA_INVALID_PARAMETERS);
2287 }
2288
2289 *receiver_index =
2290 ffa_memory_region_get_receiver(memory_region, receiver_id);
2291
2292 if (*receiver_index == memory_region->receiver_count) {
2293 dlog_verbose(
2294 "Incorrect receiver VM ID %d for "
2295 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01002296 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002297 return ffa_error(FFA_INVALID_PARAMETERS);
2298 }
2299
2300 if ((retrieve_request->flags &
2301 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2302 dlog_verbose(
			"Retriever-specified 'address range alignment hint' "
			"is not supported.\n");
2305 return ffa_error(FFA_INVALID_PARAMETERS);
2306 }
2307 if ((retrieve_request->flags &
2308 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2309 dlog_verbose(
2310 "Bits 8-5 must be zero in memory region's flags "
2311 "(address range alignment hint not supported).\n");
2312 return ffa_error(FFA_INVALID_PARAMETERS);
2313 }
2314
2315 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2316 dlog_verbose(
2317 "Bits 31-10 must be zero in memory region's flags.\n");
2318 return ffa_error(FFA_INVALID_PARAMETERS);
2319 }
2320
2321 if (share_func == FFA_MEM_SHARE_32 &&
2322 (retrieve_request->flags &
2323 (FFA_MEMORY_REGION_FLAG_CLEAR |
2324 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2325 dlog_verbose(
			"Memory share operation can't request clearing of "
			"the memory region.\n");
2328 return ffa_error(FFA_INVALID_PARAMETERS);
2329 }
2330
2331 /*
2332 * If the borrower needs the memory to be cleared before mapping
2333 * to its address space, the sender should have set the flag
2334 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2335 * FFA_DENIED.
2336 */
2337 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2338 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2339 dlog_verbose(
2340 "Borrower needs memory cleared. Sender needs to set "
2341 "flag for clearing memory.\n");
2342 return ffa_error(FFA_DENIED);
2343 }
2344
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002345 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2346 security_state =
2347 ffa_get_memory_security_attr(retrieve_request->attributes);
2348 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2349 dlog_verbose(
2350 "Invalid security state for memory retrieve request "
2351 "operation.\n");
2352 return ffa_error(FFA_INVALID_PARAMETERS);
2353 }
2354
J-Alves089004f2022-07-13 14:25:44 +01002355 /*
2356 * If memory type is not specified, bypass validation of memory
2357 * attributes in the retrieve request. The retriever is expecting to
2358 * obtain this information from the SPMC.
2359 */
2360 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2361 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2362 return (struct ffa_value){.func = FFA_SUCCESS_32};
2363 }
2364
2365 /*
2366 * Ensure receiver's attributes are compatible with how
2367 * Hafnium maps memory: Normal Memory, Inner shareable,
2368 * Write-Back Read-Allocate Write-Allocate Cacheable.
2369 */
2370 return ffa_memory_attributes_validate(retrieve_request->attributes);
2371}
2372
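/*
 * Illustrative sketch, not part of the build: a minimal retrieve request that
 * passes the validation above for a lend or donate with a single borrower.
 * The borrower leaves the transaction type, memory type and address range
 * hints unspecified, so they are taken from, or provided by, the SPMC:
 *
 *	- sender and tag match those used by the lender;
 *	- handle is the one returned when the memory was sent;
 *	- flags = 0 and attributes = 0 (memory type not specified);
 *	- receiver_count = 1, with receivers[0] naming the caller, requesting
 *	  compatible permissions and a composite_memory_region_offset of 0
 *	  (retriever-specified address ranges are not supported).
 */
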
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002373struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2374 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002375 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002376 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002377{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002378 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002379 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002380 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002381 sizeof(struct ffa_memory_access);
2382 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002383 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002384 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002385 uint32_t memory_to_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002386 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002387 struct ffa_memory_share_state *share_state;
2388 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002389 struct ffa_composite_memory_region *composite;
2390 uint32_t total_length;
2391 uint32_t fragment_length;
J-Alves089004f2022-07-13 14:25:44 +01002392 ffa_vm_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002393 bool is_send_complete = false;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002394 ffa_memory_attributes_t attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002395
2396 dump_share_states();
2397
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002398 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002399 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002400 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002401 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002402 expected_retrieve_request_length,
2403 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002404 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002405 }
2406
2407 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002408 share_state = get_share_state(share_states, handle);
2409 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002410 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002411 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002412 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002413 goto out;
2414 }
2415
J-Alves96de29f2022-04-26 16:05:24 +01002416 if (!share_state->sending_complete) {
2417 dlog_verbose(
2418 "Memory with handle %#x not fully sent, can't "
2419 "retrieve.\n",
2420 handle);
2421 ret = ffa_error(FFA_INVALID_PARAMETERS);
2422 goto out;
2423 }
2424
Andrew Walbrana65a1322020-04-06 19:32:32 +01002425 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002426
Andrew Walbrana65a1322020-04-06 19:32:32 +01002427 CHECK(memory_region != NULL);
2428
J-Alves089004f2022-07-13 14:25:44 +01002429 if (retrieve_request->sender != memory_region->sender) {
2430 dlog_verbose(
			"Sender %d of the retrieve request doesn't match the "
			"sender of memory region with handle %#x.\n",
			retrieve_request->sender, handle);
2434 ret = ffa_error(FFA_INVALID_PARAMETERS);
2435 goto out;
2436 }
J-Alves96de29f2022-04-26 16:05:24 +01002437
J-Alvesa9cd7e32022-07-01 13:49:33 +01002438 if (!is_ffa_memory_retrieve_borrower_request(retrieve_request,
2439 to_locked)) {
2440 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002441
		/*
		 * The SPMC can only process retrieve requests for memory
		 * sharing operations with a single borrower from the other
		 * world, as it can't determine the ID of the NWd VM that
		 * invoked the retrieve request. It relies on the hypervisor
		 * to validate the caller's ID against the one provided in
		 * the `receivers` list of the retrieve response.
		 * If there is exactly one NWd borrower in the transaction
		 * descriptor, record its ID in `receiver_id` for later use
		 * when validating the retrieve request message.
		 */
		if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
			uint32_t other_world_count = 0;

			for (uint32_t i = 0; i < memory_region->receiver_count;
			     i++) {
				ffa_vm_id_t id =
					memory_region->receivers[i]
						.receiver_permissions.receiver;

				if (!vm_id_is_current_world(id)) {
					receiver_id = id;
					other_world_count++;
				}
			}
			if (other_world_count > 1) {
				dlog_verbose(
					"Only one receiver from the other "
					"world is supported.\n");
				ret = ffa_error(FFA_NOT_SUPPORTED);
				goto out;
			}
		}
2472
		/*
		 * Validate the retrieve request against what was sent by the
		 * sender. On success, the function outputs `receiver_index`,
		 * the receiver's index into the memory region's receivers
		 * list.
		 */
J-Alves089004f2022-07-13 14:25:44 +01002479 ret = ffa_memory_retrieve_validate(
2480 receiver_id, retrieve_request, memory_region,
2481 &receiver_index, share_state->share_func);
2482 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002483 goto out;
2484 }
2485
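		/*
		 * Each borrower may only retrieve the memory once per
		 * transaction; the retrieved fragment count is reset when the
		 * borrower relinquishes the memory.
		 */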
2486 if (share_state->retrieved_fragment_count[receiver_index] !=
2487 0U) {
2488 dlog_verbose(
2489 "Memory with handle %#x already retrieved.\n",
2490 handle);
2491 ret = ffa_error(FFA_DENIED);
2492 goto out;
2493 }
2494
J-Alvesa9cd7e32022-07-01 13:49:33 +01002495 ret = ffa_memory_retrieve_validate_memory_access_list(
2496 memory_region, retrieve_request, receiver_id,
2497 &permissions);
J-Alves614d9f42022-06-28 14:03:10 +01002498 if (ret.func != FFA_SUCCESS_32) {
2499 goto out;
2500 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002501
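		/*
		 * Map the validated FF-A access permissions onto page table
		 * mode bits, taking the sender's original mode into account.
		 */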
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002502 memory_to_mode = ffa_memory_permissions_to_mode(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002503 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002504
J-Alvesa9cd7e32022-07-01 13:49:33 +01002505 ret = ffa_retrieve_check_update(
2506 to_locked, memory_region->sender,
2507 share_state->fragments,
2508 share_state->fragment_constituent_counts,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002509 share_state->fragment_count, memory_to_mode,
J-Alvesa9cd7e32022-07-01 13:49:33 +01002510 share_state->share_func, false, page_pool);
2511
2512 if (ret.func != FFA_SUCCESS_32) {
2513 goto out;
2514 }
2515
2516 share_state->retrieved_fragment_count[receiver_index] = 1;
2517 is_send_complete =
2518 share_state->retrieved_fragment_count[receiver_index] ==
2519 share_state->fragment_count;
J-Alves3c5b2072022-11-21 12:45:40 +00002520
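		/*
		 * Record whether the borrower asked for the memory to be
		 * cleared after relinquish, so the request is honoured even
		 * if the later FFA_MEM_RELINQUISH does not set the flag.
		 */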
2521 share_state->clear_after_relinquish =
2522 (retrieve_request->flags &
2523 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) != 0U;
2524
J-Alvesa9cd7e32022-07-01 13:49:33 +01002525 } else {
2526 if (share_state->hypervisor_fragment_count != 0U) {
2527 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002528 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002529 "the hypervisor.\n",
2530 handle);
2531 ret = ffa_error(FFA_DENIED);
2532 goto out;
2533 }
2534
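		/*
		 * Track the hypervisor's retrieve separately from the
		 * borrowers' retrievals; no page tables are updated for a
		 * hypervisor retrieve request.
		 */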
2535 share_state->hypervisor_fragment_count = 1;
2536
2537 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002538 }
2539
J-Alvesb5084cf2022-07-06 14:20:12 +01002540 /* VMs acquire the RX buffer from SPMC. */
2541 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2542
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002543 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002544 * Copy response to RX buffer of caller and deliver the message.
2545 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002546 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002547 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002548 composite = ffa_memory_region_get_composite(memory_region, 0);
2549 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002550 * Constituents which we received in the first fragment should
2551 * always fit in the first fragment we are sending, because the
2552 * header is the same size in both cases and we have a fixed
2553 * message buffer size. So `ffa_retrieved_memory_region_init`
2554 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002555 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002556
	/*
	 * Set the security state in the memory retrieve response attributes,
	 * based on the sender's original memory mode.
	 */
2561 attributes = plat_ffa_memory_security_mode(
2562 memory_region->attributes, share_state->sender_orig_mode);
2563
Andrew Walbranca808b12020-05-15 17:22:28 +01002564 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01002565 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002566 HF_MAILBOX_SIZE, memory_region->sender, attributes,
2567 memory_region->flags, handle, receiver_id, permissions,
2568 composite->page_count, composite->constituent_count,
2569 share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01002570 share_state->fragment_constituent_counts[0], &total_length,
2571 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01002572
Andrew Walbranca808b12020-05-15 17:22:28 +01002573 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002574 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002575 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002576 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002577
J-Alvesa9cd7e32022-07-01 13:49:33 +01002578 if (is_send_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002579 ffa_memory_retrieve_complete(share_states, share_state,
2580 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002581 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002582 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002583 .arg1 = total_length,
2584 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002585out:
2586 share_states_unlock(&share_states);
2587 dump_share_states();
2588 return ret;
2589}
2590
J-Alves5da37d92022-10-24 16:33:48 +01002591/**
2592 * Determine expected fragment offset according to the FF-A version of
2593 * the caller.
2594 */
2595static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
2596 struct ffa_memory_region *memory_region,
2597 uint32_t retrieved_constituents_count, uint32_t ffa_version)
2598{
2599 uint32_t expected_fragment_offset;
2600 uint32_t composite_constituents_offset;
2601
2602 if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		/*
		 * Hafnium stores memory regions internally in the FF-A v1.1
		 * format, so the constituents offset can be read directly
		 * from the descriptor.
		 */
2607 composite_constituents_offset =
2608 ffa_composite_constituent_offset(memory_region, 0);
2609 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/*
		 * If the retriever uses FF-A v1.0, compute the composite
		 * offset as it would have been laid out in the retrieve
		 * response sent to it.
		 */
2615 composite_constituents_offset =
2616 sizeof(struct ffa_memory_region_v1_0) +
2617 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
2618 sizeof(struct ffa_memory_access) +
2619 sizeof(struct ffa_composite_memory_region);
2620 } else {
2621 panic("%s received an invalid FF-A version.\n", __func__);
2622 }
2623
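	/*
	 * Illustrative sketch with hypothetical numbers: for a transaction
	 * with a single receiver whose composite constituents start at offset
	 * 64 of the retrieve response, if 20 constituent descriptors of
	 * 16 bytes each were carried by the fragments retrieved so far, the
	 * retriever is expected to request offset 64 + 20 * 16 = 384. With
	 * more than one receiver, the access descriptors that are not
	 * replayed in the single-receiver retrieve response are subtracted
	 * from that offset.
	 */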
2624 expected_fragment_offset =
2625 composite_constituents_offset +
2626 retrieved_constituents_count *
2627 sizeof(struct ffa_memory_region_constituent) -
2628 sizeof(struct ffa_memory_access) *
2629 (memory_region->receiver_count - 1);
2630
2631 return expected_fragment_offset;
2632}
2633
Andrew Walbranca808b12020-05-15 17:22:28 +01002634struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2635 ffa_memory_handle_t handle,
2636 uint32_t fragment_offset,
J-Alves59ed0042022-07-28 18:26:41 +01002637 ffa_vm_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002638 struct mpool *page_pool)
2639{
2640 struct ffa_memory_region *memory_region;
2641 struct share_states_locked share_states;
2642 struct ffa_memory_share_state *share_state;
2643 struct ffa_value ret;
2644 uint32_t fragment_index;
2645 uint32_t retrieved_constituents_count;
2646 uint32_t i;
2647 uint32_t expected_fragment_offset;
2648 uint32_t remaining_constituent_count;
2649 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002650 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01002651 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01002652
2653 dump_share_states();
2654
2655 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002656 share_state = get_share_state(share_states, handle);
2657 if (!share_state) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002658 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2659 handle);
2660 ret = ffa_error(FFA_INVALID_PARAMETERS);
2661 goto out;
2662 }
2663
2664 memory_region = share_state->memory_region;
2665 CHECK(memory_region != NULL);
2666
Andrew Walbranca808b12020-05-15 17:22:28 +01002667 if (!share_state->sending_complete) {
2668 dlog_verbose(
2669 "Memory with handle %#x not fully sent, can't "
2670 "retrieve.\n",
2671 handle);
2672 ret = ffa_error(FFA_INVALID_PARAMETERS);
2673 goto out;
2674 }
2675
	/*
	 * If a retrieve request from the hypervisor has already been
	 * initiated for the given share_state, continue it; otherwise assume
	 * this is the continuation of a retrieve request from a NWd VM.
	 */
2681 continue_ffa_hyp_mem_retrieve_req =
2682 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
2683 (share_state->hypervisor_fragment_count != 0U) &&
2684 plat_ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01002685
J-Alves59ed0042022-07-28 18:26:41 +01002686 if (!continue_ffa_hyp_mem_retrieve_req) {
2687 receiver_index = ffa_memory_region_get_receiver(
2688 memory_region, to_locked.vm->id);
2689
2690 if (receiver_index == memory_region->receiver_count) {
2691 dlog_verbose(
				"Caller of FFA_MEM_FRAG_RX (%x) is not a "
				"borrower in memory sharing transaction %#x.\n",
2694 to_locked.vm->id, handle);
2695 ret = ffa_error(FFA_INVALID_PARAMETERS);
2696 goto out;
2697 }
2698
2699 if (share_state->retrieved_fragment_count[receiver_index] ==
2700 0 ||
2701 share_state->retrieved_fragment_count[receiver_index] >=
2702 share_state->fragment_count) {
2703 dlog_verbose(
2704 "Retrieval of memory with handle %#x not yet "
2705 "started or already completed (%d/%d fragments "
2706 "retrieved).\n",
2707 handle,
2708 share_state->retrieved_fragment_count
2709 [receiver_index],
2710 share_state->fragment_count);
2711 ret = ffa_error(FFA_INVALID_PARAMETERS);
2712 goto out;
2713 }
2714
2715 fragment_index =
2716 share_state->retrieved_fragment_count[receiver_index];
2717 } else {
2718 if (share_state->hypervisor_fragment_count == 0 ||
2719 share_state->hypervisor_fragment_count >=
2720 share_state->fragment_count) {
2721 dlog_verbose(
				"Retrieval of memory with handle %#x not "
				"started by the hypervisor.\n",
2724 handle);
2725 ret = ffa_error(FFA_INVALID_PARAMETERS);
2726 goto out;
2727 }
2728
2729 if (memory_region->sender != sender_vm_id) {
2730 dlog_verbose(
2731 "Sender ID (%x) is not as expected for memory "
2732 "handle %x\n",
2733 sender_vm_id, handle);
2734 ret = ffa_error(FFA_INVALID_PARAMETERS);
2735 goto out;
2736 }
2737
2738 fragment_index = share_state->hypervisor_fragment_count;
2739
2740 receiver_index = 0;
2741 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002742
2743 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002744 * Check that the given fragment offset is correct by counting
2745 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01002746 */
2747 retrieved_constituents_count = 0;
2748 for (i = 0; i < fragment_index; ++i) {
2749 retrieved_constituents_count +=
2750 share_state->fragment_constituent_counts[i];
2751 }
J-Alvesc7484f12022-05-13 12:41:14 +01002752
2753 CHECK(memory_region->receiver_count > 0);
2754
Andrew Walbranca808b12020-05-15 17:22:28 +01002755 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01002756 ffa_memory_retrieve_expected_offset_per_ffa_version(
2757 memory_region, retrieved_constituents_count,
2758 to_locked.vm->ffa_version);
2759
Andrew Walbranca808b12020-05-15 17:22:28 +01002760 if (fragment_offset != expected_fragment_offset) {
2761 dlog_verbose("Fragment offset was %d but expected %d.\n",
2762 fragment_offset, expected_fragment_offset);
2763 ret = ffa_error(FFA_INVALID_PARAMETERS);
2764 goto out;
2765 }
2766
J-Alves59ed0042022-07-28 18:26:41 +01002767 /* VMs acquire the RX buffer from SPMC. */
2768 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2769
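	/*
	 * Copy the next fragment of constituents into the caller's RX
	 * buffer.
	 */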
Andrew Walbranca808b12020-05-15 17:22:28 +01002770 remaining_constituent_count = ffa_memory_fragment_init(
2771 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2772 share_state->fragments[fragment_index],
2773 share_state->fragment_constituent_counts[fragment_index],
2774 &fragment_length);
2775 CHECK(remaining_constituent_count == 0);
2776 to_locked.vm->mailbox.recv_size = fragment_length;
2777 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2778 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002779 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01002780
J-Alves59ed0042022-07-28 18:26:41 +01002781 if (!continue_ffa_hyp_mem_retrieve_req) {
2782 share_state->retrieved_fragment_count[receiver_index]++;
2783 if (share_state->retrieved_fragment_count[receiver_index] ==
2784 share_state->fragment_count) {
2785 ffa_memory_retrieve_complete(share_states, share_state,
2786 page_pool);
2787 }
2788 } else {
2789 share_state->hypervisor_fragment_count++;
2790
2791 ffa_memory_retrieve_complete_from_hyp(share_state);
2792 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002793 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2794 .arg1 = (uint32_t)handle,
2795 .arg2 = (uint32_t)(handle >> 32),
2796 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002797
2798out:
2799 share_states_unlock(&share_states);
2800 dump_share_states();
2801 return ret;
2802}
2803
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002804struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002805 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002806 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002807{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002808 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002809 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002810 struct ffa_memory_share_state *share_state;
2811 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002812 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002813 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01002814 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00002815 bool receivers_relinquished_memory;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002816
Andrew Walbrana65a1322020-04-06 19:32:32 +01002817 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002818 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002819 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01002820 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002821 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002822 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002823 }
2824
Andrew Walbrana65a1322020-04-06 19:32:32 +01002825 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002826 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002827 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01002828 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002829 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002830 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002831 }
2832
2833 dump_share_states();
2834
2835 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01002836 share_state = get_share_state(share_states, handle);
2837 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002838 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002839 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002840 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002841 goto out;
2842 }
2843
Andrew Walbranca808b12020-05-15 17:22:28 +01002844 if (!share_state->sending_complete) {
2845 dlog_verbose(
2846 "Memory with handle %#x not fully sent, can't "
2847 "relinquish.\n",
2848 handle);
2849 ret = ffa_error(FFA_INVALID_PARAMETERS);
2850 goto out;
2851 }
2852
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002853 memory_region = share_state->memory_region;
2854 CHECK(memory_region != NULL);
2855
J-Alves8eb19162022-04-28 10:56:48 +01002856 receiver_index = ffa_memory_region_get_receiver(memory_region,
2857 from_locked.vm->id);
2858
2859 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002860 dlog_verbose(
			"VM ID %d tried to relinquish memory region "
			"with handle %#x but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01002863 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002864 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002865 goto out;
2866 }
2867
J-Alves8eb19162022-04-28 10:56:48 +01002868 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01002869 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002870 dlog_verbose(
			"Memory with handle %#x not yet fully retrieved, "
			"receiver %x can't relinquish.\n",
2874 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002875 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002876 goto out;
2877 }
2878
	/*
	 * The memory is only cleared once the last borrower relinquishes it,
	 * and only if clearing was requested either in this relinquish call
	 * or earlier in a borrower's retrieve request.
	 */
2883 receivers_relinquished_memory = true;
2884
2885 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2886 struct ffa_memory_access *receiver =
2887 &memory_region->receivers[i];
2888
2889 if (receiver->receiver_permissions.receiver ==
2890 from_locked.vm->id) {
2891 continue;
2892 }
2893
2894 if (share_state->retrieved_fragment_count[i] != 0U) {
2895 receivers_relinquished_memory = false;
2896 break;
2897 }
2898 }
2899
2900 clear = receivers_relinquished_memory &&
2901 (share_state->clear_after_relinquish ||
2902 (relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2903 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002904
2905 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002906 * Clear is not allowed for memory that was shared, as the
2907 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002908 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002909 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002910 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002911 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002912 goto out;
2913 }
2914
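	/*
	 * Validate the relinquish transition and update the borrower's page
	 * tables, clearing the memory first if required.
	 */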
Andrew Walbranca808b12020-05-15 17:22:28 +01002915 ret = ffa_relinquish_check_update(
J-Alves3c5b2072022-11-21 12:45:40 +00002916 from_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002917 share_state->fragment_constituent_counts,
2918 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002919
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002920 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002921 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002922 * Mark memory handle as not retrieved, so it can be
2923 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002924 */
J-Alves8eb19162022-04-28 10:56:48 +01002925 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002926 }
2927
2928out:
2929 share_states_unlock(&share_states);
2930 dump_share_states();
2931 return ret;
2932}
2933
2934/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01002935 * Validates that the reclaim transition is allowed for the given
2936 * handle, updates the page table of the reclaiming VM, and frees the
2937 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002938 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002939struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01002940 ffa_memory_handle_t handle,
2941 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002942 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002943{
2944 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002945 struct ffa_memory_share_state *share_state;
2946 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002947 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002948
2949 dump_share_states();
2950
2951 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01002952
Karl Meakin4a2854a2023-06-30 16:26:52 +01002953 share_state = get_share_state(share_states, handle);
2954 if (!share_state) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002955 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002956 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002957 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002958 goto out;
2959 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01002960 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002961
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002962 CHECK(memory_region != NULL);
2963
J-Alvesa9cd7e32022-07-01 13:49:33 +01002964 if (vm_id_is_current_world(to_locked.vm->id) &&
2965 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002966 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01002967 "VM %#x attempted to reclaim memory handle %#x "
2968 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002969 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002970 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002971 goto out;
2972 }
2973
Andrew Walbranca808b12020-05-15 17:22:28 +01002974 if (!share_state->sending_complete) {
2975 dlog_verbose(
2976 "Memory with handle %#x not fully sent, can't "
2977 "reclaim.\n",
2978 handle);
2979 ret = ffa_error(FFA_INVALID_PARAMETERS);
2980 goto out;
2981 }
2982
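	/*
	 * The owner can only reclaim once every borrower has relinquished
	 * its access, i.e. all retrieved fragment counts are back to zero.
	 */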
J-Alves752236c2022-04-28 11:07:47 +01002983 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2984 if (share_state->retrieved_fragment_count[i] != 0) {
2985 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002986 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00002987 "that has not been relinquished by all "
				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01002989 handle,
2990 memory_region->receivers[i]
2991 .receiver_permissions.receiver);
2992 ret = ffa_error(FFA_DENIED);
2993 goto out;
2994 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002995 }
2996
Andrew Walbranca808b12020-05-15 17:22:28 +01002997 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00002998 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002999 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003000 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01003001 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003002
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003003 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003004 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003005 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003006 }
3007
3008out:
3009 share_states_unlock(&share_states);
3010 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003011}