/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include <stdint.h>

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/panic.h"
#include "hf/plat/memory_protect.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
	struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
	struct ffa_memory_access *receiver;
	uint32_t composite_offset;

	CHECK(receiver_index < memory_region->receiver_count);

	receiver =
		ffa_memory_region_get_receiver(memory_region, receiver_index);
	CHECK(receiver != NULL);

	composite_offset = receiver->composite_memory_region_offset;

	CHECK(composite_offset != 0);

	return composite_offset + sizeof(struct ffa_composite_memory_region);
}

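/*
 * Illustrative layout (a sketch of the transaction descriptor as used in this
 * file, not normative): the composite offset is relative to the start of the
 * `ffa_memory_region`, and the constituent array sits immediately after the
 * composite header, hence the sizeof() added above.
 *
 *	struct ffa_memory_region
 *	|-- memory access descriptors (one per receiver)
 *	|-- struct ffa_composite_memory_region	<- composite_offset
 *	    |-- constituents[0]			<- offset returned above
 *	    |-- constituents[1] ...
 */
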
/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}

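/*
 * Illustrative note: handles made by `allocate_share_state` via
 * `plat_ffa_memory_handle_make(i)` carry the allocator-identifying bits under
 * FFA_MEMORY_HANDLE_ALLOCATOR_MASK and the share state index in the remaining
 * bits, so clearing the mask bits recovers the index `i`. The exact bit layout
 * is defined by the platform, not here.
 */
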
/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle)
{
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
			     ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			return allocated_state;
		}
	}

	return NULL;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_get_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

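/*
 * Worked example with hypothetical numbers: if the composite constituent
 * offset is 64 bytes and two fragments holding 3 and 2 constituents have been
 * received so far, the next fragment is expected at
 * 64 + (3 + 2) * sizeof(struct ffa_memory_region_constituent).
 */
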
static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x, "
	     "tag %u, memory access descriptor size %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->memory_access_desc_size,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		if (i != 0) {
			dlog(", ");
		}
		dlog("Receiver %#x: %#x (offset %u)",
		     receiver->receiver_permissions.receiver,
		     receiver->receiver_permissions.permissions,
		     receiver->composite_memory_region_offset);
		/* The impdef field is only present from v1.2 and later. */
		if (ffa_version_from_memory_access_desc_size(
			    memory_region->memory_access_desc_size) >=
		    MAKE_FFA_VERSION(1, 2)) {
			dlog(", impdef: %#x %#x", receiver->impdef.val[0],
			     receiver->impdef.val[1]);
		}
	}
	dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}

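/*
 * Example mappings produced by the helper above (sketch): RO/NX yields
 * MM_MODE_R, RW/X yields MM_MODE_R | MM_MODE_W | MM_MODE_X, and any
 * NOT_SPECIFIED field inherits the corresponding bits from `default_mode`.
 * The other-world (security state) bit is simply carried over from
 * `default_mode`.
 */
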
/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		dlog_verbose("%s: no constituents\n", __func__);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				dlog_verbose("%s: addresses not page-aligned\n",
					     __func__);
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				dlog_verbose(
					"%s: constituent memory range %#x..%#x "
					"not mapped with the same mode\n",
					__func__, ipa_addr(begin),
					ipa_addr(end));
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"%s: expected mode %#x but was %#x for "
					"%d pages at %#x.\n",
					__func__, *orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

uint32_t ffa_version_from_memory_access_desc_size(
	uint32_t memory_access_desc_size)
{
	switch (memory_access_desc_size) {
	/*
	 * v1.0 and v1.1 memory access descriptors are the same size; however,
	 * v1.1 is the first version to include the memory access descriptor
	 * size field, so return v1.1.
	 */
	case sizeof(struct ffa_memory_access_v1_0):
		return MAKE_FFA_VERSION(1, 1);
	case sizeof(struct ffa_memory_access):
		return MAKE_FFA_VERSION(1, 2);
	}
	return 0;
}

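/*
 * Example: a descriptor the size of `struct ffa_memory_access_v1_0` is
 * reported as FF-A v1.1 (v1.0 descriptors are the same size but predate the
 * size field), a descriptor the size of `struct ffa_memory_access` is reported
 * as v1.2, and any other size yields 0 (unknown).
 */
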
/**
 * Check if the receivers' size and offset given are valid for the sender's
 * FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
	uint32_t receivers_size, uint32_t receivers_offset,
	uint32_t ffa_version)
{
	/*
	 * Check that the version that the memory access descriptor size belongs
	 * to is compatible with the FF-A version we believe the sender to be.
	 */
	uint32_t expected_ffa_version =
		ffa_version_from_memory_access_desc_size(receivers_size);
	if (!FFA_VERSIONS_ARE_COMPATIBLE(expected_ffa_version, ffa_version)) {
		return false;
	}

	/*
	 * Check the receivers_offset matches the version we found from the
	 * memory access descriptor size.
	 */
	switch (expected_ffa_version) {
	case MAKE_FFA_VERSION(1, 1):
	case MAKE_FFA_VERSION(1, 2):
		return receivers_offset == sizeof(struct ffa_memory_region);
	default:
		return false;
	}
}

/**
 * Check that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
				    uint32_t ffa_version,
				    uint32_t fragment_length,
				    bool send_transaction)
{
	uint32_t receiver_count;
	struct ffa_memory_access *receiver;
	uint32_t composite_offset_0;

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct ffa_memory_region_v1_0 *memory_region_v1_0 =
			(struct ffa_memory_region_v1_0 *)memory_region;
		/* Check the reserved fields are 0. */
		if (memory_region_v1_0->reserved_0 != 0 ||
		    memory_region_v1_0->reserved_1 != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		receiver_count = memory_region_v1_0->receiver_count;
	} else {
		uint32_t receivers_size =
			memory_region->memory_access_desc_size;
		uint32_t receivers_offset = memory_region->receivers_offset;

		/* Check the reserved fields are 0. */
		if (memory_region->reserved[0] != 0 ||
		    memory_region->reserved[1] != 0 ||
		    memory_region->reserved[2] != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		/*
		 * Check memory_access_desc_size matches the size of the struct
		 * for the sender's FF-A version.
		 */
		if (!receiver_size_and_offset_valid_for_version(
			    receivers_size, receivers_offset, ffa_version)) {
			dlog_verbose(
				"Invalid memory access descriptor size %d "
				"or receiver offset %d "
				"for FF-A version %#x\n",
				receivers_size, receivers_offset, ffa_version);
			return false;
		}

		receiver_count = memory_region->receiver_count;
	}

	/* Check receiver count is not too large. */
	if (receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
		dlog_verbose(
			"Max number of recipients supported is %u, "
			"specified %u\n",
			MAX_MEM_SHARE_RECIPIENTS, receiver_count);
		return false;
	}

	/* Check values in the memory access descriptors. */
	/*
	 * The composite offset values must be the same for all receivers, so
	 * check that the first one is valid and then that they are all the
	 * same.
	 */
	receiver = ffa_version == MAKE_FFA_VERSION(1, 0)
			   ? (struct ffa_memory_access *)&(
				     (struct ffa_memory_region_v1_0 *)
					     memory_region)
				      ->receivers[0]
			   : ffa_memory_region_get_receiver(memory_region, 0);
	assert(receiver != NULL);
	composite_offset_0 = receiver->composite_memory_region_offset;

	if (!send_transaction) {
		if (composite_offset_0 != 0) {
			dlog_verbose(
				"Composite memory region descriptor "
				"offset must be 0 for retrieve requests. "
				"Currently %d\n",
				composite_offset_0);
			return false;
		}
	} else {
		bool comp_offset_is_zero = composite_offset_0 == 0U;
		bool comp_offset_lt_transaction_descriptor_size =
			composite_offset_0 <
			(sizeof(struct ffa_memory_region) +
			 (uint32_t)(memory_region->memory_access_desc_size *
				    memory_region->receiver_count));
		bool comp_offset_with_comp_gt_fragment_length =
			composite_offset_0 +
				sizeof(struct ffa_composite_memory_region) >
			fragment_length;
		if (comp_offset_is_zero ||
		    comp_offset_lt_transaction_descriptor_size ||
		    comp_offset_with_comp_gt_fragment_length) {
			dlog_verbose(
				"Invalid composite memory region descriptor "
				"offset for send transaction %u\n",
				composite_offset_0);
			return false;
		}
	}

	for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
		uint32_t composite_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			struct ffa_memory_region_v1_0 *memory_region_v1_0 =
				(struct ffa_memory_region_v1_0 *)memory_region;

			struct ffa_memory_access_v1_0 *receiver_v1_0 =
				&memory_region_v1_0->receivers[i];
			/* Check reserved fields are 0. */
			if (receiver_v1_0->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver_v1_0->reserved_0);
				return false;
			}
			/*
			 * We can cast to the current version receiver as the
			 * remaining fields we are checking have the same
			 * offsets for all versions since memory access
			 * descriptors are forwards compatible.
			 */
			receiver = (struct ffa_memory_access *)receiver_v1_0;
		} else {
			receiver = ffa_memory_region_get_receiver(memory_region,
								  i);
			assert(receiver != NULL);

			if (receiver->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver->reserved_0);
				return false;
			}
		}

		/* Check composite offset values are equal for all receivers. */
		composite_offset = receiver->composite_memory_region_offset;
		if (composite_offset != composite_offset_0) {
			dlog_verbose(
				"Composite offset %x differs from %x at index "
				"%u\n",
				composite_offset, composite_offset_0, i);
			return false;
		}
	}
	return true;
}

/**
 * If the receivers for the memory management operation are all from the
 * secure world and this isn't an FFA_MEM_SHARE, then request a memory security
 * state update by returning MAP_ACTION_CHECK_PROTECT.
 */
static enum ffa_map_action ffa_mem_send_get_map_action(
	bool all_receivers_from_current_world, ffa_id_t sender_id,
	uint32_t mem_func_id)
{
	bool protect_memory =
		(mem_func_id != FFA_MEM_SHARE_32 &&
		 all_receivers_from_current_world && ffa_is_vm_id(sender_id));

	return protect_memory ? MAP_ACTION_CHECK_PROTECT : MAP_ACTION_CHECK;
}

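/*
 * Decision sketch for the helper above: protection is only requested when the
 * memory is leaving the normal world, i.e. the sender is a VM, every receiver
 * is a current (secure) world endpoint, and the operation is a lend or donate.
 * FFA_MEM_SHARE keeps the sender's access, so no security state change is
 * requested for it.
 */
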
/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM. It outputs the mapping action that needs to be
 * invoked for the given memory range. On memory lend/donate there
 * could be a need to protect the memory from the normal world.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode, enum ffa_map_action *map_action)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;
	bool all_receivers_from_current_world = true;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if ((*orig_from_mode & MM_MODE_D) != 0U) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		/*
		 * The assumption is that at this point, an operation from an
		 * SP to a receiver VM should already have returned an
		 * FFA_ERROR.
		 */
		if (!ffa_is_vm_id(from.vm->id)) {
			assert(!ffa_is_vm_id(
				receivers[i].receiver_permissions.receiver));
		}

		/* Track if all receivers are from the current world. */
		all_receivers_from_current_world =
			all_receivers_from_current_world &&
			vm_id_is_current_world(
				receivers[i].receiver_permissions.receiver);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	*map_action = ffa_mem_send_get_map_action(
		all_receivers_from_current_world, from.vm->id, share_func);

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode, bool memory_protected)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, the orig_to_mode is expected to overlap with
		 * `state_mask`, as a result of the function
		 * `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from the virtual FF-A instance:
		 * ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}

		/*
		 * If the memory has been protected before, clear the NS bit to
		 * allow secure access from the SP.
		 */
		if (memory_protected) {
			*to_mode &= ~plat_ffa_other_world_mode();
		}
	}

	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/*
 * Performs the operations related to the `action` MAP_ACTION_CHECK*.
 * Returns:
 *  - FFA_SUCCESS_32: if all goes well.
 *  - FFA_ERROR_32: with FFA_NO_MEMORY if there is no memory to manage the
 *    page table update, or with the error code provided by the function
 *    `arch_memory_protect`.
 */
static struct ffa_value ffa_region_group_check_actions(
	struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
	struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
	bool *memory_protected)
{
	struct ffa_value ret;
	bool is_memory_protected;

	if (!vm_identity_prepare(vm_locked, pa_begin, pa_end, mode, ppool)) {
		dlog_verbose(
			"%s: memory can't be mapped to %x due to lack of "
			"memory. Base: %lx end: %lx\n",
			__func__, vm_locked.vm->id, pa_addr(pa_begin),
			pa_addr(pa_end));
		return ffa_error(FFA_NO_MEMORY);
	}

	switch (action) {
	case MAP_ACTION_CHECK:
		/* No protect requested. */
		is_memory_protected = false;
		ret = (struct ffa_value){.func = FFA_SUCCESS_32};
		break;
	case MAP_ACTION_CHECK_PROTECT: {
		paddr_t last_protected_pa = pa_init(0);

		ret = arch_memory_protect(pa_begin, pa_end, &last_protected_pa);

		is_memory_protected = (ret.func == FFA_SUCCESS_32);

		/*
		 * - If protecting the memory failed with FFA_DENIED, some
		 * range of memory was in the wrong state. In that case, the
		 * SPM reverts the state of the pages that were successfully
		 * updated.
		 * - If protecting the memory failed with FFA_NOT_SUPPORTED,
		 * the platform doesn't support the protection mechanism. That
		 * said, it still permits the page table update to go through.
		 * The variable `is_memory_protected` will be false.
		 * - If protecting the memory failed with
		 * FFA_INVALID_PARAMETERS, break from the switch and return
		 * the error.
		 */
		if (ret.func == FFA_ERROR_32) {
			assert(!is_memory_protected);
			if (ffa_error_code(ret) == FFA_DENIED &&
			    pa_addr(last_protected_pa) != (uintptr_t)0) {
				CHECK(arch_memory_unprotect(
					pa_begin,
					pa_add(last_protected_pa, PAGE_SIZE)));
			} else if (ffa_error_code(ret) == FFA_NOT_SUPPORTED) {
				ret = (struct ffa_value){
					.func = FFA_SUCCESS_32,
				};
			}
		}
	} break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}

	if (memory_protected != NULL) {
		*memory_protected = is_memory_protected;
	}

	return ret;
}

static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
					    paddr_t pa_begin, paddr_t pa_end,
					    struct mpool *ppool, uint32_t mode,
					    enum ffa_map_action action)
{
	switch (action) {
	case MAP_ACTION_COMMIT_UNPROTECT:
		/*
		 * This should succeed because the SPM is unprotecting memory
		 * that it protected before.
		 */
		CHECK(arch_memory_unprotect(pa_begin, pa_end));
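		/* Fall through: the commit below is still performed. */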
1028 case MAP_ACTION_COMMIT:
1029 vm_identity_commit(vm_locked, pa_begin, pa_end, mode, ppool,
1030 NULL);
1031 break;
1032 default:
1033 panic("%s: invalid action to process %x\n", __func__, action);
1034 }
1035}
1036
Jose Marinho09b1db82019-08-08 09:16:59 +01001037/**
1038 * Updates a VM's page table such that the given set of physical address ranges
1039 * are mapped in the address space at the corresponding address ranges, in the
1040 * mode provided.
1041 *
J-Alves0a83dc22023-05-05 09:50:37 +01001042 * The enum ffa_map_action determines the action taken from a call to the
1043 * function below:
1044 * - If action is MAP_ACTION_CHECK, the page tables will be allocated from the
1045 * mpool but no mappings will actually be updated. This function must always
1046 * be called first with action set to MAP_ACTION_CHECK to check that it will
1047 * succeed before calling ffa_region_group_identity_map with whichever one of
1048 * the remaining actions, to avoid leaving the page table in a half-updated
1049 * state.
1050 * - The action MAP_ACTION_COMMIT allocates the page tables from the mpool, and
1051 * changes the memory mappings.
J-Alvescf6253e2024-01-03 13:48:48 +00001052 * - The action MAP_ACTION_CHECK_PROTECT extends the MAP_ACTION_CHECK with an
1053 * invocation to the monitor to update the security state of the memory,
1054 * to that of the SPMC.
1055 * - The action MAP_ACTION_COMMIT_UNPROTECT extends the MAP_ACTION_COMMIT
1056 * with a call into the monitor, to reset the security state of memory
1057 * that has priorly been mapped with the MAP_ACTION_CHECK_PROTECT action.
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001058 * vm_ptable_defrag should always be called after a series of page table
1059 * updates, whether they succeed or fail.
Jose Marinho09b1db82019-08-08 09:16:59 +01001060 *
J-Alvescf6253e2024-01-03 13:48:48 +00001061 * If all goes well, returns FFA_SUCCESS_32; or FFA_ERROR, with following
1062 * error codes:
1063 * - FFA_INVALID_PARAMETERS: invalid range of memory.
1064 * - FFA_DENIED:
1065 *
Jose Marinho09b1db82019-08-08 09:16:59 +01001066 * made to memory mappings.
1067 */
J-Alvescf6253e2024-01-03 13:48:48 +00001068struct ffa_value ffa_region_group_identity_map(
Andrew Walbranf4b51af2020-02-03 14:44:54 +00001069 struct vm_locked vm_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001070 struct ffa_memory_region_constituent **fragments,
1071 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alvescf6253e2024-01-03 13:48:48 +00001072 uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
1073 bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001074{
Andrew Walbranca808b12020-05-15 17:22:28 +01001075 uint32_t i;
1076 uint32_t j;
J-Alvescf6253e2024-01-03 13:48:48 +00001077 struct ffa_value ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001078
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001079 if (vm_locked.vm->el0_partition) {
1080 mode |= MM_MODE_USER | MM_MODE_NG;
1081 }
1082
Andrew Walbranca808b12020-05-15 17:22:28 +01001083 /* Iterate over the memory region constituents within each fragment. */
1084 for (i = 0; i < fragment_count; ++i) {
1085 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1086 size_t size = fragments[i][j].page_count * PAGE_SIZE;
1087 paddr_t pa_begin =
1088 pa_from_ipa(ipa_init(fragments[i][j].address));
1089 paddr_t pa_end = pa_add(pa_begin, size);
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001090 uint32_t pa_bits =
1091 arch_mm_get_pa_bits(arch_mm_get_pa_range());
Federico Recanati4fd065d2021-12-13 20:06:23 +01001092
1093 /*
1094 * Ensure the requested region falls into system's PA
1095 * range.
1096 */
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001097 if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
1098 ((pa_addr(pa_end) >> pa_bits) > 0)) {
Federico Recanati4fd065d2021-12-13 20:06:23 +01001099 dlog_error("Region is outside of PA Range\n");
J-Alvescf6253e2024-01-03 13:48:48 +00001100 return ffa_error(FFA_INVALID_PARAMETERS);
Federico Recanati4fd065d2021-12-13 20:06:23 +01001101 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001102
J-Alvescf6253e2024-01-03 13:48:48 +00001103 if (action <= MAP_ACTION_CHECK_PROTECT) {
1104 ret = ffa_region_group_check_actions(
1105 vm_locked, pa_begin, pa_end, ppool,
1106 mode, action, memory_protected);
1107 } else if (action >= MAP_ACTION_COMMIT &&
1108 action < MAP_ACTION_MAX) {
1109 ffa_region_group_commit_actions(
1110 vm_locked, pa_begin, pa_end, ppool,
1111 mode, action);
1112 ret = (struct ffa_value){
1113 .func = FFA_SUCCESS_32};
1114 } else {
1115 panic("%s: Unknown ffa_map_action.\n",
1116 __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001117 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001118 }
1119 }
1120
J-Alvescf6253e2024-01-03 13:48:48 +00001121 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001122}
1123
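/*
 * Usage sketch (illustrative, mirroring how callers in this file are expected
 * to drive the check/commit split; `from_locked`, `fragments`, `counts`,
 * `count`, `mode` and `pool` are placeholder names):
 *
 *	ret = ffa_region_group_identity_map(from_locked, fragments, counts,
 *					    count, mode, pool,
 *					    MAP_ACTION_CHECK, NULL);
 *	if (ret.func != FFA_SUCCESS_32) {
 *		return ret;	Nothing has been mapped yet.
 *	}
 *	...
 *	The commit cannot fail after a successful check with the same
 *	arguments:
 *	CHECK(ffa_region_group_identity_map(from_locked, fragments, counts,
 *					    count, mode, pool,
 *					    MAP_ACTION_COMMIT, NULL)
 *		      .func == FFA_SUCCESS_32);
 */
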
1124/**
1125 * Clears a region of physical memory by overwriting it with zeros. The data is
1126 * flushed from the cache so the memory has been cleared across the system.
1127 */
J-Alves7db32002021-12-14 14:44:50 +00001128static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
1129 uint32_t extra_mode_attributes)
Jose Marinho09b1db82019-08-08 09:16:59 +01001130{
1131 /*
Fuad Tabbaed294af2019-12-20 10:43:01 +00001132 * TODO: change this to a CPU local single page window rather than a
Jose Marinho09b1db82019-08-08 09:16:59 +01001133 * global mapping of the whole range. Such an approach will limit
1134 * the changes to stage-1 tables and will allow only local
1135 * invalidation.
1136 */
1137 bool ret;
1138 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
J-Alves7db32002021-12-14 14:44:50 +00001139 void *ptr = mm_identity_map(stage1_locked, begin, end,
1140 MM_MODE_W | (extra_mode_attributes &
1141 plat_ffa_other_world_mode()),
1142 ppool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001143 size_t size = pa_difference(begin, end);
1144
1145 if (!ptr) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001146 goto fail;
1147 }
1148
1149 memset_s(ptr, size, 0, size);
1150 arch_mm_flush_dcache(ptr, size);
1151 mm_unmap(stage1_locked, begin, end, ppool);
1152
1153 ret = true;
1154 goto out;
1155
1156fail:
1157 ret = false;
1158
1159out:
1160 mm_unlock_stage1(&stage1_locked);
1161
1162 return ret;
1163}
1164
1165/**
1166 * Clears a region of physical memory by overwriting it with zeros. The data is
1167 * flushed from the cache so the memory has been cleared across the system.
1168 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001169static bool ffa_clear_memory_constituents(
J-Alves7db32002021-12-14 14:44:50 +00001170 uint32_t security_state_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01001171 struct ffa_memory_region_constituent **fragments,
1172 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1173 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001174{
1175 struct mpool local_page_pool;
Andrew Walbranca808b12020-05-15 17:22:28 +01001176 uint32_t i;
Jose Marinho09b1db82019-08-08 09:16:59 +01001177 bool ret = false;
1178
1179 /*
1180 * Create a local pool so any freed memory can't be used by another
1181 * thread. This is to ensure each constituent that is mapped can be
1182 * unmapped again afterwards.
1183 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001184 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001185
Andrew Walbranca808b12020-05-15 17:22:28 +01001186 /* Iterate over the memory region constituents within each fragment. */
1187 for (i = 0; i < fragment_count; ++i) {
1188 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001189
J-Alves8457f932023-10-11 16:41:45 +01001190 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001191 size_t size = fragments[i][j].page_count * PAGE_SIZE;
1192 paddr_t begin =
1193 pa_from_ipa(ipa_init(fragments[i][j].address));
1194 paddr_t end = pa_add(begin, size);
1195
J-Alves7db32002021-12-14 14:44:50 +00001196 if (!clear_memory(begin, end, &local_page_pool,
1197 security_state_mode)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001198 /*
1199 * api_clear_memory will defrag on failure, so
1200 * no need to do it here.
1201 */
1202 goto out;
1203 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001204 }
1205 }
1206
Jose Marinho09b1db82019-08-08 09:16:59 +01001207 ret = true;
1208
1209out:
1210 mpool_fini(&local_page_pool);
1211 return ret;
1212}
1213
J-Alves5952d942022-12-22 16:03:00 +00001214static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
1215 ipaddr_t in_begin, ipaddr_t in_end)
1216{
1217 return (ipa_addr(begin) >= ipa_addr(in_begin) &&
1218 ipa_addr(begin) < ipa_addr(in_end)) ||
1219 (ipa_addr(end) <= ipa_addr(in_end) &&
1220 ipa_addr(end) > ipa_addr(in_begin));
1221}
1222
1223/**
1224 * Receives a memory range and looks for overlaps with the remainder
1225 * constituents of the memory share/lend/donate operation. Assumes they are
1226 * passed in order to avoid having to loop over all the elements at each call.
1227 * The function only compares the received memory ranges with those that follow
1228 * within the same fragment, and subsequent fragments from the same operation.
1229 */
1230static bool ffa_memory_check_overlap(
1231 struct ffa_memory_region_constituent **fragments,
1232 const uint32_t *fragment_constituent_counts,
1233 const uint32_t fragment_count, const uint32_t current_fragment,
1234 const uint32_t current_constituent)
1235{
1236 uint32_t i = current_fragment;
1237 uint32_t j = current_constituent;
1238 ipaddr_t current_begin = ipa_init(fragments[i][j].address);
1239 const uint32_t current_page_count = fragments[i][j].page_count;
1240 size_t current_size = current_page_count * PAGE_SIZE;
1241 ipaddr_t current_end = ipa_add(current_begin, current_size - 1);
1242
1243 if (current_size == 0 ||
1244 current_size > UINT64_MAX - ipa_addr(current_begin)) {
1245 dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
 1246			     ipa_addr(current_begin), current_page_count);
1247 return false;
1248 }
1249
1250 for (; i < fragment_count; i++) {
1251 j = (i == current_fragment) ? j + 1 : 0;
1252
1253 for (; j < fragment_constituent_counts[i]; j++) {
1254 ipaddr_t begin = ipa_init(fragments[i][j].address);
1255 const uint32_t page_count = fragments[i][j].page_count;
1256 size_t size = page_count * PAGE_SIZE;
1257 ipaddr_t end = ipa_add(begin, size - 1);
1258
1259 if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
1260 dlog_verbose(
1261 "Invalid page count. Addr: %x "
1262 "page_count: %x\n",
 1263					ipa_addr(begin), page_count);
1264 return false;
1265 }
1266
1267 /*
 1268			 * Check whether [begin, end] falls within the current
 1269			 * range, and the reverse, so that either direction of
 1270			 * overlap is caught without extra iterations.
1271 */
1272 if (is_memory_range_within(begin, end, current_begin,
1273 current_end) ||
1274 is_memory_range_within(current_begin, current_end,
1275 begin, end)) {
1276 dlog_verbose(
1277 "Overlapping memory ranges: %#x - %#x "
1278 "with %#x - %#x\n",
1279 ipa_addr(begin), ipa_addr(end),
1280 ipa_addr(current_begin),
1281 ipa_addr(current_end));
1282 return true;
1283 }
1284 }
1285 }
1286
1287 return false;
1288}
1289
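/*
 * Standalone sketch of the overlap rule applied above, on plain integers so
 * it can be reasoned about in isolation: two inclusive ranges overlap when
 * either one starts inside the other. This is a simplified restatement of
 * the pair of is_memory_range_within() checks, not a drop-in replacement.
 */
static inline bool ranges_overlap_sketch(uint64_t a_begin, uint64_t a_end,
					 uint64_t b_begin, uint64_t b_end)
{
	return (a_begin >= b_begin && a_begin <= b_end) ||
	       (b_begin >= a_begin && b_begin <= a_end);
}

/*
 * Example: a 2-page constituent at 0x1000 spans [0x1000, 0x2fff], so a later
 * constituent starting at 0x2000 overlaps it, while one starting at 0x3000
 * does not.
 */
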
Jose Marinho09b1db82019-08-08 09:16:59 +01001290/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001291 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001292 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001293 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001294 *
1295 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001296 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001297 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001298 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001299 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1300 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001301 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001302 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001303 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001304 */
J-Alves66652252022-07-06 09:49:51 +01001305struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001306 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001307 struct ffa_memory_region_constituent **fragments,
1308 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001309 uint32_t composite_total_page_count, uint32_t share_func,
1310 struct ffa_memory_access *receivers, uint32_t receivers_count,
J-Alves460d36c2023-10-12 17:02:15 +01001311 struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret,
1312 bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001313{
Andrew Walbranca808b12020-05-15 17:22:28 +01001314 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001315 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001316 uint32_t orig_from_mode;
J-Alves460d36c2023-10-12 17:02:15 +01001317 uint32_t clean_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001318 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001319 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001320 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001321 uint32_t constituents_total_page_count = 0;
J-Alves460d36c2023-10-12 17:02:15 +01001322 enum ffa_map_action map_action = MAP_ACTION_CHECK;
Jose Marinho09b1db82019-08-08 09:16:59 +01001323
1324 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001325 * Make sure constituents are properly aligned to a 64-bit boundary. If
1326 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001327 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001328 for (i = 0; i < fragment_count; ++i) {
1329 if (!is_aligned(fragments[i], 8)) {
1330 dlog_verbose("Constituents not aligned.\n");
1331 return ffa_error(FFA_INVALID_PARAMETERS);
1332 }
J-Alves8f11cde2022-12-21 16:18:22 +00001333 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1334 constituents_total_page_count +=
1335 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001336 if (ffa_memory_check_overlap(
1337 fragments, fragment_constituent_counts,
1338 fragment_count, i, j)) {
1339 return ffa_error(FFA_INVALID_PARAMETERS);
1340 }
J-Alves8f11cde2022-12-21 16:18:22 +00001341 }
1342 }
1343
1344 if (constituents_total_page_count != composite_total_page_count) {
1345 dlog_verbose(
1346 "Composite page count differs from calculated page "
1347 "count from constituents.\n");
1348 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001349 }
1350
1351 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001352	 * Check if the state transition is lawful for the sender, and ensure that
 1353	 * all constituents of the memory region being shared are in the same
 1354	 * state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001355 */
J-Alves460d36c2023-10-12 17:02:15 +01001356 ret = ffa_send_check_transition(
1357 from_locked, share_func, receivers, receivers_count,
1358 &orig_from_mode, fragments, fragment_constituent_counts,
1359 fragment_count, &from_mode, &map_action);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001360 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001361 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001362 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001363 }
1364
Andrew Walbran37c574e2020-06-03 11:45:46 +01001365 if (orig_from_mode_ret != NULL) {
1366 *orig_from_mode_ret = orig_from_mode;
1367 }
1368
Jose Marinho09b1db82019-08-08 09:16:59 +01001369 /*
1370 * Create a local pool so any freed memory can't be used by another
1371 * thread. This is to ensure the original mapping can be restored if the
1372 * clear fails.
1373 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001374 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001375
1376 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001377 * First reserve all required memory for the new page table entries
1378 * without committing, to make sure the entire operation will succeed
1379 * without exhausting the page pool.
J-Alves460d36c2023-10-12 17:02:15 +01001380 * Provide the map_action as populated by 'ffa_send_check_transition'.
1381 * It may request memory to be protected.
Jose Marinho09b1db82019-08-08 09:16:59 +01001382 */
J-Alvescf6253e2024-01-03 13:48:48 +00001383 ret = ffa_region_group_identity_map(
1384 from_locked, fragments, fragment_constituent_counts,
J-Alves460d36c2023-10-12 17:02:15 +01001385 fragment_count, from_mode, page_pool, map_action,
1386 memory_protected);
J-Alvescf6253e2024-01-03 13:48:48 +00001387 if (ret.func == FFA_ERROR_32) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001388 goto out;
1389 }
1390
1391 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001392 * Update the mapping for the sender. This won't allocate because the
1393 * transaction was already prepared above, but may free pages in the
1394 * case that a whole block is being unmapped that was previously
1395 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001396 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001397 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001398 from_locked, fragments, fragment_constituent_counts,
1399 fragment_count, from_mode, &local_page_pool,
1400 MAP_ACTION_COMMIT, NULL)
1401 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001402
J-Alves460d36c2023-10-12 17:02:15 +01001403 /*
 1404	 * If the memory has been protected, it is now part of the secure PAS
 1405	 * (this happens for lend/donate from NWd to SWd). `orig_from_mode` will
 1406	 * then have MM_MODE_NS set, so clear it in `clean_mode` for the SPM's
 1407	 * S1 translation.
 1408	 * If the memory has not been protected, it remains in the non-secure
 1409	 * PAS (e.g. a memory share from NWd to SWd), so the SPM must perform a
 1410	 * non-secure access and `clean_mode` takes the same value as
 1411	 * `orig_from_mode`.
1412 */
1413 clean_mode = (memory_protected != NULL && *memory_protected)
1414 ? orig_from_mode & ~plat_ffa_other_world_mode()
1415 : orig_from_mode;
1416
Jose Marinho09b1db82019-08-08 09:16:59 +01001417 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves460d36c2023-10-12 17:02:15 +01001418 if (clear && !ffa_clear_memory_constituents(
1419 clean_mode, fragments, fragment_constituent_counts,
1420 fragment_count, page_pool)) {
1421 map_action = (memory_protected != NULL && *memory_protected)
1422 ? MAP_ACTION_COMMIT_UNPROTECT
1423 : MAP_ACTION_COMMIT;
1424
Jose Marinho09b1db82019-08-08 09:16:59 +01001425 /*
1426 * On failure, roll back by returning memory to the sender. This
1427 * may allocate pages which were previously freed into
1428 * `local_page_pool` by the call above, but will never allocate
1429 * more pages than that so can never fail.
1430 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001431 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001432 from_locked, fragments,
1433 fragment_constituent_counts, fragment_count,
1434 orig_from_mode, &local_page_pool,
1435 MAP_ACTION_COMMIT, NULL)
1436 .func == FFA_SUCCESS_32);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001437 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001438 goto out;
1439 }
1440
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001441 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001442
1443out:
1444 mpool_fini(&local_page_pool);
1445
1446 /*
1447 * Tidy up the page table by reclaiming failed mappings (if there was an
1448 * error) or merging entries into blocks where possible (on success).
1449 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001450 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001451
1452 return ret;
1453}
1454
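/*
 * Hedged sketch of the two-phase mapping idiom used above and in the
 * retrieve/relinquish paths below: reserve every page-table entry with
 * MAP_ACTION_CHECK first, and only commit once the whole operation is known
 * to fit, so the commit step can never fail part-way through. The wrapper is
 * illustrative only; argument names mirror the existing call sites.
 */
static inline struct ffa_value ffa_map_check_then_commit_sketch(
	struct vm_locked locked_vm,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *page_pool)
{
	struct ffa_value ret = ffa_region_group_identity_map(
		locked_vm, fragments, fragment_constituent_counts,
		fragment_count, mode, page_pool, MAP_ACTION_CHECK, NULL);

	if (ret.func == FFA_ERROR_32) {
		/* Nothing has been mapped yet, so it is safe to bail out. */
		return ret;
	}

	/* Cannot allocate more than the CHECK pass already reserved. */
	return ffa_region_group_identity_map(
		locked_vm, fragments, fragment_constituent_counts,
		fragment_count, mode, page_pool, MAP_ACTION_COMMIT, NULL);
}
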
1455/**
1456 * Validates and maps memory shared from one VM to another.
1457 *
1458 * This function requires the calling context to hold the <to> lock.
1459 *
1460 * Returns:
1461 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001462 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001463 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001464 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001465 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001466 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001467 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001468struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001469 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001470 struct ffa_memory_region_constituent **fragments,
1471 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001472 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
J-Alves460d36c2023-10-12 17:02:15 +01001473 struct mpool *page_pool, uint32_t *response_mode, bool memory_protected)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001474{
Andrew Walbranca808b12020-05-15 17:22:28 +01001475 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001476 uint32_t to_mode;
1477 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001478 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001479
1480 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001481 * Make sure constituents are properly aligned to a 64-bit boundary. If
1482 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001483 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001484 for (i = 0; i < fragment_count; ++i) {
1485 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001486 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001487 return ffa_error(FFA_INVALID_PARAMETERS);
1488 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001489 }
1490
1491 /*
1492 * Check if the state transition is lawful for the recipient, and ensure
 1493	 * that all constituents of the memory region being retrieved are in the
 1494	 * same state.
1495 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001496 ret = ffa_retrieve_check_transition(
1497 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alves460d36c2023-10-12 17:02:15 +01001498 fragment_count, sender_orig_mode, &to_mode, memory_protected);
1499
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001500 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001501 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001502 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001503 }
1504
1505 /*
1506 * Create a local pool so any freed memory can't be used by another
1507 * thread. This is to ensure the original mapping can be restored if the
1508 * clear fails.
1509 */
1510 mpool_init_with_fallback(&local_page_pool, page_pool);
1511
1512 /*
1513 * First reserve all required memory for the new page table entries in
1514 * the recipient page tables without committing, to make sure the entire
1515 * operation will succeed without exhausting the page pool.
1516 */
J-Alvescf6253e2024-01-03 13:48:48 +00001517 ret = ffa_region_group_identity_map(
1518 to_locked, fragments, fragment_constituent_counts,
1519 fragment_count, to_mode, page_pool, MAP_ACTION_CHECK, NULL);
1520 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001521 /* TODO: partial defrag of failed range. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001522 goto out;
1523 }
1524
1525 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001526 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001527 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1528 fragment_constituent_counts,
1529 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001530 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001531 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001532 goto out;
1533 }
1534
Jose Marinho09b1db82019-08-08 09:16:59 +01001535 /*
1536 * Complete the transfer by mapping the memory into the recipient. This
1537 * won't allocate because the transaction was already prepared above, so
1538 * it doesn't need to use the `local_page_pool`.
1539 */
J-Alvescf6253e2024-01-03 13:48:48 +00001540 CHECK(ffa_region_group_identity_map(to_locked, fragments,
1541 fragment_constituent_counts,
1542 fragment_count, to_mode, page_pool,
1543 MAP_ACTION_COMMIT, NULL)
1544 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001545
J-Alves460d36c2023-10-12 17:02:15 +01001546 /* Return the mode used in mapping the memory in retriever's PT. */
1547 if (response_mode != NULL) {
1548 *response_mode = to_mode;
1549 }
1550
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001551 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001552
1553out:
1554 mpool_fini(&local_page_pool);
1555
1556 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001557 * Tidy up the page table by reclaiming failed mappings (if there was an
1558 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001559 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001560 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001561
1562 return ret;
1563}
1564
Andrew Walbran996d1d12020-05-27 14:08:43 +01001565static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001566 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001567 struct ffa_memory_region_constituent **fragments,
1568 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1569 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001570{
1571 uint32_t orig_from_mode;
1572 uint32_t from_mode;
1573 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001574 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001575
Andrew Walbranca808b12020-05-15 17:22:28 +01001576 ret = ffa_relinquish_check_transition(
1577 from_locked, &orig_from_mode, fragments,
1578 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001579 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001580 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001581 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001582 }
1583
1584 /*
1585 * Create a local pool so any freed memory can't be used by another
1586 * thread. This is to ensure the original mapping can be restored if the
1587 * clear fails.
1588 */
1589 mpool_init_with_fallback(&local_page_pool, page_pool);
1590
1591 /*
1592 * First reserve all required memory for the new page table entries
1593 * without committing, to make sure the entire operation will succeed
1594 * without exhausting the page pool.
1595 */
J-Alvescf6253e2024-01-03 13:48:48 +00001596 ret = ffa_region_group_identity_map(
1597 from_locked, fragments, fragment_constituent_counts,
1598 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK, NULL);
1599 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001600 goto out;
1601 }
1602
1603 /*
1604 * Update the mapping for the sender. This won't allocate because the
1605 * transaction was already prepared above, but may free pages in the
1606 * case that a whole block is being unmapped that was previously
1607 * partially mapped.
1608 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001609 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001610 from_locked, fragments, fragment_constituent_counts,
1611 fragment_count, from_mode, &local_page_pool,
1612 MAP_ACTION_COMMIT, NULL)
1613 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001614
1615 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001616 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001617 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1618 fragment_constituent_counts,
1619 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001620 /*
1621 * On failure, roll back by returning memory to the sender. This
1622 * may allocate pages which were previously freed into
1623 * `local_page_pool` by the call above, but will never allocate
1624 * more pages than that so can never fail.
1625 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001626 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001627 from_locked, fragments,
1628 fragment_constituent_counts, fragment_count,
1629 orig_from_mode, &local_page_pool,
1630 MAP_ACTION_COMMIT, NULL)
1631 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001632
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001633 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001634 goto out;
1635 }
1636
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001637 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001638
1639out:
1640 mpool_fini(&local_page_pool);
1641
1642 /*
1643 * Tidy up the page table by reclaiming failed mappings (if there was an
1644 * error) or merging entries into blocks where possible (on success).
1645 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001646 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001647
1648 return ret;
1649}
1650
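/*
 * Sketch of the local page pool idiom shared by the send, retrieve and
 * relinquish updates above: pages freed while remapping go into a pool that
 * is private to this operation, so the rollback path can always reuse them
 * without racing other threads for memory. Illustrative only.
 */
static inline void ffa_local_pool_idiom_sketch(struct mpool *page_pool)
{
	struct mpool local_page_pool;

	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * ... perform the commit with &local_page_pool here; any pages it
	 * frees stay in the local pool and feed a later rollback ...
	 */

	/* Whatever remains is handed back to the fallback pool. */
	mpool_fini(&local_page_pool);
}
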
1651/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001652 * Complete a memory sending operation by checking that it is valid, updating
1653 * the sender page table, and then either marking the share state as having
1654 * completed sending (on success) or freeing it (on failure).
1655 *
1656 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1657 */
J-Alvesfdd29272022-07-19 13:16:31 +01001658struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001659 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001660 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1661 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001662{
1663 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001664 struct ffa_composite_memory_region *composite;
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001665 struct ffa_memory_access *receiver;
Andrew Walbranca808b12020-05-15 17:22:28 +01001666 struct ffa_value ret;
1667
1668 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001669 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001670 assert(memory_region != NULL);
1671 composite = ffa_memory_region_get_composite(memory_region, 0);
1672 assert(composite != NULL);
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001673 receiver = ffa_memory_region_get_receiver(memory_region, 0);
1674 assert(receiver != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001675
1676 /* Check that state is valid in sender page table and update. */
1677 ret = ffa_send_check_update(
1678 from_locked, share_state->fragments,
1679 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001680 share_state->fragment_count, composite->page_count,
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001681 share_state->share_func, receiver,
J-Alves8f11cde2022-12-21 16:18:22 +00001682 memory_region->receiver_count, page_pool,
1683 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
J-Alves460d36c2023-10-12 17:02:15 +01001684 orig_from_mode_ret, &share_state->memory_protected);
Andrew Walbranca808b12020-05-15 17:22:28 +01001685 if (ret.func != FFA_SUCCESS_32) {
1686 /*
1687 * Free share state, it failed to send so it can't be retrieved.
1688 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001689 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1690 __func__, ffa_func_name(ret.func),
1691 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001692 share_state_free(share_states, share_state, page_pool);
1693 return ret;
1694 }
1695
1696 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001697 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001698
J-Alvesee68c542020-10-29 17:48:20 +00001699 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001700}
1701
1702/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001703 * Check that the memory attributes match Hafnium expectations:
1704 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1705 * Write-Allocate Cacheable.
1706 */
1707static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001708 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001709{
1710 enum ffa_memory_type memory_type;
1711 enum ffa_memory_cacheability cacheability;
1712 enum ffa_memory_shareability shareability;
1713
1714 memory_type = ffa_get_memory_type_attr(attributes);
1715 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1716 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1717 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001718 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001719 }
1720
1721 cacheability = ffa_get_memory_cacheability_attr(attributes);
1722 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1723 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1724 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001725 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001726 }
1727
1728 shareability = ffa_get_memory_shareability_attr(attributes);
1729 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
 1730		dlog_verbose("Invalid shareability %#x, expected %#x.\n",
1731 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001732 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001733 }
1734
1735 return (struct ffa_value){.func = FFA_SUCCESS_32};
1736}
1737
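/*
 * Usage sketch for the check above: only Normal, Write-Back cacheable,
 * Inner Shareable attributes (FFA_MEMORY_NORMAL_MEM,
 * FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE) are accepted;
 * anything else yields FFA_DENIED. The helper below is illustrative only.
 */
static inline bool ffa_memory_attributes_are_valid_sketch(
	ffa_memory_attributes_t attributes)
{
	return ffa_memory_attributes_validate(attributes).func ==
	       FFA_SUCCESS_32;
}
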
1738/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001739 * Check that the given `memory_region` represents a valid memory send request
 1740 * of the given `share_func` type, and update the stored receiver
 1741 * permissions where necessary (e.g. setting NX for shared memory).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001742 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001743 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001744 * not.
1745 */
J-Alves66652252022-07-06 09:49:51 +01001746struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001747 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1748 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001749 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001750{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001751 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001752 struct ffa_memory_access *receiver =
1753 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001754 uint64_t receivers_end;
1755 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001756 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001757 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001758 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001759 enum ffa_data_access data_access;
1760 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001761 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001762 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001763 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001764 memory_region->receivers_offset +
1765 memory_region->memory_access_desc_size +
1766 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001767
1768 if (fragment_length < minimum_first_fragment_length) {
1769 dlog_verbose("Fragment length %u too short (min %u).\n",
1770 (size_t)fragment_length,
1771 minimum_first_fragment_length);
1772 return ffa_error(FFA_INVALID_PARAMETERS);
1773 }
1774
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001775 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1776 "struct ffa_memory_region_constituent must be 16 bytes");
1777 if (!is_aligned(fragment_length,
1778 sizeof(struct ffa_memory_region_constituent)) ||
1779 !is_aligned(memory_share_length,
1780 sizeof(struct ffa_memory_region_constituent))) {
1781 dlog_verbose(
1782 "Fragment length %u or total length %u"
1783 " is not 16-byte aligned.\n",
1784 fragment_length, memory_share_length);
1785 return ffa_error(FFA_INVALID_PARAMETERS);
1786 }
1787
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001788 if (fragment_length > memory_share_length) {
1789 dlog_verbose(
1790 "Fragment length %u greater than total length %u.\n",
1791 (size_t)fragment_length, (size_t)memory_share_length);
1792 return ffa_error(FFA_INVALID_PARAMETERS);
1793 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001794
J-Alves95df0ef2022-12-07 10:09:48 +00001795 /* The sender must match the caller. */
1796 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1797 vm_id_is_current_world(memory_region->sender)) ||
1798 (vm_id_is_current_world(from_locked.vm->id) &&
1799 memory_region->sender != from_locked.vm->id)) {
1800 dlog_verbose("Invalid memory sender ID.\n");
1801 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001802 }
1803
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001804 if (memory_region->receiver_count <= 0) {
1805 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001806 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001807 }
1808
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001809 /*
1810 * Ensure that the composite header is within the memory bounds and
1811 * doesn't overlap the first part of the message. Cast to uint64_t
1812 * to prevent overflow.
1813 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001814 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001815 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001816 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001817 min_length = receivers_end +
1818 sizeof(struct ffa_composite_memory_region) +
1819 sizeof(struct ffa_memory_region_constituent);
1820 if (min_length > memory_share_length) {
1821 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1822 (size_t)memory_share_length, (size_t)min_length);
1823 return ffa_error(FFA_INVALID_PARAMETERS);
1824 }
1825
1826 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001827 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001828
1829 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001830 * Check that the composite memory region descriptor is after the access
1831 * descriptors, is at least 16-byte aligned, and fits in the first
1832 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001833 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001834 if ((composite_memory_region_offset < receivers_end) ||
1835 (composite_memory_region_offset % 16 != 0) ||
1836 (composite_memory_region_offset >
1837 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1838 dlog_verbose(
1839 "Invalid composite memory region descriptor offset "
1840 "%u.\n",
1841 (size_t)composite_memory_region_offset);
1842 return ffa_error(FFA_INVALID_PARAMETERS);
1843 }
1844
1845 /*
1846 * Compute the start of the constituent regions. Already checked
1847 * to be not more than fragment_length and thus not more than
1848 * memory_share_length.
1849 */
1850 constituents_start = composite_memory_region_offset +
1851 sizeof(struct ffa_composite_memory_region);
1852 constituents_length = memory_share_length - constituents_start;
1853
1854 /*
1855 * Check that the number of constituents is consistent with the length
1856 * of the constituent region.
1857 */
1858 composite = ffa_memory_region_get_composite(memory_region, 0);
1859 if ((constituents_length %
1860 sizeof(struct ffa_memory_region_constituent) !=
1861 0) ||
1862 ((constituents_length /
1863 sizeof(struct ffa_memory_region_constituent)) !=
1864 composite->constituent_count)) {
1865 dlog_verbose("Invalid length %u or composite offset %u.\n",
1866 (size_t)memory_share_length,
1867 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001868 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001869 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001870 if (fragment_length < memory_share_length &&
1871 fragment_length < HF_MAILBOX_SIZE) {
1872 dlog_warning(
1873 "Initial fragment length %d smaller than mailbox "
1874 "size.\n",
1875 fragment_length);
1876 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001877
Andrew Walbrana65a1322020-04-06 19:32:32 +01001878 /*
1879 * Clear is not allowed for memory sharing, as the sender still has
1880 * access to the memory.
1881 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001882 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1883 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001884 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001885 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001886 }
1887
1888 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001889 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001890 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001891 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001892 }
1893
J-Alves363f5722022-04-25 17:37:37 +01001894 /* Check that the permissions are valid, for each specified receiver. */
1895 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001896 struct ffa_memory_region_attributes receiver_permissions;
1897
1898 receiver = ffa_memory_region_get_receiver(memory_region, i);
1899 assert(receiver != NULL);
1900 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01001901 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001902 receiver_permissions.permissions;
1903 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01001904
1905 if (memory_region->sender == receiver_id) {
1906 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001907 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001908 }
Federico Recanati85090c42021-12-15 13:17:54 +01001909
J-Alves363f5722022-04-25 17:37:37 +01001910 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1911 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001912 struct ffa_memory_access *other_receiver =
1913 ffa_memory_region_get_receiver(memory_region,
1914 j);
1915 assert(other_receiver != NULL);
1916
J-Alves363f5722022-04-25 17:37:37 +01001917 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001918 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01001919 dlog_verbose(
1920 "Repeated receiver(%x) in memory send "
1921 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001922 other_receiver->receiver_permissions
1923 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01001924 return ffa_error(FFA_INVALID_PARAMETERS);
1925 }
1926 }
1927
1928 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001929 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01001930 dlog_verbose(
1931 "All ffa_memory_access should point to the "
1932 "same composite memory region offset.\n");
1933 return ffa_error(FFA_INVALID_PARAMETERS);
1934 }
1935
1936 data_access = ffa_get_data_access_attr(permissions);
1937 instruction_access =
1938 ffa_get_instruction_access_attr(permissions);
1939 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1940 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1941 dlog_verbose(
1942 "Reserved value for receiver permissions "
1943 "%#x.\n",
1944 permissions);
1945 return ffa_error(FFA_INVALID_PARAMETERS);
1946 }
1947 if (instruction_access !=
1948 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1949 dlog_verbose(
1950 "Invalid instruction access permissions %#x "
1951 "for sending memory.\n",
1952 permissions);
1953 return ffa_error(FFA_INVALID_PARAMETERS);
1954 }
1955 if (share_func == FFA_MEM_SHARE_32) {
1956 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1957 dlog_verbose(
1958 "Invalid data access permissions %#x "
1959 "for sharing memory.\n",
1960 permissions);
1961 return ffa_error(FFA_INVALID_PARAMETERS);
1962 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001963 /*
1964 * According to section 10.10.3 of the FF-A v1.1 EAC0
1965 * spec, NX is required for share operations (but must
1966 * not be specified by the sender) so set it in the
1967 * copy that we store, ready to be returned to the
1968 * retriever.
1969 */
1970 if (vm_id_is_current_world(receiver_id)) {
1971 ffa_set_instruction_access_attr(
1972 &permissions,
1973 FFA_INSTRUCTION_ACCESS_NX);
1974 receiver_permissions.permissions = permissions;
1975 }
J-Alves363f5722022-04-25 17:37:37 +01001976 }
1977 if (share_func == FFA_MEM_LEND_32 &&
1978 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1979 dlog_verbose(
1980 "Invalid data access permissions %#x for "
1981 "lending memory.\n",
1982 permissions);
1983 return ffa_error(FFA_INVALID_PARAMETERS);
1984 }
1985
1986 if (share_func == FFA_MEM_DONATE_32 &&
1987 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1988 dlog_verbose(
1989 "Invalid data access permissions %#x for "
1990 "donating memory.\n",
1991 permissions);
1992 return ffa_error(FFA_INVALID_PARAMETERS);
1993 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001994 }
1995
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001996 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1997 security_state =
1998 ffa_get_memory_security_attr(memory_region->attributes);
1999 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2000 dlog_verbose(
2001 "Invalid security state for memory share operation.\n");
2002 return ffa_error(FFA_INVALID_PARAMETERS);
2003 }
2004
Federico Recanatid937f5e2021-12-20 17:38:23 +01002005 /*
J-Alves807794e2022-06-16 13:42:47 +01002006 * If a memory donate or lend with single borrower, the memory type
2007 * shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01002008 */
J-Alves807794e2022-06-16 13:42:47 +01002009 if (share_func == FFA_MEM_DONATE_32 ||
2010 (share_func == FFA_MEM_LEND_32 &&
2011 memory_region->receiver_count == 1)) {
2012 if (ffa_get_memory_type_attr(memory_region->attributes) !=
2013 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2014 dlog_verbose(
2015 "Memory type shall not be specified by "
2016 "sender.\n");
2017 return ffa_error(FFA_INVALID_PARAMETERS);
2018 }
2019 } else {
2020 /*
2021 * Check that sender's memory attributes match Hafnium
2022 * expectations: Normal Memory, Inner shareable, Write-Back
2023 * Read-Allocate Write-Allocate Cacheable.
2024 */
2025 ret = ffa_memory_attributes_validate(memory_region->attributes);
2026 if (ret.func != FFA_SUCCESS_32) {
2027 return ret;
2028 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01002029 }
2030
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002031 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01002032}
2033
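/*
 * Sketch of the length arithmetic validated above, for a transaction with
 * `receiver_count` access descriptors and a single constituent. Field names
 * follow this file; the 16-byte constituent size is the one asserted above.
 * Illustrative only.
 */
static inline uint64_t ffa_minimum_send_length_sketch(
	const struct ffa_memory_region *memory_region)
{
	uint64_t receivers_end =
		(uint64_t)memory_region->memory_access_desc_size *
			memory_region->receiver_count +
		memory_region->receivers_offset;

	/*
	 * The composite descriptor must start at or after receivers_end and
	 * be followed by at least one constituent, so any shorter
	 * `memory_share_length` is rejected with FFA_INVALID_PARAMETERS.
	 */
	return receivers_end + sizeof(struct ffa_composite_memory_region) +
	       sizeof(struct ffa_memory_region_constituent);
}
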
2034/**
Andrew Walbranca808b12020-05-15 17:22:28 +01002035 * Gets the share state for continuing an operation to donate, lend or share
2036 * memory, and checks that it is a valid request.
2037 *
2038 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
2039 * not.
2040 */
J-Alvesfdd29272022-07-19 13:16:31 +01002041struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01002042 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01002043 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002044 struct mpool *page_pool)
2045{
2046 struct ffa_memory_share_state *share_state;
2047 struct ffa_memory_region *memory_region;
2048
Daniel Boulbya2f8c662021-11-26 17:52:53 +00002049 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01002050
2051 /*
2052 * Look up the share state by handle and make sure that the VM ID
2053 * matches.
2054 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01002055 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00002056 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002057 dlog_verbose(
2058 "Invalid handle %#x for memory send continuation.\n",
2059 handle);
2060 return ffa_error(FFA_INVALID_PARAMETERS);
2061 }
2062 memory_region = share_state->memory_region;
2063
J-Alvesfdd29272022-07-19 13:16:31 +01002064 if (vm_id_is_current_world(from_vm_id) &&
2065 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002066 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
2067 return ffa_error(FFA_INVALID_PARAMETERS);
2068 }
2069
2070 if (share_state->sending_complete) {
2071 dlog_verbose(
2072 "Sending of memory handle %#x is already complete.\n",
2073 handle);
2074 return ffa_error(FFA_INVALID_PARAMETERS);
2075 }
2076
2077 if (share_state->fragment_count == MAX_FRAGMENTS) {
2078 /*
2079 * Log a warning as this is a sign that MAX_FRAGMENTS should
2080 * probably be increased.
2081 */
2082 dlog_warning(
2083 "Too many fragments for memory share with handle %#x; "
2084 "only %d supported.\n",
2085 handle, MAX_FRAGMENTS);
2086 /* Free share state, as it's not possible to complete it. */
2087 share_state_free(share_states, share_state, page_pool);
2088 return ffa_error(FFA_NO_MEMORY);
2089 }
2090
2091 *share_state_ret = share_state;
2092
2093 return (struct ffa_value){.func = FFA_SUCCESS_32};
2094}
2095
2096/**
J-Alves95df0ef2022-12-07 10:09:48 +00002097 * Checks if there is at least one receiver from the other world.
2098 */
J-Alvesfdd29272022-07-19 13:16:31 +01002099bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00002100 struct ffa_memory_region *memory_region)
2101{
2102 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002103 struct ffa_memory_access *receiver =
2104 ffa_memory_region_get_receiver(memory_region, i);
2105 assert(receiver != NULL);
2106 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
2107
2108 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00002109 return true;
2110 }
2111 }
2112 return false;
2113}
2114
2115/**
J-Alves9da280b2022-12-21 14:55:39 +00002116 * Validates a call to donate, lend or share memory in which Hafnium is the
2117 * designated allocator of the memory handle. In practice, this also means
2118 * Hafnium is responsible for managing the state structures for the transaction.
2119 * If Hafnium is the SPMC, it should allocate the memory handle when either the
2120 * sender is an SP or there is at least one borrower that is an SP.
2121 * If Hafnium is the hypervisor, it should allocate the memory handle when
 2122 * the operation involves only NWd VMs.
2123 *
2124 * If validation goes well, Hafnium updates the stage-2 page tables of the
2125 * sender. Validation consists of checking if the message length and number of
2126 * memory region constituents match, and if the transition is valid for the
2127 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00002128 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002129 * Assumes that the caller has already found and locked the sender VM and copied
2130 * the memory region descriptor from the sender's TX buffer to a freshly
2131 * allocated page from Hafnium's internal pool. The caller must have also
2132 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002133 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002134 * This function takes ownership of the `memory_region` passed in and will free
2135 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01002136 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002137struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002138 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002139 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002140 uint32_t fragment_length, uint32_t share_func,
2141 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01002142{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002143 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002144 struct share_states_locked share_states;
2145 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01002146
2147 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002148 * If there is an error validating the `memory_region` then we need to
2149 * free it because we own it but we won't be storing it in a share state
2150 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01002151 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002152 ret = ffa_memory_send_validate(from_locked, memory_region,
2153 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01002154 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002155 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002156 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002157 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002158 }
2159
Andrew Walbrana65a1322020-04-06 19:32:32 +01002160 /* Set flag for share function, ready to be retrieved later. */
2161 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002162 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002163 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002164 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002165 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002166 case FFA_MEM_LEND_32:
2167 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002168 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002169 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002170 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002171 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002172 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01002173 }
2174
Andrew Walbranca808b12020-05-15 17:22:28 +01002175 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002176 /*
2177 * Allocate a share state before updating the page table. Otherwise if
2178 * updating the page table succeeded but allocating the share state
2179 * failed then it would leave the memory in a state where nobody could
2180 * get it back.
2181 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01002182 share_state = allocate_share_state(share_states, share_func,
2183 memory_region, fragment_length,
2184 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00002185 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002186 dlog_verbose("Failed to allocate share state.\n");
2187 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01002188 ret = ffa_error(FFA_NO_MEMORY);
2189 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002190 }
2191
Andrew Walbranca808b12020-05-15 17:22:28 +01002192 if (fragment_length == memory_share_length) {
2193 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00002194 ret = ffa_memory_send_complete(
2195 from_locked, share_states, share_state, page_pool,
2196 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002197 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002198 /*
2199 * Use sender ID from 'memory_region' assuming
2200 * that at this point it has been validated:
2201 * - MBZ at virtual FF-A instance.
2202 */
J-Alves19e20cf2023-08-02 12:48:55 +01002203 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002204 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2205 ? memory_region->sender
2206 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01002207 ret = (struct ffa_value){
2208 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002209 .arg1 = (uint32_t)memory_region->handle,
2210 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002211 .arg3 = fragment_length,
2212 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002213 }
2214
2215out:
2216 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002217 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002218 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002219}
2220
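/*
 * Sketch of how the FFA_MEM_FRAG_RX reply built above is interpreted by the
 * sender: the 64-bit handle is split across arg1 (low half) and arg2 (high
 * half), arg3 carries the fragment length or next-fragment offset, and arg4
 * carries the sender ID in bits [31:16] when Hafnium relays on behalf of the
 * other world. The decoder below is illustrative only.
 */
static inline ffa_memory_handle_t ffa_frag_rx_handle_sketch(
	struct ffa_value args)
{
	return (ffa_memory_handle_t)(uint32_t)args.arg1 |
	       ((ffa_memory_handle_t)(uint32_t)args.arg2 << 32);
}
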
2221/**
J-Alves8505a8a2022-06-15 18:10:18 +01002222 * Continues an operation to donate, lend or share memory to a VM from the
 2223 * current world. If this is the last fragment, this checks that the transition is valid
2224 * for the type of memory sending operation and updates the stage-2 page tables
2225 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002226 *
2227 * Assumes that the caller has already found and locked the sender VM and copied
2228 * the memory region descriptor from the sender's TX buffer to a freshly
2229 * allocated page from Hafnium's internal pool.
2230 *
2231 * This function takes ownership of the `fragment` passed in; it must not be
2232 * freed by the caller.
2233 */
2234struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2235 void *fragment,
2236 uint32_t fragment_length,
2237 ffa_memory_handle_t handle,
2238 struct mpool *page_pool)
2239{
2240 struct share_states_locked share_states = share_states_lock();
2241 struct ffa_memory_share_state *share_state;
2242 struct ffa_value ret;
2243 struct ffa_memory_region *memory_region;
2244
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002245 CHECK(is_aligned(fragment,
2246 alignof(struct ffa_memory_region_constituent)));
2247 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2248 0) {
2249 dlog_verbose("Fragment length %u misaligned.\n",
2250 fragment_length);
2251 ret = ffa_error(FFA_INVALID_PARAMETERS);
2252 goto out_free_fragment;
2253 }
2254
Andrew Walbranca808b12020-05-15 17:22:28 +01002255 ret = ffa_memory_send_continue_validate(share_states, handle,
2256 &share_state,
2257 from_locked.vm->id, page_pool);
2258 if (ret.func != FFA_SUCCESS_32) {
2259 goto out_free_fragment;
2260 }
2261 memory_region = share_state->memory_region;
2262
J-Alves95df0ef2022-12-07 10:09:48 +00002263 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002264 dlog_error(
2265 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002266 "other world. This should never happen, and indicates "
2267 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01002268 "EL3 code.\n");
2269 ret = ffa_error(FFA_INVALID_PARAMETERS);
2270 goto out_free_fragment;
2271 }
2272
2273 /* Add this fragment. */
2274 share_state->fragments[share_state->fragment_count] = fragment;
2275 share_state->fragment_constituent_counts[share_state->fragment_count] =
2276 fragment_length / sizeof(struct ffa_memory_region_constituent);
2277 share_state->fragment_count++;
2278
2279 /* Check whether the memory send operation is now ready to complete. */
2280 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002281 ret = ffa_memory_send_complete(
2282 from_locked, share_states, share_state, page_pool,
2283 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002284 } else {
2285 ret = (struct ffa_value){
2286 .func = FFA_MEM_FRAG_RX_32,
2287 .arg1 = (uint32_t)handle,
2288 .arg2 = (uint32_t)(handle >> 32),
2289 .arg3 = share_state_next_fragment_offset(share_states,
2290 share_state)};
2291 }
2292 goto out;
2293
2294out_free_fragment:
2295 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002296
2297out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002298 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002299 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002300}
2301
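/*
 * Sketch of the per-fragment bookkeeping done above: a continuation fragment
 * is just an array of 16-byte constituents, so its constituent count is the
 * fragment length divided by the constituent size (alignment has already
 * been checked). Illustrative only.
 */
static inline uint32_t ffa_fragment_constituent_count_sketch(
	uint32_t fragment_length)
{
	return fragment_length /
	       (uint32_t)sizeof(struct ffa_memory_region_constituent);
}
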
Andrew Walbranca808b12020-05-15 17:22:28 +01002302/** Clean up after the receiver has finished retrieving a memory region. */
2303static void ffa_memory_retrieve_complete(
2304 struct share_states_locked share_states,
2305 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2306{
2307 if (share_state->share_func == FFA_MEM_DONATE_32) {
2308 /*
2309 * Memory that has been donated can't be relinquished,
2310 * so no need to keep the share state around.
2311 */
2312 share_state_free(share_states, share_state, page_pool);
2313 dlog_verbose("Freed share state for donate.\n");
2314 }
2315}
2316
J-Alves2d8457f2022-10-05 11:06:41 +01002317/**
2318 * Initialises the given memory region descriptor to be used for an
2319 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2320 * fragment.
 2321 * The memory region descriptor is initialized according to the retriever's
2322 * FF-A version.
2323 *
2324 * Returns true on success, or false if the given constituents won't all fit in
2325 * the first fragment.
2326 */
2327static bool ffa_retrieved_memory_region_init(
2328 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002329 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002330 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002331 ffa_memory_access_permissions_t permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002332 struct ffa_memory_access *receivers, size_t receiver_count,
2333 uint32_t memory_access_desc_size, uint32_t page_count,
2334 uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002335 const struct ffa_memory_region_constituent constituents[],
2336 uint32_t fragment_constituent_count, uint32_t *total_length,
2337 uint32_t *fragment_length)
2338{
2339 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002340 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002341 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002342 uint32_t constituents_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002343
2344 assert(response != NULL);
2345
2346 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2347 struct ffa_memory_region_v1_0 *retrieve_response =
2348 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002349 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002350
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002351 ffa_memory_region_init_header_v1_0(retrieve_response, sender,
2352 attributes, flags, handle, 0,
2353 receiver_count);
J-Alves2d8457f2022-10-05 11:06:41 +01002354
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002355 receiver = (struct ffa_memory_access_v1_0 *)
2356 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002357 receiver_count = retrieve_response->receiver_count;
2358
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002359 for (uint32_t i = 0; i < receiver_count; i++) {
2360 ffa_id_t receiver_id =
2361 receivers[i].receiver_permissions.receiver;
2362 ffa_memory_receiver_flags_t recv_flags =
2363 receivers[i].receiver_permissions.flags;
2364
2365 /*
 2366			 * Initialized here since memory retrieve responses
 2367			 * are currently expected to specify one borrower.
2368 */
2369 ffa_memory_access_init_v1_0(
2370 receiver, receiver_id,
2371 ffa_get_data_access_attr(permissions),
2372 ffa_get_instruction_access_attr(permissions),
2373 recv_flags);
2374 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002375
2376 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002377 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002378 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2379 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002380
2381 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2382 retrieve_response, 0);
2383 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002384 struct ffa_memory_region *retrieve_response =
2385 (struct ffa_memory_region *)response;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002386 struct ffa_memory_access *retrieve_response_receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002387
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002388 ffa_memory_region_init_header(
2389 retrieve_response, sender, attributes, flags, handle, 0,
2390 receiver_count, memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002391
2392 /*
2393	 * Note that `sizeof(struct ffa_memory_region)` and
2394	 * `sizeof(struct ffa_memory_access)` must both be multiples of
2395	 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2396 * guaranteed that the offset we calculate here is aligned to a
2397 * 64-bit boundary and so 64-bit values can be copied without
2398 * alignment faults.
2399 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002400 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002401 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002402 (uint32_t)(receiver_count *
2403 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002404
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002405 retrieve_response_receivers =
2406 ffa_memory_region_get_receiver(retrieve_response, 0);
2407 assert(retrieve_response_receivers != NULL);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002408
2409 /*
2410 * Initialized here as in memory retrieve responses we currently
2411 * expect one borrower to be specified.
2412 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002413 memcpy_s(retrieve_response_receivers,
2414 sizeof(struct ffa_memory_access) * receiver_count,
2415 receivers,
2416 sizeof(struct ffa_memory_access) * receiver_count);
2417
2418 retrieve_response_receivers->composite_memory_region_offset =
2419 composite_offset;
2420
J-Alves2d8457f2022-10-05 11:06:41 +01002421 composite_memory_region =
2422 ffa_memory_region_get_composite(retrieve_response, 0);
2423 }
2424
J-Alves2d8457f2022-10-05 11:06:41 +01002425 assert(composite_memory_region != NULL);
2426
J-Alves2d8457f2022-10-05 11:06:41 +01002427 composite_memory_region->page_count = page_count;
2428 composite_memory_region->constituent_count = total_constituent_count;
2429 composite_memory_region->reserved_0 = 0;
2430
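	/*
	 * Check that the constituents of this fragment fit in the response
	 * buffer before copying them in.
	 */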
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002431 constituents_offset =
2432 composite_offset + sizeof(struct ffa_composite_memory_region);
J-Alves2d8457f2022-10-05 11:06:41 +01002433 if (constituents_offset +
2434 fragment_constituent_count *
2435 sizeof(struct ffa_memory_region_constituent) >
2436 response_max_size) {
2437 return false;
2438 }
2439
2440 for (i = 0; i < fragment_constituent_count; ++i) {
2441 composite_memory_region->constituents[i] = constituents[i];
2442 }
2443
2444 if (total_length != NULL) {
2445 *total_length =
2446 constituents_offset +
2447 composite_memory_region->constituent_count *
2448 sizeof(struct ffa_memory_region_constituent);
2449 }
2450 if (fragment_length != NULL) {
2451 *fragment_length =
2452 constituents_offset +
2453 fragment_constituent_count *
2454 sizeof(struct ffa_memory_region_constituent);
2455 }
2456
2457 return true;
2458}
2459
J-Alves96de29f2022-04-26 16:05:24 +01002460/**
2461 * Validates the retrieved permissions against those specified by the lender
2462 * of the memory share operation. Optionally sets the permissions to be used
2463 * for the S2 mapping, through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01002464 * Returns FFA_SUCCESS if all the fields are valid, or FFA_ERROR with error code:
2465 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2466 * specification for each ABI.
2467 * - FFA_DENIED -> if the permissions specified by the retriever are more
2468 * permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002469 */
J-Alvesdcad8992023-09-15 14:10:35 +01002470static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2471 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002472 enum ffa_data_access requested_data_access,
2473 enum ffa_instruction_access sent_instruction_access,
2474 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002475 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002476{
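	/*
	 * The requested data access must be no more permissive than what the
	 * sender granted. FFA_DATA_ACCESS_NOT_SPECIFIED from the retriever
	 * defaults to the most permissive access the sender allowed.
	 */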
2477 switch (sent_data_access) {
2478 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2479 case FFA_DATA_ACCESS_RW:
2480 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2481 requested_data_access == FFA_DATA_ACCESS_RW) {
2482 if (permissions != NULL) {
2483 ffa_set_data_access_attr(permissions,
2484 FFA_DATA_ACCESS_RW);
2485 }
2486 break;
2487 }
2488 /* Intentional fall-through. */
2489 case FFA_DATA_ACCESS_RO:
2490 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2491 requested_data_access == FFA_DATA_ACCESS_RO) {
2492 if (permissions != NULL) {
2493 ffa_set_data_access_attr(permissions,
2494 FFA_DATA_ACCESS_RO);
2495 }
2496 break;
2497 }
2498 dlog_verbose(
2499 "Invalid data access requested; sender specified "
2500 "permissions %#x but receiver requested %#x.\n",
2501 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002502 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002503 case FFA_DATA_ACCESS_RESERVED:
2504 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2505 "checked before this point.");
2506 }
2507
J-Alvesdcad8992023-09-15 14:10:35 +01002508 /*
2509	 * For operations with a single borrower, if it is an FFA_MEM_LEND
2510	 * or FFA_MEM_DONATE the retriever should have specified the
2511	 * instruction permissions it wishes to receive.
2512 */
2513 switch (share_func) {
2514 case FFA_MEM_SHARE_32:
2515 if (requested_instruction_access !=
2516 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2517 dlog_verbose(
2518 "%s: for share instruction permissions must "
2519 "NOT be specified.\n",
2520 __func__);
2521 return ffa_error(FFA_INVALID_PARAMETERS);
2522 }
2523 break;
2524 case FFA_MEM_LEND_32:
2525 /*
2526 * For operations with multiple borrowers only permit XN
2527 * permissions, and both Sender and borrower should have used
2528 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2529 */
2530 if (multiple_borrowers) {
2531 if (requested_instruction_access !=
2532 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2533 dlog_verbose(
2534 "%s: lend/share/donate with multiple "
2535 "borrowers "
2536 "instruction permissions must NOT be "
2537 "specified.\n",
2538 __func__);
2539 return ffa_error(FFA_INVALID_PARAMETERS);
2540 }
2541 break;
2542 }
2543 /* Fall through if the operation targets a single borrower. */
2544 case FFA_MEM_DONATE_32:
2545 if (!multiple_borrowers &&
2546 requested_instruction_access ==
2547 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2548 dlog_verbose(
2549 "%s: for lend/donate with single borrower "
2550 "instruction permissions must be speficified "
2551 "by borrower\n",
2552 __func__);
2553 return ffa_error(FFA_INVALID_PARAMETERS);
2554 }
2555 break;
2556 default:
2557 panic("%s: Wrong func id provided.\n", __func__);
2558 }
2559
J-Alves96de29f2022-04-26 16:05:24 +01002560 switch (sent_instruction_access) {
2561 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2562 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002563 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002564 if (permissions != NULL) {
2565 ffa_set_instruction_access_attr(
2566 permissions, FFA_INSTRUCTION_ACCESS_X);
2567 }
2568 break;
2569 }
J-Alvesdcad8992023-09-15 14:10:35 +01002570 /*
2571 * Fall through if requested permissions are less
2572 * permissive than those provided by the sender.
2573 */
J-Alves96de29f2022-04-26 16:05:24 +01002574 case FFA_INSTRUCTION_ACCESS_NX:
2575 if (requested_instruction_access ==
2576 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2577 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2578 if (permissions != NULL) {
2579 ffa_set_instruction_access_attr(
2580 permissions, FFA_INSTRUCTION_ACCESS_NX);
2581 }
2582 break;
2583 }
2584 dlog_verbose(
2585 "Invalid instruction access requested; sender "
2586 "specified permissions %#x but receiver requested "
2587 "%#x.\n",
2588 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002589 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002590 case FFA_INSTRUCTION_ACCESS_RESERVED:
2591 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2592 "be checked before this point.");
2593 }
2594
J-Alvesdcad8992023-09-15 14:10:35 +01002595 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002596}
2597
2598/**
2599 * Validate the receivers' permissions in the retrieve request against those
2600 * specified by the lender.
2601 * Returns in the `permissions` argument the permissions to set at S2 for the
2602 * caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002603 * The function checks the flag to bypass multiple borrower checks:
2604 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2605 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2606 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2607 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002608 */
2609static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2610 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002611 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002612 ffa_memory_access_permissions_t *permissions,
2613 struct ffa_memory_access **receiver_ret, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002614{
2615 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002616 bool bypass_multi_receiver_check =
2617 (retrieve_request->flags &
2618 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002619 const uint32_t region_receiver_count = memory_region->receiver_count;
2620 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002621
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002622 assert(receiver_ret != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002623 assert(permissions != NULL);
2624
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002625 *permissions = 0;
2626
J-Alves3456e032023-07-20 12:20:05 +01002627 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002628 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002629 dlog_verbose(
2630 "Retrieve request should contain same list of "
2631 "borrowers, as specified by the lender.\n");
2632 return ffa_error(FFA_INVALID_PARAMETERS);
2633 }
2634 } else {
2635 if (retrieve_request->receiver_count != 1) {
2636 dlog_verbose(
2637 "Set bypass multiple borrower check, receiver "
2638 "list must be sized 1 (%x)\n",
2639 memory_region->receiver_count);
2640 return ffa_error(FFA_INVALID_PARAMETERS);
2641 }
J-Alves96de29f2022-04-26 16:05:24 +01002642 }
2643
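	/*
	 * Start with an out-of-range sentinel; the loop below records the
	 * index at which the caller appears, and the check after the loop
	 * ensures the caller was actually part of the receiver list.
	 */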
2644 retrieve_receiver_index = retrieve_request->receiver_count;
2645
J-Alves96de29f2022-04-26 16:05:24 +01002646 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2647 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002648 struct ffa_memory_access *retrieve_request_receiver =
2649 ffa_memory_region_get_receiver(retrieve_request, i);
2650 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002651 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002652 retrieve_request_receiver->receiver_permissions
2653 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002654 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002655 retrieve_request_receiver->receiver_permissions
2656 .receiver;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002657 struct ffa_memory_access *receiver;
2658 uint32_t mem_region_receiver_index;
2659 bool permissions_RO;
2660 bool clear_memory_flags;
J-Alves96de29f2022-04-26 16:05:24 +01002661 bool found_to_id = current_receiver_id == to_vm_id;
2662
J-Alves3456e032023-07-20 12:20:05 +01002663 if (bypass_multi_receiver_check && !found_to_id) {
2664 dlog_verbose(
2665 "Bypass multiple borrower check for id %x.\n",
2666 current_receiver_id);
2667 continue;
2668 }
2669
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002670 if (retrieve_request_receiver->composite_memory_region_offset !=
2671 0U) {
2672 dlog_verbose(
2673 "Retriever specified address ranges not "
2674 "supported (got offset %d).\n",
2675 retrieve_request_receiver
2676 ->composite_memory_region_offset);
2677 return ffa_error(FFA_INVALID_PARAMETERS);
2678 }
2679
J-Alves96de29f2022-04-26 16:05:24 +01002680 /*
2681 * Find the current receiver in the transaction descriptor from
2682 * sender.
2683 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002684 mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002685 ffa_memory_region_get_receiver_index(
2686 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002687
2688 if (mem_region_receiver_index ==
2689 memory_region->receiver_count) {
2690 dlog_verbose("%s: receiver %x not found\n", __func__,
2691 current_receiver_id);
2692 return ffa_error(FFA_DENIED);
2693 }
2694
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002695 receiver = ffa_memory_region_get_receiver(
2696 memory_region, mem_region_receiver_index);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002697 assert(receiver != NULL);
2698
2699 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002700
2701 if (found_to_id) {
2702 retrieve_receiver_index = i;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002703
2704 *receiver_ret = receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002705 }
2706
2707 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002708 * Check if retrieve request memory access list is valid:
2709 * - The retrieve request complies with the specification.
2710 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002711 */
J-Alvesdcad8992023-09-15 14:10:35 +01002712 ret = ffa_memory_retrieve_is_memory_access_valid(
2713 func_id, ffa_get_data_access_attr(sent_permissions),
2714 ffa_get_data_access_attr(requested_permissions),
2715 ffa_get_instruction_access_attr(sent_permissions),
2716 ffa_get_instruction_access_attr(requested_permissions),
2717 found_to_id ? permissions : NULL,
2718 region_receiver_count > 1);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002719
J-Alvesdcad8992023-09-15 14:10:35 +01002720 if (ret.func != FFA_SUCCESS_32) {
2721 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002722 }
2723
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002724 permissions_RO = (ffa_get_data_access_attr(*permissions) ==
2725 FFA_DATA_ACCESS_RO);
2726 clear_memory_flags = (retrieve_request->flags &
2727 FFA_MEMORY_REGION_FLAG_CLEAR) != 0U;
2728
J-Alves96de29f2022-04-26 16:05:24 +01002729 /*
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002730 * Can't request PM to clear memory if only provided
2731 * with RO permissions.
J-Alves96de29f2022-04-26 16:05:24 +01002732 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002733 if (found_to_id && permissions_RO && clear_memory_flags) {
J-Alves96de29f2022-04-26 16:05:24 +01002734 dlog_verbose(
2735 "Receiver has RO permissions can not request "
2736 "clear.\n");
2737 return ffa_error(FFA_DENIED);
2738 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002739
2740 /*
2741 * Check the impdef in the retrieve_request matches the value in
2742 * the original memory send.
2743 */
2744 if (ffa_version_from_memory_access_desc_size(
2745 memory_region->memory_access_desc_size) >=
2746 MAKE_FFA_VERSION(1, 2) &&
2747 ffa_version_from_memory_access_desc_size(
2748 retrieve_request->memory_access_desc_size) >=
2749 MAKE_FFA_VERSION(1, 2)) {
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002750 if (receiver->impdef.val[0] !=
2751 retrieve_request_receiver->impdef.val[0] ||
2752 receiver->impdef.val[1] !=
2753 retrieve_request_receiver->impdef.val[1]) {
2754 dlog_verbose(
2755 "Impdef value in memory send does not "
2756 "match retrieve request value "
2757 "send value %#x %#x retrieve request "
2758 "value %#x %#x\n",
2759 receiver->impdef.val[0],
2760 receiver->impdef.val[1],
2761 retrieve_request_receiver->impdef
2762 .val[0],
2763 retrieve_request_receiver->impdef
2764 .val[1]);
2765 return ffa_error(FFA_INVALID_PARAMETERS);
2766 }
2767 }
J-Alves96de29f2022-04-26 16:05:24 +01002768 }
2769
2770 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2771 dlog_verbose(
2772 "Retrieve request does not contain caller's (%x) "
2773 "permissions\n",
2774 to_vm_id);
2775 return ffa_error(FFA_INVALID_PARAMETERS);
2776 }
2777
2778 return (struct ffa_value){.func = FFA_SUCCESS_32};
2779}
2780
J-Alvesa9cd7e32022-07-01 13:49:33 +01002781/*
2782 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2783 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2784 * of a pending memory sharing operation whose allocator is the SPM, for
2785 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2786 * the memory region descriptor of the retrieve request must be zeroed with the
2787 * exception of the sender ID and handle.
2788 */
J-Alves4f0d9c12024-01-17 17:23:11 +00002789bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request,
2790 struct vm_locked to_locked)
J-Alvesa9cd7e32022-07-01 13:49:33 +01002791{
2792 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2793 request->attributes == 0U && request->flags == 0U &&
2794 request->tag == 0U && request->receiver_count == 0U &&
2795 plat_ffa_memory_handle_allocated_by_current_world(
2796 request->handle);
2797}
2798
2799/*
2800 * Helper to reset count of fragments retrieved by the hypervisor.
2801 */
2802static void ffa_memory_retrieve_complete_from_hyp(
2803 struct ffa_memory_share_state *share_state)
2804{
2805 if (share_state->hypervisor_fragment_count ==
2806 share_state->fragment_count) {
2807 share_state->hypervisor_fragment_count = 0;
2808 }
2809}
2810
J-Alves089004f2022-07-13 14:25:44 +01002811/**
J-Alves4f0d9c12024-01-17 17:23:11 +00002812 * Prepares the ffa_value to be returned for the memory retrieve response.
2813 */
2814static struct ffa_value ffa_memory_retrieve_resp(uint32_t total_length,
2815 uint32_t fragment_length)
2816{
2817 return (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
2818 .arg1 = total_length,
2819 .arg2 = fragment_length};
2820}
2821
2822/**
J-Alves089004f2022-07-13 14:25:44 +01002823 * Validate the memory region descriptor provided by the borrower in
2824 * FFA_MEM_RETRIEVE_REQ against the saved memory region provided by the lender
2825 * at the memory sharing call.
2826 */
2827static struct ffa_value ffa_memory_retrieve_validate(
J-Alves4f0d9c12024-01-17 17:23:11 +00002828 ffa_id_t to_id, struct ffa_memory_region *retrieve_request,
2829 uint32_t retrieve_request_length,
J-Alves089004f2022-07-13 14:25:44 +01002830 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2831 uint32_t share_func)
2832{
2833 ffa_memory_region_flags_t transaction_type =
2834 retrieve_request->flags &
2835 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002836 enum ffa_memory_security security_state;
J-Alves4f0d9c12024-01-17 17:23:11 +00002837 const uint64_t memory_access_desc_size =
2838 retrieve_request->memory_access_desc_size;
2839 const uint32_t expected_retrieve_request_length =
2840 retrieve_request->receivers_offset +
2841 (uint32_t)(retrieve_request->receiver_count *
2842 memory_access_desc_size);
J-Alves089004f2022-07-13 14:25:44 +01002843
2844 assert(retrieve_request != NULL);
2845 assert(memory_region != NULL);
2846 assert(receiver_index != NULL);
2848
J-Alves4f0d9c12024-01-17 17:23:11 +00002849 if (retrieve_request_length != expected_retrieve_request_length) {
2850 dlog_verbose(
2851 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
2852 "but was %d.\n",
2853 expected_retrieve_request_length,
2854 retrieve_request_length);
2855 return ffa_error(FFA_INVALID_PARAMETERS);
2856 }
2857
2858 if (retrieve_request->sender != memory_region->sender) {
2859 dlog_verbose(
2860 "Memory with handle %#x not fully sent, can't "
2861 "retrieve.\n",
2862 memory_region->handle);
2863 return ffa_error(FFA_DENIED);
2864 }
2865
2866 /*
2867 * The SPMC can only process retrieve requests to memory share
2868 * operations with one borrower from the other world. It can't
2869 * determine the ID of the NWd VM that invoked the retrieve
2870 * request interface call. It relies on the hypervisor to
2871 * validate the caller's ID against that provided in the
2872 * `receivers` list of the retrieve response.
2873 * In case there is only one borrower from the NWd in the
2874 * transaction descriptor, record that in the `receiver_id` for
2875 * later use, and validate in the retrieve request message.
2876 * This limitation is due to the fact SPMC can't determine the
2877 * index in the memory share structures state to update.
2878 */
2879 if (to_id == HF_HYPERVISOR_VM_ID) {
2880 uint32_t other_world_count = 0;
2881
2882 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2883			struct ffa_memory_access *receiver =
2884				ffa_memory_region_get_receiver(memory_region,
2885							       i);
2886 assert(receiver != NULL);
2887
2888 to_id = receiver->receiver_permissions.receiver;
2889
2890 if (!vm_id_is_current_world(to_id)) {
2891 other_world_count++;
2892 }
2893 }
2894
2895 if (other_world_count > 1) {
2896 dlog_verbose(
2897 "Support one receiver from the other "
2898 "world.\n");
2899 return ffa_error(FFA_NOT_SUPPORTED);
2900 }
2901 }
J-Alves089004f2022-07-13 14:25:44 +01002902 /*
2903 * Check that the transaction type expected by the receiver is
2904 * correct, if it has been specified.
2905 */
2906 if (transaction_type !=
2907 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2908 transaction_type != (memory_region->flags &
2909 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2910 dlog_verbose(
2911 "Incorrect transaction type %#x for "
2912 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2913 transaction_type,
2914 memory_region->flags &
2915 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2916 retrieve_request->handle);
2917 return ffa_error(FFA_INVALID_PARAMETERS);
2918 }
2919
2920 if (retrieve_request->tag != memory_region->tag) {
2921 dlog_verbose(
2922 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2923 "%d for handle %#x.\n",
2924 retrieve_request->tag, memory_region->tag,
2925 retrieve_request->handle);
2926 return ffa_error(FFA_INVALID_PARAMETERS);
2927 }
2928
J-Alves4f0d9c12024-01-17 17:23:11 +00002929 *receiver_index =
2930 ffa_memory_region_get_receiver_index(memory_region, to_id);
J-Alves089004f2022-07-13 14:25:44 +01002931
2932 if (*receiver_index == memory_region->receiver_count) {
2933 dlog_verbose(
2934 "Incorrect receiver VM ID %d for "
2935 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00002936 to_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002937 return ffa_error(FFA_INVALID_PARAMETERS);
2938 }
2939
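	/*
	 * Validate the retrieve request flags: address range alignment hints
	 * are not supported and all reserved bits must be zero.
	 */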
2940 if ((retrieve_request->flags &
2941 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2942 dlog_verbose(
2943 "Retriever specified 'address range alignment 'hint' "
2944 "not supported.\n");
2945 return ffa_error(FFA_INVALID_PARAMETERS);
2946 }
2947 if ((retrieve_request->flags &
2948 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2949 dlog_verbose(
2950 "Bits 8-5 must be zero in memory region's flags "
2951 "(address range alignment hint not supported).\n");
2952 return ffa_error(FFA_INVALID_PARAMETERS);
2953 }
2954
2955 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2956 dlog_verbose(
2957 "Bits 31-10 must be zero in memory region's flags.\n");
2958 return ffa_error(FFA_INVALID_PARAMETERS);
2959 }
2960
2961 if (share_func == FFA_MEM_SHARE_32 &&
2962 (retrieve_request->flags &
2963 (FFA_MEMORY_REGION_FLAG_CLEAR |
2964 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2965 dlog_verbose(
2966 "Memory Share operation can't clean after relinquish "
2967 "memory region.\n");
2968 return ffa_error(FFA_INVALID_PARAMETERS);
2969 }
2970
2971 /*
2972 * If the borrower needs the memory to be cleared before mapping
2973 * to its address space, the sender should have set the flag
2974 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2975 * FFA_DENIED.
2976 */
2977 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2978 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2979 dlog_verbose(
2980 "Borrower needs memory cleared. Sender needs to set "
2981 "flag for clearing memory.\n");
2982 return ffa_error(FFA_DENIED);
2983 }
2984
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002985 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2986 security_state =
2987 ffa_get_memory_security_attr(retrieve_request->attributes);
2988 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2989 dlog_verbose(
2990 "Invalid security state for memory retrieve request "
2991 "operation.\n");
2992 return ffa_error(FFA_INVALID_PARAMETERS);
2993 }
2994
J-Alves089004f2022-07-13 14:25:44 +01002995 /*
2996 * If memory type is not specified, bypass validation of memory
2997 * attributes in the retrieve request. The retriever is expecting to
2998 * obtain this information from the SPMC.
2999 */
3000 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
3001 FFA_MEMORY_NOT_SPECIFIED_MEM) {
3002 return (struct ffa_value){.func = FFA_SUCCESS_32};
3003 }
3004
3005 /*
3006 * Ensure receiver's attributes are compatible with how
3007 * Hafnium maps memory: Normal Memory, Inner shareable,
3008 * Write-Back Read-Allocate Write-Allocate Cacheable.
3009 */
3010 return ffa_memory_attributes_validate(retrieve_request->attributes);
3011}
3012
J-Alves4f0d9c12024-01-17 17:23:11 +00003013static struct ffa_value ffa_partition_retrieve_request(
3014 struct share_states_locked share_states,
3015 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3016 struct ffa_memory_region *retrieve_request,
3017 uint32_t retrieve_request_length, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003018{
J-Alvesa9cd7e32022-07-01 13:49:33 +01003019 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003020 uint32_t memory_to_mode;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003021 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003022 struct ffa_composite_memory_region *composite;
3023 uint32_t total_length;
3024 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01003025 ffa_id_t receiver_id = to_locked.vm->id;
J-Alves4f0d9c12024-01-17 17:23:11 +00003026 bool is_retrieve_complete = false;
J-Alves4f0d9c12024-01-17 17:23:11 +00003027 const uint64_t memory_access_desc_size =
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003028 retrieve_request->memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003029 uint32_t receiver_index;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003030 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003031 ffa_memory_handle_t handle = retrieve_request->handle;
J-Alves460d36c2023-10-12 17:02:15 +01003032 ffa_memory_attributes_t attributes = 0;
3033 uint32_t retrieve_mode = 0;
J-Alves4f0d9c12024-01-17 17:23:11 +00003034 struct ffa_memory_region *memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003035
J-Alves96de29f2022-04-26 16:05:24 +01003036 if (!share_state->sending_complete) {
3037 dlog_verbose(
3038 "Memory with handle %#x not fully sent, can't "
3039 "retrieve.\n",
3040 handle);
J-Alves4f0d9c12024-01-17 17:23:11 +00003041 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alves96de29f2022-04-26 16:05:24 +01003042 }
3043
J-Alves4f0d9c12024-01-17 17:23:11 +00003044 /*
3045	 * Validate the retrieve request against what was sent by the
3046	 * sender. The function outputs the caller's `receiver_index`
3047	 * within the provided memory region.
3048 */
3049 ret = ffa_memory_retrieve_validate(
3050 receiver_id, retrieve_request, retrieve_request_length,
3051 memory_region, &receiver_index, share_state->share_func);
J-Alves089004f2022-07-13 14:25:44 +01003052
J-Alves4f0d9c12024-01-17 17:23:11 +00003053 if (ret.func != FFA_SUCCESS_32) {
3054 return ret;
J-Alves089004f2022-07-13 14:25:44 +01003055 }
J-Alves96de29f2022-04-26 16:05:24 +01003056
J-Alves4f0d9c12024-01-17 17:23:11 +00003057 /*
3058 * Validate the requested permissions against the sent
3059 * permissions.
3060 * Outputs the permissions to give to retriever at S2
3061 * PTs.
3062 */
3063 ret = ffa_memory_retrieve_validate_memory_access_list(
3064 memory_region, retrieve_request, receiver_id, &permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003065 &receiver, share_state->share_func);
J-Alves4f0d9c12024-01-17 17:23:11 +00003066 if (ret.func != FFA_SUCCESS_32) {
3067 return ret;
3068 }
3069
3070 memory_to_mode = ffa_memory_permissions_to_mode(
3071 permissions, share_state->sender_orig_mode);
3072
3073 ret = ffa_retrieve_check_update(
3074 to_locked, share_state->fragments,
3075 share_state->fragment_constituent_counts,
3076 share_state->fragment_count, memory_to_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003077 share_state->share_func, false, page_pool, &retrieve_mode,
3078 share_state->memory_protected);
J-Alves4f0d9c12024-01-17 17:23:11 +00003079
3080 if (ret.func != FFA_SUCCESS_32) {
3081 return ret;
3082 }
3083
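	/*
	 * The first fragment is delivered in the retrieve response itself;
	 * any remaining fragments are fetched by the receiver through
	 * FFA_MEM_FRAG_RX (see ffa_memory_retrieve_continue).
	 */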
3084 share_state->retrieved_fragment_count[receiver_index] = 1;
3085
3086 is_retrieve_complete =
3087 share_state->retrieved_fragment_count[receiver_index] ==
3088 share_state->fragment_count;
3089
J-Alvesb5084cf2022-07-06 14:20:12 +01003090 /* VMs acquire the RX buffer from SPMC. */
3091 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3092
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003093 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003094 * Copy response to RX buffer of caller and deliver the message.
3095 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003096 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003097 composite = ffa_memory_region_get_composite(memory_region, 0);
J-Alves4f0d9c12024-01-17 17:23:11 +00003098
Andrew Walbranca808b12020-05-15 17:22:28 +01003099 /*
J-Alves460d36c2023-10-12 17:02:15 +01003100 * Set the security state in the memory retrieve response attributes
3101 * if specified by the target mode.
3102 */
3103 attributes = plat_ffa_memory_security_mode(memory_region->attributes,
3104 retrieve_mode);
3105
3106 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003107 * Constituents which we received in the first fragment should
3108 * always fit in the first fragment we are sending, because the
3109 * header is the same size in both cases and we have a fixed
3110 * message buffer size. So `ffa_retrieved_memory_region_init`
3111 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01003112 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003113
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003114	/* Report back the permissions that were granted to the retriever. */
3115 receiver->receiver_permissions.permissions = permissions;
3116
3117 /*
3118 * Prepare the memory region descriptor for the retrieve response.
3119 * Provide the pointer to the receiver tracked in the share state
3120	 * structures.
3121 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003122 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01003123 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003124 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003125 memory_region->flags, handle, permissions, receiver, 1,
3126 memory_access_desc_size, composite->page_count,
3127 composite->constituent_count, share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01003128 share_state->fragment_constituent_counts[0], &total_length,
3129 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01003130
J-Alves4f0d9c12024-01-17 17:23:11 +00003131 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003132 ffa_memory_retrieve_complete(share_states, share_state,
3133 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003134 }
J-Alves4f0d9c12024-01-17 17:23:11 +00003135
3136 return ffa_memory_retrieve_resp(total_length, fragment_length);
3137}
3138
3139static struct ffa_value ffa_hypervisor_retrieve_request(
3140 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3141 struct ffa_memory_region *retrieve_request)
3142{
3143 struct ffa_value ret;
3144 struct ffa_composite_memory_region *composite;
3145 uint32_t total_length;
3146 uint32_t fragment_length;
J-Alves4f0d9c12024-01-17 17:23:11 +00003147 ffa_memory_attributes_t attributes;
J-Alves7b6ab612024-01-24 09:54:54 +00003148 uint64_t memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003149 struct ffa_memory_region *memory_region;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003150 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003151 ffa_memory_handle_t handle = retrieve_request->handle;
3152
J-Alves4f0d9c12024-01-17 17:23:11 +00003153 memory_region = share_state->memory_region;
3154
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003155 assert(to_locked.vm->id == HF_HYPERVISOR_VM_ID);
3156
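	/*
	 * The size of each memory access descriptor in the response depends
	 * on the caller's FF-A version: v1.2 introduced the larger descriptor
	 * carrying the impdef field.
	 */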
J-Alves7b6ab612024-01-24 09:54:54 +00003157 switch (to_locked.vm->ffa_version) {
3158 case MAKE_FFA_VERSION(1, 2):
3159 memory_access_desc_size = sizeof(struct ffa_memory_access);
3160 break;
3161 case MAKE_FFA_VERSION(1, 0):
3162 case MAKE_FFA_VERSION(1, 1):
3163 memory_access_desc_size = sizeof(struct ffa_memory_access_v1_0);
3164 break;
3165 default:
3166 panic("version not supported: %x\n", to_locked.vm->ffa_version);
3167 }
3168
J-Alves4f0d9c12024-01-17 17:23:11 +00003169 if (share_state->hypervisor_fragment_count != 0U) {
3170 dlog_verbose(
3171 "Memory with handle %#x already retrieved by "
3172 "the hypervisor.\n",
3173 handle);
3174 return ffa_error(FFA_DENIED);
3175 }
3176
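	/*
	 * Record that the hypervisor has retrieved the first fragment; the
	 * count is reset once all fragments have been fetched (see
	 * ffa_memory_retrieve_complete_from_hyp).
	 */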
3177 share_state->hypervisor_fragment_count = 1;
3178
3179 ffa_memory_retrieve_complete_from_hyp(share_state);
3180
3181 /* VMs acquire the RX buffer from SPMC. */
3182 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3183
3184 /*
3185 * Copy response to RX buffer of caller and deliver the message.
3186 * This must be done before the share_state is (possibly) freed.
3187 */
3188 composite = ffa_memory_region_get_composite(memory_region, 0);
3189
3190 /*
3191 * Constituents which we received in the first fragment should
3192 * always fit in the first fragment we are sending, because the
3193 * header is the same size in both cases and we have a fixed
3194 * message buffer size. So `ffa_retrieved_memory_region_init`
3195 * should never fail.
3196 */
3197
3198 /*
3199 * Set the security state in the memory retrieve response attributes
3200 * if specified by the target mode.
3201 */
3202 attributes = plat_ffa_memory_security_mode(
3203 memory_region->attributes, share_state->sender_orig_mode);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003204
3205 receiver = ffa_memory_region_get_receiver(memory_region, 0);
3206
J-Alves4f0d9c12024-01-17 17:23:11 +00003207 CHECK(ffa_retrieved_memory_region_init(
3208 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
3209 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003210 memory_region->flags, handle,
3211 receiver->receiver_permissions.permissions, receiver,
3212 memory_region->receiver_count, memory_access_desc_size,
J-Alves4f0d9c12024-01-17 17:23:11 +00003213 composite->page_count, composite->constituent_count,
3214 share_state->fragments[0],
3215 share_state->fragment_constituent_counts[0], &total_length,
3216 &fragment_length));
3217
3218 return ffa_memory_retrieve_resp(total_length, fragment_length);
3219}
3220
3221struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
3222 struct ffa_memory_region *retrieve_request,
3223 uint32_t retrieve_request_length,
3224 struct mpool *page_pool)
3225{
3226 ffa_memory_handle_t handle = retrieve_request->handle;
3227 struct share_states_locked share_states;
3228 struct ffa_memory_share_state *share_state;
3229 struct ffa_value ret;
3230
3231 dump_share_states();
3232
3233 share_states = share_states_lock();
3234 share_state = get_share_state(share_states, handle);
3235 if (share_state == NULL) {
3236 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
3237 handle);
3238 ret = ffa_error(FFA_INVALID_PARAMETERS);
3239 goto out;
3240 }
3241
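	/*
	 * A retrieve request from the hypervisor with zeroed descriptor
	 * fields is treated as a hypervisor retrieve request; anything else
	 * is handled as a regular partition retrieve request.
	 */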
3242 if (is_ffa_hypervisor_retrieve_request(retrieve_request, to_locked)) {
3243 ret = ffa_hypervisor_retrieve_request(share_state, to_locked,
3244 retrieve_request);
3245 } else {
3246 ret = ffa_partition_retrieve_request(
3247 share_states, share_state, to_locked, retrieve_request,
3248 retrieve_request_length, page_pool);
3249 }
3250
3251 /* Track use of the RX buffer if the handling has succeeded. */
3252 if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
3253 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
3254 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
3255 }
3256
Andrew Walbranca808b12020-05-15 17:22:28 +01003257out:
3258 share_states_unlock(&share_states);
3259 dump_share_states();
3260 return ret;
3261}
3262
J-Alves5da37d92022-10-24 16:33:48 +01003263/**
3264 * Determine expected fragment offset according to the FF-A version of
3265 * the caller.
3266 */
3267static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
3268 struct ffa_memory_region *memory_region,
3269 uint32_t retrieved_constituents_count, uint32_t ffa_version)
3270{
3271 uint32_t expected_fragment_offset;
3272 uint32_t composite_constituents_offset;
3273
Kathleen Capellae4fe2962023-09-01 17:08:47 -04003274 if (ffa_version >= MAKE_FFA_VERSION(1, 1)) {
J-Alves5da37d92022-10-24 16:33:48 +01003275 /*
3276 * Hafnium operates memory regions in FF-A v1.1 format, so we
3277 * can retrieve the constituents offset from descriptor.
3278 */
3279 composite_constituents_offset =
3280 ffa_composite_constituent_offset(memory_region, 0);
3281 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
3282 /*
3283 * If retriever is FF-A v1.0, determine the composite offset
3284 * as it is expected to have been configured in the
3285 * retrieve response.
3286 */
3287 composite_constituents_offset =
3288 sizeof(struct ffa_memory_region_v1_0) +
3289 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003290 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01003291 sizeof(struct ffa_composite_memory_region);
3292 } else {
3293 panic("%s received an invalid FF-A version.\n", __func__);
3294 }
3295
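	/*
	 * Adjust for the receiver descriptors present in the lender's
	 * transaction descriptor but not in the single-receiver retrieve
	 * response, so the offset matches the layout the retriever received.
	 */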
3296 expected_fragment_offset =
3297 composite_constituents_offset +
3298 retrieved_constituents_count *
3299 sizeof(struct ffa_memory_region_constituent) -
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003300 (uint32_t)(memory_region->memory_access_desc_size *
3301 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003302
3303 return expected_fragment_offset;
3304}
3305
Andrew Walbranca808b12020-05-15 17:22:28 +01003306struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3307 ffa_memory_handle_t handle,
3308 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003309 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003310 struct mpool *page_pool)
3311{
3312 struct ffa_memory_region *memory_region;
3313 struct share_states_locked share_states;
3314 struct ffa_memory_share_state *share_state;
3315 struct ffa_value ret;
3316 uint32_t fragment_index;
3317 uint32_t retrieved_constituents_count;
3318 uint32_t i;
3319 uint32_t expected_fragment_offset;
3320 uint32_t remaining_constituent_count;
3321 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003322 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003323 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003324
3325 dump_share_states();
3326
3327 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003328 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003329 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003330 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
3331 handle);
3332 ret = ffa_error(FFA_INVALID_PARAMETERS);
3333 goto out;
3334 }
3335
3336 memory_region = share_state->memory_region;
3337 CHECK(memory_region != NULL);
3338
Andrew Walbranca808b12020-05-15 17:22:28 +01003339 if (!share_state->sending_complete) {
3340 dlog_verbose(
3341 "Memory with handle %#x not fully sent, can't "
3342 "retrieve.\n",
3343 handle);
3344 ret = ffa_error(FFA_INVALID_PARAMETERS);
3345 goto out;
3346 }
3347
J-Alves59ed0042022-07-28 18:26:41 +01003348 /*
3349 * If retrieve request from the hypervisor has been initiated in the
3350 * given share_state, continue it, else assume it is a continuation of
3351 * retrieve request from a NWd VM.
3352 */
3353 continue_ffa_hyp_mem_retrieve_req =
3354 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3355 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003356 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003357
J-Alves59ed0042022-07-28 18:26:41 +01003358 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003359 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003360 memory_region, to_locked.vm->id);
3361
3362 if (receiver_index == memory_region->receiver_count) {
3363 dlog_verbose(
3364 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
3365 "borrower to memory sharing transaction (%x)\n",
3366 to_locked.vm->id, handle);
3367 ret = ffa_error(FFA_INVALID_PARAMETERS);
3368 goto out;
3369 }
3370
3371 if (share_state->retrieved_fragment_count[receiver_index] ==
3372 0 ||
3373 share_state->retrieved_fragment_count[receiver_index] >=
3374 share_state->fragment_count) {
3375 dlog_verbose(
3376 "Retrieval of memory with handle %#x not yet "
3377 "started or already completed (%d/%d fragments "
3378 "retrieved).\n",
3379 handle,
3380 share_state->retrieved_fragment_count
3381 [receiver_index],
3382 share_state->fragment_count);
3383 ret = ffa_error(FFA_INVALID_PARAMETERS);
3384 goto out;
3385 }
3386
3387 fragment_index =
3388 share_state->retrieved_fragment_count[receiver_index];
3389 } else {
3390 if (share_state->hypervisor_fragment_count == 0 ||
3391 share_state->hypervisor_fragment_count >=
3392 share_state->fragment_count) {
3393 dlog_verbose(
3394 "Retrieve of memory with handle %x not "
3395 "started from hypervisor.\n",
3396 handle);
3397 ret = ffa_error(FFA_INVALID_PARAMETERS);
3398 goto out;
3399 }
3400
3401 if (memory_region->sender != sender_vm_id) {
3402 dlog_verbose(
3403 "Sender ID (%x) is not as expected for memory "
3404 "handle %x\n",
3405 sender_vm_id, handle);
3406 ret = ffa_error(FFA_INVALID_PARAMETERS);
3407 goto out;
3408 }
3409
3410 fragment_index = share_state->hypervisor_fragment_count;
3411
3412 receiver_index = 0;
3413 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003414
3415 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003416 * Check that the given fragment offset is correct by counting
3417 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003418 */
3419 retrieved_constituents_count = 0;
3420 for (i = 0; i < fragment_index; ++i) {
3421 retrieved_constituents_count +=
3422 share_state->fragment_constituent_counts[i];
3423 }
J-Alvesc7484f12022-05-13 12:41:14 +01003424
3425 CHECK(memory_region->receiver_count > 0);
3426
Andrew Walbranca808b12020-05-15 17:22:28 +01003427 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003428 ffa_memory_retrieve_expected_offset_per_ffa_version(
3429 memory_region, retrieved_constituents_count,
3430 to_locked.vm->ffa_version);
3431
Andrew Walbranca808b12020-05-15 17:22:28 +01003432 if (fragment_offset != expected_fragment_offset) {
3433 dlog_verbose("Fragment offset was %d but expected %d.\n",
3434 fragment_offset, expected_fragment_offset);
3435 ret = ffa_error(FFA_INVALID_PARAMETERS);
3436 goto out;
3437 }
3438
J-Alves4f0d9c12024-01-17 17:23:11 +00003439 /*
3440	 * When Hafnium is the hypervisor, acquire the RX buffer of a VM that
3441	 * is currently owned by the SPMC.
3442 */
3443	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
J-Alves59ed0042022-07-28 18:26:41 +01003444
Andrew Walbranca808b12020-05-15 17:22:28 +01003445 remaining_constituent_count = ffa_memory_fragment_init(
3446 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3447 share_state->fragments[fragment_index],
3448 share_state->fragment_constituent_counts[fragment_index],
3449 &fragment_length);
3450 CHECK(remaining_constituent_count == 0);
J-Alves674e4de2024-01-17 16:20:32 +00003451
Andrew Walbranca808b12020-05-15 17:22:28 +01003452 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003453 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003454
J-Alves59ed0042022-07-28 18:26:41 +01003455 if (!continue_ffa_hyp_mem_retrieve_req) {
3456 share_state->retrieved_fragment_count[receiver_index]++;
3457 if (share_state->retrieved_fragment_count[receiver_index] ==
3458 share_state->fragment_count) {
3459 ffa_memory_retrieve_complete(share_states, share_state,
3460 page_pool);
3461 }
3462 } else {
3463 share_state->hypervisor_fragment_count++;
3464
3465 ffa_memory_retrieve_complete_from_hyp(share_state);
3466 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003467 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3468 .arg1 = (uint32_t)handle,
3469 .arg2 = (uint32_t)(handle >> 32),
3470 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003471
3472out:
3473 share_states_unlock(&share_states);
3474 dump_share_states();
3475 return ret;
3476}
3477
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003478struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003479 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003480 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003481{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003482 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003483 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003484 struct ffa_memory_share_state *share_state;
3485 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003486 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003487 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003488 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003489 bool receivers_relinquished_memory;
J-Alves639ddfc2023-11-21 14:17:26 +00003490 ffa_memory_access_permissions_t receiver_permissions = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003491
Andrew Walbrana65a1322020-04-06 19:32:32 +01003492 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003493 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003494 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01003495 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003496 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003497 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003498 }
3499
Andrew Walbrana65a1322020-04-06 19:32:32 +01003500 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003501 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003502 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01003503 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003504 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003505 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003506 }
3507
3508 dump_share_states();
3509
3510 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003511 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003512 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003513 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003514 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003515 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003516 goto out;
3517 }
3518
Andrew Walbranca808b12020-05-15 17:22:28 +01003519 if (!share_state->sending_complete) {
3520 dlog_verbose(
3521 "Memory with handle %#x not fully sent, can't "
3522 "relinquish.\n",
3523 handle);
3524 ret = ffa_error(FFA_INVALID_PARAMETERS);
3525 goto out;
3526 }
3527
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003528 memory_region = share_state->memory_region;
3529 CHECK(memory_region != NULL);
3530
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003531 receiver_index = ffa_memory_region_get_receiver_index(
3532 memory_region, from_locked.vm->id);
J-Alves8eb19162022-04-28 10:56:48 +01003533
3534 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003535 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003536 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01003537 "with handle %#x and it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003538 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003539 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003540 goto out;
3541 }
3542
J-Alves8eb19162022-04-28 10:56:48 +01003543 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003544 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003545 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003546 "Memory with handle %#x not yet fully "
3547 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003548 "receiver %x can't relinquish.\n",
3549 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003550 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003551 goto out;
3552 }
3553
J-Alves3c5b2072022-11-21 12:45:40 +00003554 /*
3555	 * Only clear the memory now if the clear flag is set in the relinquish
3556	 * request and no other borrower still has the memory retrieved.
3557 */
3558 receivers_relinquished_memory = true;
3559
3560 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3561 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003562 ffa_memory_region_get_receiver(memory_region, i);
3563 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003564 if (receiver->receiver_permissions.receiver ==
3565 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003566 receiver_permissions =
3567 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003568 continue;
3569 }
3570
3571 if (share_state->retrieved_fragment_count[i] != 0U) {
3572 receivers_relinquished_memory = false;
3573 break;
3574 }
3575 }
3576
3577 clear = receivers_relinquished_memory &&
Daniel Boulby2e14ebe2024-01-15 16:21:44 +00003578 ((relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3579 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003580
3581 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003582 * Clear is not allowed for memory that was shared, as the
3583 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003584 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003585 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003586 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003587 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003588 goto out;
3589 }
3590
J-Alves639ddfc2023-11-21 14:17:26 +00003591	if (clear && ffa_get_data_access_attr(receiver_permissions) == FFA_DATA_ACCESS_RO) {
3592 dlog_verbose("%s: RO memory can't use clear memory flag.\n",
3593 __func__);
3594 ret = ffa_error(FFA_DENIED);
3595 goto out;
3596 }
3597
Andrew Walbranca808b12020-05-15 17:22:28 +01003598 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003599 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003600 share_state->fragment_constituent_counts,
3601 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003602
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003603 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003604 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003605 * Mark memory handle as not retrieved, so it can be
3606 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003607 */
J-Alves8eb19162022-04-28 10:56:48 +01003608 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003609 }
3610
3611out:
3612 share_states_unlock(&share_states);
3613 dump_share_states();
3614 return ret;
3615}
3616
3617/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003618 * Validates that the reclaim transition is allowed for the given
3619 * handle, updates the page table of the reclaiming VM, and frees the
3620 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003621 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003622struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003623 ffa_memory_handle_t handle,
3624 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003625 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003626{
3627 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003628 struct ffa_memory_share_state *share_state;
3629 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003630 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003631
3632 dump_share_states();
3633
3634 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003635
Karl Meakin4a2854a2023-06-30 16:26:52 +01003636 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003637 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003638 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003639 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003640 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003641 goto out;
3642 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003643 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003644
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003645 CHECK(memory_region != NULL);
3646
J-Alvesa9cd7e32022-07-01 13:49:33 +01003647 if (vm_id_is_current_world(to_locked.vm->id) &&
3648 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003649 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003650 "VM %#x attempted to reclaim memory handle %#x "
3651 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003652 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003653 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003654 goto out;
3655 }
3656
Andrew Walbranca808b12020-05-15 17:22:28 +01003657 if (!share_state->sending_complete) {
3658 dlog_verbose(
3659 "Memory with handle %#x not fully sent, can't "
3660 "reclaim.\n",
3661 handle);
3662 ret = ffa_error(FFA_INVALID_PARAMETERS);
3663 goto out;
3664 }
3665
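	/*
	 * Memory can only be reclaimed once every borrower has relinquished
	 * it, i.e. no borrower still has an outstanding retrieval.
	 */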
J-Alves752236c2022-04-28 11:07:47 +01003666 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3667 if (share_state->retrieved_fragment_count[i] != 0) {
3668 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003669 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00003670 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003671 "borrowers(%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003672 handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003673 ffa_memory_region_get_receiver(memory_region, i)
3674 ->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01003675 ret = ffa_error(FFA_DENIED);
3676 goto out;
3677 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003678 }
3679
Andrew Walbranca808b12020-05-15 17:22:28 +01003680 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01003681 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003682 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003683 share_state->fragment_count, share_state->sender_orig_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003684 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool,
3685 NULL, false);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003686
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003687 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003688 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003689 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003690 }
3691
3692out:
3693 share_states_unlock(&share_states);
3694 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003695}