/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include <stdint.h>

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/panic.h"
#include "hf/plat/memory_protect.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
        struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
        struct ffa_memory_access *receiver;
        uint32_t composite_offset;

        CHECK(receiver_index < memory_region->receiver_count);

        receiver =
                ffa_memory_region_get_receiver(memory_region, receiver_index);
        CHECK(receiver != NULL);

        composite_offset = receiver->composite_memory_region_offset;

        CHECK(composite_offset != 0);

        return composite_offset + sizeof(struct ffa_composite_memory_region);
}

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
        return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
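
/*
 * Illustrative sketch (assuming the allocator bits live entirely within
 * FFA_MEMORY_HANDLE_ALLOCATOR_MASK): a handle built by
 * `plat_ffa_memory_handle_make()` is expected to round-trip through
 * `ffa_memory_handle_get_index()`:
 *
 *      ffa_memory_handle_t handle = plat_ffa_memory_handle_make(5);
 *      assert(ffa_memory_handle_get_index(handle) == 5);
 */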

/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
        struct share_states_locked share_states, uint32_t share_func,
        struct ffa_memory_region *memory_region, uint32_t fragment_length,
        ffa_memory_handle_t handle)
{
        assert(share_states.share_states != NULL);
        assert(memory_region != NULL);

        for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
                if (share_states.share_states[i].share_func == 0) {
                        struct ffa_memory_share_state *allocated_state =
                                &share_states.share_states[i];
                        struct ffa_composite_memory_region *composite =
                                ffa_memory_region_get_composite(memory_region,
                                                                0);

                        if (handle == FFA_MEMORY_HANDLE_INVALID) {
                                memory_region->handle =
                                        plat_ffa_memory_handle_make(i);
                        } else {
                                memory_region->handle = handle;
                        }
                        allocated_state->share_func = share_func;
                        allocated_state->memory_region = memory_region;
                        allocated_state->fragment_count = 1;
                        allocated_state->fragments[0] = composite->constituents;
                        allocated_state->fragment_constituent_counts[0] =
                                (fragment_length -
                                 ffa_composite_constituent_offset(memory_region,
                                                                  0)) /
                                sizeof(struct ffa_memory_region_constituent);
                        allocated_state->sending_complete = false;
                        for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
                             ++j) {
                                allocated_state->retrieved_fragment_count[j] =
                                        0;
                        }
                        return allocated_state;
                }
        }

        return NULL;
}
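
/*
 * Hedged usage sketch for `allocate_share_state()` (illustrative only; names
 * such as `memory_region` and `fragment_length` stand for values the caller
 * already owns): the share states lock must be held, and a NULL return means
 * no free slot was available.
 *
 *      struct share_states_locked states = share_states_lock();
 *      struct ffa_memory_share_state *share_state = allocate_share_state(
 *              states, FFA_MEM_SHARE_32, memory_region, fragment_length,
 *              FFA_MEMORY_HANDLE_INVALID);
 *
 *      if (share_state == NULL) {
 *              share_states_unlock(&states);
 *              return ffa_error(FFA_NO_MEMORY);
 *      }
 *      ...
 *      share_states_unlock(&states);
 */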

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
        sl_lock(&share_states_lock_instance);

        return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
        assert(share_states->share_states != NULL);
        share_states->share_states = NULL;
        sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
        struct share_states_locked share_states, ffa_memory_handle_t handle)
{
        struct ffa_memory_share_state *share_state;

        assert(share_states.share_states != NULL);

        /*
         * First look for a share_state allocated by us, in which case the
         * handle is based on the index.
         */
        if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
                uint64_t index = ffa_memory_handle_get_index(handle);

                if (index < MAX_MEM_SHARES) {
                        share_state = &share_states.share_states[index];
                        if (share_state->share_func != 0) {
                                return share_state;
                        }
                }
        }

        /* Fall back to a linear scan. */
        for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
                share_state = &share_states.share_states[index];
                if (share_state->memory_region != NULL &&
                    share_state->memory_region->handle == handle &&
                    share_state->share_func != 0) {
                        return share_state;
                }
        }

        return NULL;
}
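
/*
 * Hedged lookup sketch (illustrative only): callers such as the retrieve or
 * reclaim paths are expected to take the lock, look the handle up and treat a
 * NULL result as an unknown handle.
 *
 *      struct share_states_locked states = share_states_lock();
 *      struct ffa_memory_share_state *share_state =
 *              get_share_state(states, handle);
 *
 *      if (share_state == NULL) {
 *              share_states_unlock(&states);
 *              return ffa_error(FFA_INVALID_PARAMETERS);
 *      }
 */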

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
                      struct ffa_memory_share_state *share_state,
                      struct mpool *page_pool)
{
        uint32_t i;

        assert(share_states.share_states != NULL);
        share_state->share_func = 0;
        share_state->sending_complete = false;
        mpool_free(page_pool, share_state->memory_region);
        /*
         * First fragment is part of the same page as the `memory_region`, so it
         * doesn't need to be freed separately.
         */
        share_state->fragments[0] = NULL;
        share_state->fragment_constituent_counts[0] = 0;
        for (i = 1; i < share_state->fragment_count; ++i) {
                mpool_free(page_pool, share_state->fragments[i]);
                share_state->fragments[i] = NULL;
                share_state->fragment_constituent_counts[i] = 0;
        }
        share_state->fragment_count = 0;
        share_state->memory_region = NULL;
        share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
                                  struct ffa_memory_share_state *share_state)
{
        struct ffa_composite_memory_region *composite;
        uint32_t expected_constituent_count;
        uint32_t fragment_constituent_count_total = 0;
        uint32_t i;

        /* Lock must be held. */
        assert(share_states.share_states != NULL);

        /*
         * Share state must already be valid, or it's not possible to get hold
         * of it.
         */
        CHECK(share_state->memory_region != NULL &&
              share_state->share_func != 0);

        composite =
                ffa_memory_region_get_composite(share_state->memory_region, 0);
        expected_constituent_count = composite->constituent_count;
        for (i = 0; i < share_state->fragment_count; ++i) {
                fragment_constituent_count_total +=
                        share_state->fragment_constituent_counts[i];
        }
        dlog_verbose(
                "Checking completion: constituent count %d/%d from %d "
                "fragments.\n",
                fragment_constituent_count_total, expected_constituent_count,
                share_state->fragment_count);

        return fragment_constituent_count_total == expected_constituent_count;
}
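
/*
 * Worked example (illustrative only): if the composite region declares 8
 * constituents and they arrived split 3 + 3 + 2 over three fragments, the
 * totals match and the state counts as fully sent; with only the first two
 * fragments received (3 + 3 = 6 != 8) it does not.
 */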

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
        struct share_states_locked share_states,
        struct ffa_memory_share_state *share_state)
{
        uint32_t next_fragment_offset;
        uint32_t i;

        /* Lock must be held. */
        assert(share_states.share_states != NULL);

        next_fragment_offset =
                ffa_composite_constituent_offset(share_state->memory_region, 0);
        for (i = 0; i < share_state->fragment_count; ++i) {
                next_fragment_offset +=
                        share_state->fragment_constituent_counts[i] *
                        sizeof(struct ffa_memory_region_constituent);
        }

        return next_fragment_offset;
}
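
/*
 * Worked example (illustrative only, assuming a 16-byte
 * `struct ffa_memory_region_constituent`): if the first constituent sits at
 * offset 0x50 within the transaction descriptor and two fragments carrying
 * 3 and 2 constituents have already been received, the next fragment is
 * expected at offset 0x50 + (3 + 2) * 16 = 0xA0.
 */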

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
        uint32_t i;

        if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
                return;
        }

        dlog("from VM %#x, attributes (shareability = %s, cacheability = %s, "
             "type = %s, security = %s), flags %#x, handle %#lx "
             "tag %lu, memory access descriptor size %u, to %u "
             "recipients [",
             memory_region->sender,
             ffa_memory_shareability_name(
                     memory_region->attributes.shareability),
             ffa_memory_cacheability_name(
                     memory_region->attributes.cacheability),
             ffa_memory_type_name(memory_region->attributes.type),
             ffa_memory_security_name(memory_region->attributes.security),
             memory_region->flags, memory_region->handle, memory_region->tag,
             memory_region->memory_access_desc_size,
             memory_region->receiver_count);
        for (i = 0; i < memory_region->receiver_count; ++i) {
                struct ffa_memory_access *receiver =
                        ffa_memory_region_get_receiver(memory_region, i);
                if (i != 0) {
                        dlog(", ");
                }
                dlog("Receiver %#x: permissions (%s, %s) (offset %u)",
                     receiver->receiver_permissions.receiver,
                     ffa_data_access_name(receiver->receiver_permissions
                                                  .permissions.data_access),
                     ffa_instruction_access_name(
                             receiver->receiver_permissions.permissions
                                     .instruction_access),
                     receiver->composite_memory_region_offset);
                /* The impdef field is only present from v1.2 onwards. */
                if (ffa_version_from_memory_access_desc_size(
                            memory_region->memory_access_desc_size) >=
                    MAKE_FFA_VERSION(1, 2)) {
                        dlog(", impdef: %#lx %#lx", receiver->impdef.val[0],
                             receiver->impdef.val[1]);
                }
        }
        dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
        uint32_t i;

        if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
                return;
        }

        dlog("Current share states:\n");
        sl_lock(&share_states_lock_instance);
        for (i = 0; i < MAX_MEM_SHARES; ++i) {
                if (share_states[i].share_func != 0) {
                        switch (share_states[i].share_func) {
                        case FFA_MEM_SHARE_64:
                        case FFA_MEM_SHARE_32:
                                dlog("SHARE");
                                break;
                        case FFA_MEM_LEND_64:
                        case FFA_MEM_LEND_32:
                                dlog("LEND");
                                break;
                        case FFA_MEM_DONATE_64:
                        case FFA_MEM_DONATE_32:
                                dlog("DONATE");
                                break;
                        default:
                                dlog("invalid share_func %#x",
                                     share_states[i].share_func);
                        }
                        dlog(" %#lx (", share_states[i].memory_region->handle);
                        dump_memory_region(share_states[i].memory_region);
                        if (share_states[i].sending_complete) {
                                dlog("): fully sent");
                        } else {
                                dlog("): partially sent");
                        }
                        dlog(" with %d fragments, %d retrieved, "
                             "sender's original mode: %#x\n",
                             share_states[i].fragment_count,
                             share_states[i].retrieved_fragment_count[0],
                             share_states[i].sender_orig_mode);
                }
        }
        sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
        ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
        uint32_t mode = 0;

        switch (permissions.data_access) {
        case FFA_DATA_ACCESS_RO:
                mode = MM_MODE_R;
                break;
        case FFA_DATA_ACCESS_RW:
                mode = MM_MODE_R | MM_MODE_W;
                break;
        case FFA_DATA_ACCESS_NOT_SPECIFIED:
                mode = (default_mode & (MM_MODE_R | MM_MODE_W));
                break;
        case FFA_DATA_ACCESS_RESERVED:
                panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
        }

        switch (permissions.instruction_access) {
        case FFA_INSTRUCTION_ACCESS_NX:
                break;
        case FFA_INSTRUCTION_ACCESS_X:
                mode |= MM_MODE_X;
                break;
        case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
                mode |= (default_mode & MM_MODE_X);
                break;
        case FFA_INSTRUCTION_ACCESS_RESERVED:
                panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
        }

        /* Set the security state bit if necessary. */
        if ((default_mode & plat_ffa_other_world_mode()) != 0) {
                mode |= plat_ffa_other_world_mode();
        }

        return mode;
}
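
/*
 * Summary of the conversion above (illustrative only): (RW, X) maps to
 * MM_MODE_R | MM_MODE_W | MM_MODE_X, (RO, NX) maps to MM_MODE_R, and any
 * "not specified" field inherits the corresponding bits from `default_mode`.
 * For example, data access RO with instruction access NOT_SPECIFIED yields
 * MM_MODE_R plus whatever MM_MODE_X bit the sender's original mode carried,
 * and the other-world security bit is propagated from `default_mode` in all
 * cases.
 */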

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
        struct vm_locked vm, uint32_t *orig_mode,
        struct ffa_memory_region_constituent **fragments,
        const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
        uint32_t i;
        uint32_t j;

        if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
                /*
                 * Fail if there are no constituents. Otherwise we would get an
                 * uninitialised *orig_mode.
                 */
                dlog_verbose("%s: no constituents\n", __func__);
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        for (i = 0; i < fragment_count; ++i) {
                for (j = 0; j < fragment_constituent_counts[i]; ++j) {
                        ipaddr_t begin = ipa_init(fragments[i][j].address);
                        size_t size = fragments[i][j].page_count * PAGE_SIZE;
                        ipaddr_t end = ipa_add(begin, size);
                        uint32_t current_mode;

                        /* Fail if addresses are not page-aligned. */
                        if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
                            !is_aligned(ipa_addr(end), PAGE_SIZE)) {
                                dlog_verbose("%s: addresses not page-aligned\n",
                                             __func__);
                                return ffa_error(FFA_INVALID_PARAMETERS);
                        }

                        /*
                         * Ensure that this constituent memory range is all
                         * mapped with the same mode.
                         */
                        if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
                                dlog_verbose(
                                        "%s: constituent memory range "
                                        "%#lx..%#lx "
                                        "not mapped with the same mode\n",
                                        __func__, begin.ipa, end.ipa);
                                return ffa_error(FFA_DENIED);
                        }

                        /*
                         * Ensure that all constituents are mapped with the same
                         * mode.
                         */
                        if (i == 0) {
                                *orig_mode = current_mode;
                        } else if (current_mode != *orig_mode) {
                                dlog_verbose(
                                        "%s: expected mode %#x but was %#x for "
                                        "%d pages at %#lx.\n",
                                        __func__, *orig_mode, current_mode,
                                        fragments[i][j].page_count,
                                        ipa_addr(begin));
                                return ffa_error(FFA_DENIED);
                        }
                }
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

uint32_t ffa_version_from_memory_access_desc_size(
        uint32_t memory_access_desc_size)
{
        switch (memory_access_desc_size) {
        /*
         * v1.0 and v1.1 memory access descriptors are the same size; however,
         * v1.1 is the first version to include the memory access descriptor
         * size field, so return v1.1.
         */
        case sizeof(struct ffa_memory_access_v1_0):
                return MAKE_FFA_VERSION(1, 1);
        case sizeof(struct ffa_memory_access):
                return MAKE_FFA_VERSION(1, 2);
        }
        return 0;
}

/**
 * Check that the receivers' size and offset are valid for the sender's
 * FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
        uint32_t receivers_size, uint32_t receivers_offset,
        uint32_t ffa_version)
{
        /*
         * Check that the version that the memory access descriptor size
         * belongs to is compatible with the FF-A version we believe the
         * sender to be.
         */
        uint32_t expected_ffa_version =
                ffa_version_from_memory_access_desc_size(receivers_size);
        if (!FFA_VERSIONS_ARE_COMPATIBLE(expected_ffa_version, ffa_version)) {
                return false;
        }

        /*
         * Check the receivers_offset matches the version we found from the
         * memory access descriptor size.
         */
        switch (expected_ffa_version) {
        case MAKE_FFA_VERSION(1, 1):
        case MAKE_FFA_VERSION(1, 2):
                return receivers_offset == sizeof(struct ffa_memory_region);
        default:
                return false;
        }
}

/**
 * Check that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows, and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
                                    uint32_t ffa_version,
                                    uint32_t fragment_length,
                                    bool send_transaction)
{
        uint32_t receiver_count;
        struct ffa_memory_access *receiver;
        uint32_t composite_offset_0;
        struct ffa_memory_region_v1_0 *memory_region_v1_0 =
                (struct ffa_memory_region_v1_0 *)memory_region;

        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                /* Check the reserved fields are 0. */
                if (memory_region_v1_0->reserved_0 != 0 ||
                    memory_region_v1_0->reserved_1 != 0) {
                        dlog_verbose("Reserved fields must be 0.\n");
                        return false;
                }

                receiver_count = memory_region_v1_0->receiver_count;
        } else {
                uint32_t receivers_size =
                        memory_region->memory_access_desc_size;
                uint32_t receivers_offset = memory_region->receivers_offset;

                /* Check the reserved fields are 0. */
                if (memory_region->reserved[0] != 0 ||
                    memory_region->reserved[1] != 0 ||
                    memory_region->reserved[2] != 0) {
                        dlog_verbose("Reserved fields must be 0.\n");
                        return false;
                }

                /*
                 * Check memory_access_desc_size matches the size of the struct
                 * for the sender's FF-A version.
                 */
                if (!receiver_size_and_offset_valid_for_version(
                            receivers_size, receivers_offset, ffa_version)) {
                        dlog_verbose(
                                "Invalid memory access descriptor size %d, "
                                "or receiver offset %d, "
                                "for FF-A version %#x\n",
                                receivers_size, receivers_offset, ffa_version);
                        return false;
                }

                receiver_count = memory_region->receiver_count;
        }

        /* Check the receiver count is not too large. */
        if (receiver_count > MAX_MEM_SHARE_RECIPIENTS || receiver_count < 1) {
                dlog_verbose(
                        "Receiver count must be 0 < receiver_count <= %u, "
                        "specified %u\n",
                        MAX_MEM_SHARE_RECIPIENTS, receiver_count);
                return false;
        }

        /* Check values in the memory access descriptors. */
        /*
         * The composite offset values must be the same for all receivers, so
         * check that the first one is valid and then that they are all the
         * same.
         */
        receiver = ffa_version == MAKE_FFA_VERSION(1, 0)
                           ? (struct ffa_memory_access *)&memory_region_v1_0
                                      ->receivers[0]
                           : ffa_memory_region_get_receiver(memory_region, 0);
        assert(receiver != NULL);
        composite_offset_0 = receiver->composite_memory_region_offset;

        if (!send_transaction) {
                if (composite_offset_0 != 0) {
                        dlog_verbose(
                                "Composite memory region descriptor offset "
                                "must be 0 for retrieve requests. "
                                "Currently %d\n",
                                composite_offset_0);
                        return false;
                }
        } else {
                bool comp_offset_is_zero = composite_offset_0 == 0U;
                bool comp_offset_lt_transaction_descriptor_size =
                        composite_offset_0 <
                        (sizeof(struct ffa_memory_region) +
                         (uint32_t)(memory_region->memory_access_desc_size *
                                    memory_region->receiver_count));
                bool comp_offset_with_comp_gt_fragment_length =
                        composite_offset_0 +
                                sizeof(struct ffa_composite_memory_region) >
                        fragment_length;
                if (comp_offset_is_zero ||
                    comp_offset_lt_transaction_descriptor_size ||
                    comp_offset_with_comp_gt_fragment_length) {
                        dlog_verbose(
                                "Invalid composite memory region descriptor "
                                "offset for send transaction %u\n",
                                composite_offset_0);
                        return false;
                }
        }

        for (size_t i = 0; i < memory_region->receiver_count; i++) {
                uint32_t composite_offset;

                if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                        struct ffa_memory_access_v1_0 *receiver_v1_0 =
                                &memory_region_v1_0->receivers[i];
                        /* Check reserved fields are 0. */
                        if (receiver_v1_0->reserved_0 != 0) {
                                dlog_verbose(
                                        "Reserved field in the memory access "
                                        "descriptor must be zero. Currently "
                                        "receiver %zu has a reserved field "
                                        "with a value of %lu\n",
                                        i, receiver_v1_0->reserved_0);
                                return false;
                        }
                        /*
                         * We can cast to the current version receiver as the
                         * remaining fields we are checking have the same
                         * offsets for all versions since memory access
                         * descriptors are forwards compatible.
                         */
                        receiver = (struct ffa_memory_access *)receiver_v1_0;
                } else {
                        receiver = ffa_memory_region_get_receiver(memory_region,
                                                                  i);
                        assert(receiver != NULL);

                        if (receiver->reserved_0 != 0) {
                                dlog_verbose(
                                        "Reserved field in the memory access "
                                        "descriptor must be zero. Currently "
                                        "receiver %zu has a reserved field "
                                        "with a value of %lu\n",
                                        i, receiver->reserved_0);
                                return false;
                        }
                }

                /* Check composite offset values are equal for all receivers. */
                composite_offset = receiver->composite_memory_region_offset;
                if (composite_offset != composite_offset_0) {
                        dlog_verbose(
                                "Composite offset %x differs from %x in "
                                "index\n",
                                composite_offset, composite_offset_0);
                        return false;
                }
        }
        return true;
}
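
/*
 * Summary of the layout checks above (illustrative only): for a send
 * transaction the single composite offset shared by every receiver must
 * satisfy
 *
 *      composite_offset >= sizeof(struct ffa_memory_region) +
 *                          receiver_count * memory_access_desc_size
 *      composite_offset + sizeof(struct ffa_composite_memory_region) <=
 *                          fragment_length
 *
 * while a retrieve request must leave the composite offset at 0.
 */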

/**
 * If the receivers for the memory management operation are all from the
 * secure world and this isn't an FFA_MEM_SHARE, then request a memory
 * security state update by returning MAP_ACTION_CHECK_PROTECT.
 */
static enum ffa_map_action ffa_mem_send_get_map_action(
        bool all_receivers_from_current_world, ffa_id_t sender_id,
        uint32_t mem_func_id)
{
        const bool is_memory_share_abi = mem_func_id == FFA_MEM_SHARE_32 ||
                                         mem_func_id == FFA_MEM_SHARE_64;
        const bool protect_memory =
                (!is_memory_share_abi && all_receivers_from_current_world &&
                 ffa_is_vm_id(sender_id));

        return protect_memory ? MAP_ACTION_CHECK_PROTECT : MAP_ACTION_CHECK;
}
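
/*
 * Decision table for the helper above (illustrative only; in the SPMC,
 * "current world" means the secure world): protection is only requested when
 * a normal-world sender lends or donates exclusively to secure-world
 * receivers.
 *
 *      operation      sender    receivers                action
 *      SHARE          any       any                      MAP_ACTION_CHECK
 *      LEND/DONATE    VM (NWd)  all in current world     MAP_ACTION_CHECK_PROTECT
 *      LEND/DONATE    SP        any                      MAP_ACTION_CHECK
 *      LEND/DONATE    VM (NWd)  some in the other world  MAP_ACTION_CHECK
 */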

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM. It outputs the mapping action that needs to be
 * invoked for the given memory range. On memory lend/donate there
 * could be a need to protect the memory from the normal world.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
        struct vm_locked from, uint32_t share_func,
        struct ffa_memory_region *memory_region, uint32_t *orig_from_mode,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t *from_mode, enum ffa_map_action *map_action)
{
        const uint32_t state_mask =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        struct ffa_value ret;
        bool all_receivers_from_current_world = true;
        uint32_t receivers_count = memory_region->receiver_count;
        const bool is_memory_lend = (share_func == FFA_MEM_LEND_32) ||
                                    (share_func == FFA_MEM_LEND_64);

        ret = constituents_get_mode(from, orig_from_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                dlog_verbose("Inconsistent modes.\n");
                return ret;
        }

        /* Device memory regions can only be lent to a single borrower. */
        if ((*orig_from_mode & MM_MODE_D) != 0U &&
            !(is_memory_lend && receivers_count == 1)) {
                dlog_verbose(
                        "Device memory can only be lent to a single borrower "
                        "(mode is %#x).\n",
                        *orig_from_mode);
                return ffa_error(FFA_DENIED);
        }

        /*
         * Ensure the sender is the owner and has exclusive access to the
         * memory.
         */
        if ((*orig_from_mode & state_mask) != 0) {
                return ffa_error(FFA_DENIED);
        }

        assert(receivers_count > 0U);

        for (uint32_t i = 0U; i < receivers_count; i++) {
                struct ffa_memory_access *receiver =
                        ffa_memory_region_get_receiver(memory_region, i);
                assert(receiver != NULL);
                ffa_memory_access_permissions_t permissions =
                        receiver->receiver_permissions.permissions;
                uint32_t required_from_mode = ffa_memory_permissions_to_mode(
                        permissions, *orig_from_mode);

                /*
                 * The assumption is that at this point an operation from an
                 * SP to a receiver VM should have returned an FFA_ERROR
                 * already.
                 */
                if (!ffa_is_vm_id(from.vm->id)) {
                        assert(!ffa_is_vm_id(
                                receiver->receiver_permissions.receiver));
                }

                /* Track whether all receivers are from the current world. */
                all_receivers_from_current_world =
                        all_receivers_from_current_world &&
                        vm_id_is_current_world(
                                receiver->receiver_permissions.receiver);

                if ((*orig_from_mode & required_from_mode) !=
                    required_from_mode) {
                        dlog_verbose(
                                "Sender tried to send memory with permissions "
                                "which required mode %#x but only had %#x "
                                "itself.\n",
                                required_from_mode, *orig_from_mode);
                        return ffa_error(FFA_DENIED);
                }
        }

        *map_action = ffa_mem_send_get_map_action(
                all_receivers_from_current_world, from.vm->id, share_func);

        /* Find the appropriate new mode. */
        *from_mode = ~state_mask & *orig_from_mode;
        switch (share_func) {
        case FFA_MEM_DONATE_64:
        case FFA_MEM_DONATE_32:
                *from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
                break;
        case FFA_MEM_LEND_64:
        case FFA_MEM_LEND_32:
                *from_mode |= MM_MODE_INVALID;
                break;
        case FFA_MEM_SHARE_64:
        case FFA_MEM_SHARE_32:
                *from_mode |= MM_MODE_SHARED;
                break;

        default:
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
        struct vm_locked from, uint32_t *orig_from_mode,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t *from_mode)
{
        const uint32_t state_mask =
                MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
        uint32_t orig_from_state;
        struct ffa_value ret;

        ret = constituents_get_mode(from, orig_from_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                return ret;
        }

        /* Ensure the address range is normal memory and not a device. */
        if (*orig_from_mode & MM_MODE_D) {
                dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
                             *orig_from_mode);
                return ffa_error(FFA_DENIED);
        }

        /*
         * Ensure the relinquishing VM is not the owner but has access to the
         * memory.
         */
        orig_from_state = *orig_from_mode & state_mask;
        if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
                dlog_verbose(
                        "Tried to relinquish memory in state %#x (masked %#x "
                        "but should be %#x).\n",
                        *orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
                return ffa_error(FFA_DENIED);
        }

        /* Find the appropriate new mode. */
        *from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
        struct vm_locked to, uint32_t share_func,
        struct ffa_memory_region_constituent **fragments,
        uint32_t *fragment_constituent_counts, uint32_t fragment_count,
        uint32_t memory_to_attributes, uint32_t *to_mode, bool memory_protected,
        enum ffa_map_action *map_action)
{
        uint32_t orig_to_mode;
        struct ffa_value ret;

        ret = constituents_get_mode(to, &orig_to_mode, fragments,
                                    fragment_constituent_counts,
                                    fragment_count);
        if (ret.func != FFA_SUCCESS_32) {
                dlog_verbose("Inconsistent modes.\n");
                return ret;
        }

        /* Find the appropriate new mode. */
        *to_mode = memory_to_attributes;

        if (share_func == FFA_MEM_RECLAIM_32) {
                /*
                 * If the original FF-A memory send call has been processed
                 * successfully, it is expected that orig_to_mode would overlap
                 * with `state_mask`, as a result of the function
                 * `ffa_send_check_transition`.
                 *
                 * If Hafnium is the SPMC:
                 * - If the caller of the reclaim interface is an SP, the
                 *   memory shall have been protected throughout the flow.
                 * - If the caller of the reclaim is from the NWd, the memory
                 *   may have been protected at the time of lending/donating
                 *   the memory. In such a case, set the action to unprotect
                 *   the memory in the handling of the reclaim operation.
                 * - If Hafnium is the hypervisor, memory shall never have been
                 *   protected in memory lend/share/donate.
                 *
                 * More details in the doc comment of the function
                 * `ffa_region_group_identity_map`.
                 */
                if (vm_id_is_current_world(to.vm->id)) {
                        assert((orig_to_mode &
                                (MM_MODE_INVALID | MM_MODE_UNOWNED |
                                 MM_MODE_SHARED)) != 0U);
                        assert(!memory_protected);
                } else if (to.vm->id == HF_OTHER_WORLD_ID &&
                           map_action != NULL && memory_protected) {
                        *map_action = MAP_ACTION_COMMIT_UNPROTECT;
                }
        } else {
                /*
                 * If the retriever is from a virtual FF-A instance:
                 * Ensure the retriever has the expected state. We don't care
                 * about the MM_MODE_SHARED bit; either with or without it set
                 * are both valid representations of the !O-NA state.
                 */
                if (vm_id_is_current_world(to.vm->id) &&
                    to.vm->id != HF_PRIMARY_VM_ID &&
                    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
                            MM_MODE_UNMAPPED_MASK) {
                        return ffa_error(FFA_DENIED);
                }

                /*
                 * If memory has been protected before, clear the NS bit to
                 * allow secure access from the SP.
                 */
                if (memory_protected) {
                        *to_mode &= ~plat_ffa_other_world_mode();
                }
        }

        switch (share_func) {
        case FFA_MEM_DONATE_64:
        case FFA_MEM_DONATE_32:
                *to_mode |= 0;
                break;
        case FFA_MEM_LEND_64:
        case FFA_MEM_LEND_32:
                *to_mode |= MM_MODE_UNOWNED;
                break;
        case FFA_MEM_SHARE_64:
        case FFA_MEM_SHARE_32:
                *to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
                break;

        case FFA_MEM_RECLAIM_32:
                *to_mode |= 0;
                break;

        default:
                dlog_error("Invalid share_func %#x.\n", share_func);
                return ffa_error(FFA_INVALID_PARAMETERS);
        }

        return (struct ffa_value){.func = FFA_SUCCESS_32};
}
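
/*
 * Resulting stage-2 mode bits set on the retriever by the switch above
 * (illustrative summary): DONATE and RECLAIM add nothing (the retriever
 * becomes, or returns to being, the exclusive owner), LEND adds
 * MM_MODE_UNOWNED, and SHARE adds MM_MODE_UNOWNED | MM_MODE_SHARED, all on
 * top of the requested memory attributes in `memory_to_attributes`.
 */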
Jose Marinho09b1db82019-08-08 09:16:59 +0100979
J-Alvescf6253e2024-01-03 13:48:48 +0000980/*
981 * Performs the operations related to the `action` MAP_ACTION_CHECK*.
982 * Returns:
983 * - FFA_SUCCESS_32: if all goes well.
984 * - FFA_ERROR_32: with FFA_NO_MEMORY, if there is no memory to manage
985 * the page table update. Or error code provided by the function
986 * `arch_memory_protect`.
987 */
988static struct ffa_value ffa_region_group_check_actions(
989 struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
990 struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
991 bool *memory_protected)
992{
993 struct ffa_value ret;
994 bool is_memory_protected;
995
996 if (!vm_identity_prepare(vm_locked, pa_begin, pa_end, mode, ppool)) {
997 dlog_verbose(
998 "%s: memory can't be mapped to %x due to lack of "
Karl Meakine8937d92024-03-19 16:04:25 +0000999 "memory. Base: %lx end: %lx\n",
J-Alvescf6253e2024-01-03 13:48:48 +00001000 __func__, vm_locked.vm->id, pa_addr(pa_begin),
1001 pa_addr(pa_end));
1002 return ffa_error(FFA_NO_MEMORY);
1003 }
1004
1005 switch (action) {
1006 case MAP_ACTION_CHECK:
1007 /* No protect requested. */
1008 is_memory_protected = false;
1009 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
1010 break;
1011 case MAP_ACTION_CHECK_PROTECT: {
1012 paddr_t last_protected_pa = pa_init(0);
1013
1014 ret = arch_memory_protect(pa_begin, pa_end, &last_protected_pa);
1015
1016 is_memory_protected = (ret.func == FFA_SUCCESS_32);
1017
1018 /*
1019 * - If protect memory has failed with FFA_DENIED, means some
1020 * range of memory was in the wrong state. In such case, SPM
1021 * reverts the state of the pages that were successfully
1022 * updated.
1023 * - If protect memory has failed with FFA_NOT_SUPPORTED, it
1024 * means the platform doesn't support the protection mechanism.
1025 * That said, it still permits the page table update to go
1026 * through. The variable
1027 * `is_memory_protected` will be equal to false.
1028 * - If protect memory has failed with FFA_INVALID_PARAMETERS,
1029 * break from switch and return the error.
1030 */
1031 if (ret.func == FFA_ERROR_32) {
1032 assert(!is_memory_protected);
1033 if (ffa_error_code(ret) == FFA_DENIED &&
1034 pa_addr(last_protected_pa) != (uintptr_t)0) {
1035 CHECK(arch_memory_unprotect(
1036 pa_begin,
1037 pa_add(last_protected_pa, PAGE_SIZE)));
1038 } else if (ffa_error_code(ret) == FFA_NOT_SUPPORTED) {
1039 ret = (struct ffa_value){
1040 .func = FFA_SUCCESS_32,
1041 };
1042 }
1043 }
1044 } break;
1045 default:
1046 panic("%s: invalid action to process %x\n", __func__, action);
1047 }
1048
1049 if (memory_protected != NULL) {
1050 *memory_protected = is_memory_protected;
1051 }
1052
1053 return ret;
1054}
1055
1056static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
1057 paddr_t pa_begin, paddr_t pa_end,
1058 struct mpool *ppool, uint32_t mode,
1059 enum ffa_map_action action)
1060{
1061 switch (action) {
1062 case MAP_ACTION_COMMIT_UNPROTECT:
1063 /*
1064 * Checking that it should succeed because SPM should be
1065 * unprotecting memory that it had protected before.
1066 */
1067 CHECK(arch_memory_unprotect(pa_begin, pa_end));
1068 case MAP_ACTION_COMMIT:
1069 vm_identity_commit(vm_locked, pa_begin, pa_end, mode, ppool,
1070 NULL);
1071 break;
1072 default:
1073 panic("%s: invalid action to process %x\n", __func__, action);
1074 }
1075}
1076
Jose Marinho09b1db82019-08-08 09:16:59 +01001077/**
J-Alves063ad832023-10-03 18:05:40 +01001078 * Helper function to revert a failed "Protect" action from the SPMC:
1079 * - `fragment_count`: should specify the number of fragments to traverse from
1080 * `fragments`. This may not be the full amount of fragments that are part of
1081 * the share_state structure.
1082 * - `fragment_constituent_counts`: array holding the amount of constituents
1083 * per fragment.
1084 * - `end`: pointer to the constituent that failed the "protect" action. It
1085 * shall be part of the last fragment, and it shall make the loop below break.
1086 */
1087static void ffa_region_group_fragments_revert_protect(
1088 struct ffa_memory_region_constituent **fragments,
1089 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1090 const struct ffa_memory_region_constituent *end)
1091{
1092 for (uint32_t i = 0; i < fragment_count; ++i) {
1093 for (uint32_t j = 0; j < fragment_constituent_counts[i]; ++j) {
1094 struct ffa_memory_region_constituent *constituent =
1095 &fragments[i][j];
1096 size_t size = constituent->page_count * PAGE_SIZE;
1097 paddr_t pa_begin =
1098 pa_from_ipa(ipa_init(constituent->address));
1099 paddr_t pa_end = pa_add(pa_begin, size);
1100
Karl Meakine8937d92024-03-19 16:04:25 +00001101 dlog_verbose("%s: reverting fragment %lx size %zx\n",
J-Alves063ad832023-10-03 18:05:40 +01001102 __func__, pa_addr(pa_begin), size);
1103
1104 if (constituent == end) {
1105 /*
1106 * The last constituent is expected to be in the
1107 * last fragment.
1108 */
1109 assert(i == fragment_count - 1);
1110 break;
1111 }
1112
1113 CHECK(arch_memory_unprotect(pa_begin, pa_end));
1114 }
1115 }
1116}
1117
1118/**
Jose Marinho09b1db82019-08-08 09:16:59 +01001119 * Updates a VM's page table such that the given set of physical address ranges
1120 * are mapped in the address space at the corresponding address ranges, in the
1121 * mode provided.
1122 *
J-Alves0a83dc22023-05-05 09:50:37 +01001123 * The enum ffa_map_action determines the action taken from a call to the
1124 * function below:
1125 * - If action is MAP_ACTION_CHECK, the page tables will be allocated from the
1126 * mpool but no mappings will actually be updated. This function must always
1127 * be called first with action set to MAP_ACTION_CHECK to check that it will
1128 * succeed before calling ffa_region_group_identity_map with whichever one of
1129 * the remaining actions, to avoid leaving the page table in a half-updated
1130 * state.
1131 * - The action MAP_ACTION_COMMIT allocates the page tables from the mpool, and
1132 * changes the memory mappings.
J-Alvescf6253e2024-01-03 13:48:48 +00001133 * - The action MAP_ACTION_CHECK_PROTECT extends the MAP_ACTION_CHECK with an
1134 * invocation to the monitor to update the security state of the memory,
1135 * to that of the SPMC.
1136 * - The action MAP_ACTION_COMMIT_UNPROTECT extends the MAP_ACTION_COMMIT
1137 * with a call into the monitor, to reset the security state of memory
1138 * that has priorly been mapped with the MAP_ACTION_CHECK_PROTECT action.
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001139 * vm_ptable_defrag should always be called after a series of page table
1140 * updates, whether they succeed or fail.
Jose Marinho09b1db82019-08-08 09:16:59 +01001141 *
J-Alvescf6253e2024-01-03 13:48:48 +00001142 * If all goes well, returns FFA_SUCCESS_32; or FFA_ERROR, with following
1143 * error codes:
1144 * - FFA_INVALID_PARAMETERS: invalid range of memory.
1145 * - FFA_DENIED:
1146 *
Jose Marinho09b1db82019-08-08 09:16:59 +01001147 * made to memory mappings.
1148 */
J-Alvescf6253e2024-01-03 13:48:48 +00001149struct ffa_value ffa_region_group_identity_map(
Andrew Walbranf4b51af2020-02-03 14:44:54 +00001150 struct vm_locked vm_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001151 struct ffa_memory_region_constituent **fragments,
1152 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alvescf6253e2024-01-03 13:48:48 +00001153 uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
1154 bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001155{
Andrew Walbranca808b12020-05-15 17:22:28 +01001156 uint32_t i;
1157 uint32_t j;
J-Alvescf6253e2024-01-03 13:48:48 +00001158 struct ffa_value ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001159
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001160 if (vm_locked.vm->el0_partition) {
1161 mode |= MM_MODE_USER | MM_MODE_NG;
1162 }
1163
Andrew Walbranca808b12020-05-15 17:22:28 +01001164 /* Iterate over the memory region constituents within each fragment. */
1165 for (i = 0; i < fragment_count; ++i) {
1166 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
J-Alves063ad832023-10-03 18:05:40 +01001167 struct ffa_memory_region_constituent *constituent =
1168 &fragments[i][j];
1169 size_t size = constituent->page_count * PAGE_SIZE;
Andrew Walbranca808b12020-05-15 17:22:28 +01001170 paddr_t pa_begin =
J-Alves063ad832023-10-03 18:05:40 +01001171 pa_from_ipa(ipa_init(constituent->address));
Andrew Walbranca808b12020-05-15 17:22:28 +01001172 paddr_t pa_end = pa_add(pa_begin, size);
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001173 uint32_t pa_bits =
1174 arch_mm_get_pa_bits(arch_mm_get_pa_range());
Federico Recanati4fd065d2021-12-13 20:06:23 +01001175
1176 /*
1177 * Ensure the requested region falls into system's PA
1178 * range.
1179 */
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001180 if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
1181 ((pa_addr(pa_end) >> pa_bits) > 0)) {
Federico Recanati4fd065d2021-12-13 20:06:23 +01001182 dlog_error("Region is outside of PA Range\n");
J-Alvescf6253e2024-01-03 13:48:48 +00001183 return ffa_error(FFA_INVALID_PARAMETERS);
Federico Recanati4fd065d2021-12-13 20:06:23 +01001184 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001185
J-Alvescf6253e2024-01-03 13:48:48 +00001186 if (action <= MAP_ACTION_CHECK_PROTECT) {
1187 ret = ffa_region_group_check_actions(
1188 vm_locked, pa_begin, pa_end, ppool,
1189 mode, action, memory_protected);
J-Alves063ad832023-10-03 18:05:40 +01001190
1191 if (ret.func == FFA_ERROR_32 &&
1192 ffa_error_code(ret) == FFA_DENIED) {
1193 if (memory_protected != NULL) {
1194 assert(!*memory_protected);
1195 }
1196
1197 ffa_region_group_fragments_revert_protect(
1198 fragments,
1199 fragment_constituent_counts,
1200 i + 1, constituent);
1201 break;
1202 }
J-Alvescf6253e2024-01-03 13:48:48 +00001203 } else if (action >= MAP_ACTION_COMMIT &&
1204 action < MAP_ACTION_MAX) {
1205 ffa_region_group_commit_actions(
1206 vm_locked, pa_begin, pa_end, ppool,
1207 mode, action);
1208 ret = (struct ffa_value){
1209 .func = FFA_SUCCESS_32};
1210 } else {
1211 panic("%s: Unknown ffa_map_action.\n",
1212 __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001213 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001214 }
1215 }
1216
J-Alvescf6253e2024-01-03 13:48:48 +00001217 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001218}
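
/*
 * Illustrative usage (a sketch, not code from this file): callers are
 * expected to reserve with a check action first and only then commit, as
 * ffa_send_check_update below does. Variable names here are placeholders.
 *
 *	ret = ffa_region_group_identity_map(vm_locked, fragments,
 *					    constituent_counts, fragment_count,
 *					    mode, ppool, MAP_ACTION_CHECK,
 *					    NULL);
 *	if (ret.func == FFA_SUCCESS_32) {
 *		CHECK(ffa_region_group_identity_map(
 *			      vm_locked, fragments, constituent_counts,
 *			      fragment_count, mode, ppool,
 *			      MAP_ACTION_COMMIT, NULL)
 *			      .func == FFA_SUCCESS_32);
 *	}
 */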
1219
1220/**
1221 * Clears a region of physical memory by overwriting it with zeros. The data is
1222 * flushed from the cache so the memory has been cleared across the system.
1223 */
J-Alves7db32002021-12-14 14:44:50 +00001224static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
1225 uint32_t extra_mode_attributes)
Jose Marinho09b1db82019-08-08 09:16:59 +01001226{
1227 /*
Fuad Tabbaed294af2019-12-20 10:43:01 +00001228 * TODO: change this to a CPU local single page window rather than a
Jose Marinho09b1db82019-08-08 09:16:59 +01001229 * global mapping of the whole range. Such an approach will limit
1230 * the changes to stage-1 tables and will allow only local
1231 * invalidation.
1232 */
1233 bool ret;
1234 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
J-Alves7db32002021-12-14 14:44:50 +00001235 void *ptr = mm_identity_map(stage1_locked, begin, end,
1236 MM_MODE_W | (extra_mode_attributes &
1237 plat_ffa_other_world_mode()),
1238 ppool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001239 size_t size = pa_difference(begin, end);
1240
1241 if (!ptr) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001242 goto fail;
1243 }
1244
1245 memset_s(ptr, size, 0, size);
1246 arch_mm_flush_dcache(ptr, size);
1247 mm_unmap(stage1_locked, begin, end, ppool);
1248
1249 ret = true;
1250 goto out;
1251
1252fail:
1253 ret = false;
1254
1255out:
1256 mm_unlock_stage1(&stage1_locked);
1257
1258 return ret;
1259}
1260
1261/**
 1262 * Clears the memory described by the given constituent fragments by zeroing
 1263 * it. The data is flushed from the cache so it is cleared across the system.
1264 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001265static bool ffa_clear_memory_constituents(
J-Alves7db32002021-12-14 14:44:50 +00001266 uint32_t security_state_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01001267 struct ffa_memory_region_constituent **fragments,
1268 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1269 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001270{
1271 struct mpool local_page_pool;
Andrew Walbranca808b12020-05-15 17:22:28 +01001272 uint32_t i;
Jose Marinho09b1db82019-08-08 09:16:59 +01001273 bool ret = false;
1274
1275 /*
1276 * Create a local pool so any freed memory can't be used by another
1277 * thread. This is to ensure each constituent that is mapped can be
1278 * unmapped again afterwards.
1279 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001280 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001281
Andrew Walbranca808b12020-05-15 17:22:28 +01001282 /* Iterate over the memory region constituents within each fragment. */
1283 for (i = 0; i < fragment_count; ++i) {
1284 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001285
J-Alves8457f932023-10-11 16:41:45 +01001286 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001287 size_t size = fragments[i][j].page_count * PAGE_SIZE;
1288 paddr_t begin =
1289 pa_from_ipa(ipa_init(fragments[i][j].address));
1290 paddr_t end = pa_add(begin, size);
1291
J-Alves7db32002021-12-14 14:44:50 +00001292 if (!clear_memory(begin, end, &local_page_pool,
1293 security_state_mode)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001294 /*
1295 * api_clear_memory will defrag on failure, so
1296 * no need to do it here.
1297 */
1298 goto out;
1299 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001300 }
1301 }
1302
Jose Marinho09b1db82019-08-08 09:16:59 +01001303 ret = true;
1304
1305out:
1306 mpool_fini(&local_page_pool);
1307 return ret;
1308}
1309
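/**
 * Returns true if the start of the range [begin, end] falls within
 * [in_begin, in_end), or if its end falls within (in_begin, in_end].
 * The caller below applies it in both directions so that full containment
 * of one range by the other is also detected.
 */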
J-Alves5952d942022-12-22 16:03:00 +00001310static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
1311 ipaddr_t in_begin, ipaddr_t in_end)
1312{
1313 return (ipa_addr(begin) >= ipa_addr(in_begin) &&
1314 ipa_addr(begin) < ipa_addr(in_end)) ||
1315 (ipa_addr(end) <= ipa_addr(in_end) &&
1316 ipa_addr(end) > ipa_addr(in_begin));
1317}
1318
1319/**
 1320 * Receives a memory range and looks for overlaps with the remaining
 1321 * constituents of the memory share/lend/donate operation. Assumes constituents
 1322 * are processed in order, to avoid looping over every element on each call.
1323 * The function only compares the received memory ranges with those that follow
1324 * within the same fragment, and subsequent fragments from the same operation.
1325 */
1326static bool ffa_memory_check_overlap(
1327 struct ffa_memory_region_constituent **fragments,
1328 const uint32_t *fragment_constituent_counts,
1329 const uint32_t fragment_count, const uint32_t current_fragment,
1330 const uint32_t current_constituent)
1331{
1332 uint32_t i = current_fragment;
1333 uint32_t j = current_constituent;
1334 ipaddr_t current_begin = ipa_init(fragments[i][j].address);
1335 const uint32_t current_page_count = fragments[i][j].page_count;
1336 size_t current_size = current_page_count * PAGE_SIZE;
1337 ipaddr_t current_end = ipa_add(current_begin, current_size - 1);
1338
1339 if (current_size == 0 ||
1340 current_size > UINT64_MAX - ipa_addr(current_begin)) {
Karl Meakine8937d92024-03-19 16:04:25 +00001341 dlog_verbose("Invalid page count. Addr: %zx page_count: %x\n",
1342 current_begin.ipa, current_page_count);
J-Alves5952d942022-12-22 16:03:00 +00001343 return false;
1344 }
1345
1346 for (; i < fragment_count; i++) {
1347 j = (i == current_fragment) ? j + 1 : 0;
1348
1349 for (; j < fragment_constituent_counts[i]; j++) {
1350 ipaddr_t begin = ipa_init(fragments[i][j].address);
1351 const uint32_t page_count = fragments[i][j].page_count;
1352 size_t size = page_count * PAGE_SIZE;
1353 ipaddr_t end = ipa_add(begin, size - 1);
1354
1355 if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
1356 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001357 "Invalid page count. Addr: %lx "
J-Alves5952d942022-12-22 16:03:00 +00001358 "page_count: %x\n",
Karl Meakine8937d92024-03-19 16:04:25 +00001359 begin.ipa, page_count);
J-Alves5952d942022-12-22 16:03:00 +00001360 return false;
1361 }
1362
1363 /*
 1364 * Check if the current range is within begin and end,
 1365 * as well as the reverse, so that containment in
 1366 * either direction is detected.
1367 */
1368 if (is_memory_range_within(begin, end, current_begin,
1369 current_end) ||
1370 is_memory_range_within(current_begin, current_end,
1371 begin, end)) {
1372 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001373 "Overlapping memory ranges: %#lx - "
1374 "%#lx with %#lx - %#lx\n",
J-Alves5952d942022-12-22 16:03:00 +00001375 ipa_addr(begin), ipa_addr(end),
1376 ipa_addr(current_begin),
1377 ipa_addr(current_end));
1378 return true;
1379 }
1380 }
1381 }
1382
1383 return false;
1384}
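
/*
 * Illustrative example (addresses made up, assuming a 4 KiB PAGE_SIZE): a
 * constituent at address 0x8000 with page_count 1 covers [0x8000, 0x8fff],
 * and a later constituent at 0x8000 with page_count 2 covers [0x8000, 0x9fff].
 * The begin address of the second range lies inside the first, so
 * ffa_memory_check_overlap() returns true and ffa_send_check_update() rejects
 * the transaction with FFA_INVALID_PARAMETERS.
 */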
1385
Jose Marinho09b1db82019-08-08 09:16:59 +01001386/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001387 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001388 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001389 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001390 *
1391 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001392 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001393 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001394 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001395 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1396 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001397 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001398 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001399 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001400 */
Daniel Boulbya76fd912024-02-22 14:22:15 +00001401static struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001402 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001403 struct ffa_memory_region_constituent **fragments,
1404 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001405 uint32_t composite_total_page_count, uint32_t share_func,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001406 struct ffa_memory_region *memory_region, struct mpool *page_pool,
1407 uint32_t *orig_from_mode_ret, bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001408{
Andrew Walbranca808b12020-05-15 17:22:28 +01001409 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001410 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001411 uint32_t orig_from_mode;
J-Alves460d36c2023-10-12 17:02:15 +01001412 uint32_t clean_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001413 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001414 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001415 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001416 uint32_t constituents_total_page_count = 0;
J-Alves460d36c2023-10-12 17:02:15 +01001417 enum ffa_map_action map_action = MAP_ACTION_CHECK;
Daniel Boulbya76fd912024-02-22 14:22:15 +00001418 bool clear = memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
Jose Marinho09b1db82019-08-08 09:16:59 +01001419
1420 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001421 * Make sure constituents are properly aligned to a 64-bit boundary. If
1422 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001423 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001424 for (i = 0; i < fragment_count; ++i) {
1425 if (!is_aligned(fragments[i], 8)) {
1426 dlog_verbose("Constituents not aligned.\n");
1427 return ffa_error(FFA_INVALID_PARAMETERS);
1428 }
J-Alves8f11cde2022-12-21 16:18:22 +00001429 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1430 constituents_total_page_count +=
1431 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001432 if (ffa_memory_check_overlap(
1433 fragments, fragment_constituent_counts,
1434 fragment_count, i, j)) {
1435 return ffa_error(FFA_INVALID_PARAMETERS);
1436 }
J-Alves8f11cde2022-12-21 16:18:22 +00001437 }
1438 }
1439
1440 if (constituents_total_page_count != composite_total_page_count) {
1441 dlog_verbose(
1442 "Composite page count differs from calculated page "
1443 "count from constituents.\n");
1444 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001445 }
1446
1447 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001448 * Check if the state transition is lawful for the sender, and ensure
 1449 * that all constituents of a memory region being shared are in the
 1450 * same state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001451 */
J-Alves460d36c2023-10-12 17:02:15 +01001452 ret = ffa_send_check_transition(
Daniel Boulbya76fd912024-02-22 14:22:15 +00001453 from_locked, share_func, memory_region, &orig_from_mode,
1454 fragments, fragment_constituent_counts, fragment_count,
1455 &from_mode, &map_action);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001456 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001457 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001458 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001459 }
1460
Andrew Walbran37c574e2020-06-03 11:45:46 +01001461 if (orig_from_mode_ret != NULL) {
1462 *orig_from_mode_ret = orig_from_mode;
1463 }
1464
Jose Marinho09b1db82019-08-08 09:16:59 +01001465 /*
1466 * Create a local pool so any freed memory can't be used by another
1467 * thread. This is to ensure the original mapping can be restored if the
1468 * clear fails.
1469 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001470 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001471
1472 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001473 * First reserve all required memory for the new page table entries
1474 * without committing, to make sure the entire operation will succeed
1475 * without exhausting the page pool.
J-Alves460d36c2023-10-12 17:02:15 +01001476 * Provide the map_action as populated by 'ffa_send_check_transition'.
1477 * It may request memory to be protected.
Jose Marinho09b1db82019-08-08 09:16:59 +01001478 */
J-Alvescf6253e2024-01-03 13:48:48 +00001479 ret = ffa_region_group_identity_map(
1480 from_locked, fragments, fragment_constituent_counts,
J-Alves460d36c2023-10-12 17:02:15 +01001481 fragment_count, from_mode, page_pool, map_action,
1482 memory_protected);
J-Alvescf6253e2024-01-03 13:48:48 +00001483 if (ret.func == FFA_ERROR_32) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001484 goto out;
1485 }
1486
1487 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001488 * Update the mapping for the sender. This won't allocate because the
1489 * transaction was already prepared above, but may free pages in the
1490 * case that a whole block is being unmapped that was previously
1491 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001492 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001493 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001494 from_locked, fragments, fragment_constituent_counts,
1495 fragment_count, from_mode, &local_page_pool,
1496 MAP_ACTION_COMMIT, NULL)
1497 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001498
J-Alves460d36c2023-10-12 17:02:15 +01001499 /*
 1500 * If the memory has been protected, it is now part of the secure PAS
 1501 * (this happens for lend/donate from NWd to SWd), and `orig_from_mode`
 1502 * has MM_MODE_NS set, so mask it out in `clean_mode` for the SPM's S1
 1503 * translation.
 1504 * If the memory hasn't been protected and remains in the non-secure
 1505 * PAS (e.g. memory share from NWd to SWd), the SPM needs to perform a
 1506 * non-secure memory access when clearing it; in that case `clean_mode`
 1507 * takes the same mode as `orig_from_mode`.
1508 */
1509 clean_mode = (memory_protected != NULL && *memory_protected)
1510 ? orig_from_mode & ~plat_ffa_other_world_mode()
1511 : orig_from_mode;
1512
Jose Marinho09b1db82019-08-08 09:16:59 +01001513 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves460d36c2023-10-12 17:02:15 +01001514 if (clear && !ffa_clear_memory_constituents(
1515 clean_mode, fragments, fragment_constituent_counts,
1516 fragment_count, page_pool)) {
1517 map_action = (memory_protected != NULL && *memory_protected)
1518 ? MAP_ACTION_COMMIT_UNPROTECT
1519 : MAP_ACTION_COMMIT;
1520
Jose Marinho09b1db82019-08-08 09:16:59 +01001521 /*
1522 * On failure, roll back by returning memory to the sender. This
1523 * may allocate pages which were previously freed into
1524 * `local_page_pool` by the call above, but will never allocate
1525 * more pages than that so can never fail.
1526 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001527 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001528 from_locked, fragments,
1529 fragment_constituent_counts, fragment_count,
1530 orig_from_mode, &local_page_pool,
1531 MAP_ACTION_COMMIT, NULL)
1532 .func == FFA_SUCCESS_32);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001533 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001534 goto out;
1535 }
1536
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001537 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001538
1539out:
1540 mpool_fini(&local_page_pool);
1541
1542 /*
1543 * Tidy up the page table by reclaiming failed mappings (if there was an
1544 * error) or merging entries into blocks where possible (on success).
1545 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001546 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001547
1548 return ret;
1549}
1550
1551/**
1552 * Validates and maps memory shared from one VM to another.
1553 *
1554 * This function requires the calling context to hold the <to> lock.
1555 *
1556 * Returns:
1557 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001558 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001559 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001560 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001561 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001562 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001563 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001564struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001565 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001566 struct ffa_memory_region_constituent **fragments,
1567 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001568 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
J-Alves460d36c2023-10-12 17:02:15 +01001569 struct mpool *page_pool, uint32_t *response_mode, bool memory_protected)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001570{
Andrew Walbranca808b12020-05-15 17:22:28 +01001571 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001572 uint32_t to_mode;
1573 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001574 struct ffa_value ret;
J-Alvesfd206052023-05-22 16:45:00 +01001575 enum ffa_map_action map_action = MAP_ACTION_COMMIT;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001576
1577 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001578 * Make sure constituents are properly aligned to a 64-bit boundary. If
1579 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001580 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001581 for (i = 0; i < fragment_count; ++i) {
1582 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001583 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001584 return ffa_error(FFA_INVALID_PARAMETERS);
1585 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001586 }
1587
1588 /*
1589 * Check if the state transition is lawful for the recipient, and ensure
 1590 * that all constituents of the memory region being retrieved are in
 1591 * the same state.
1592 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001593 ret = ffa_retrieve_check_transition(
1594 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alvesfd206052023-05-22 16:45:00 +01001595 fragment_count, sender_orig_mode, &to_mode, memory_protected,
1596 &map_action);
J-Alves460d36c2023-10-12 17:02:15 +01001597
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001598 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001599 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001600 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001601 }
1602
1603 /*
1604 * Create a local pool so any freed memory can't be used by another
1605 * thread. This is to ensure the original mapping can be restored if the
1606 * clear fails.
1607 */
1608 mpool_init_with_fallback(&local_page_pool, page_pool);
1609
1610 /*
1611 * First reserve all required memory for the new page table entries in
1612 * the recipient page tables without committing, to make sure the entire
1613 * operation will succeed without exhausting the page pool.
1614 */
J-Alvescf6253e2024-01-03 13:48:48 +00001615 ret = ffa_region_group_identity_map(
1616 to_locked, fragments, fragment_constituent_counts,
1617 fragment_count, to_mode, page_pool, MAP_ACTION_CHECK, NULL);
1618 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001619 /* TODO: partial defrag of failed range. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001620 goto out;
1621 }
1622
1623 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001624 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001625 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1626 fragment_constituent_counts,
1627 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001628 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001629 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001630 goto out;
1631 }
1632
Jose Marinho09b1db82019-08-08 09:16:59 +01001633 /*
1634 * Complete the transfer by mapping the memory into the recipient. This
1635 * won't allocate because the transaction was already prepared above, so
1636 * it doesn't need to use the `local_page_pool`.
1637 */
J-Alvesfd206052023-05-22 16:45:00 +01001638 CHECK(ffa_region_group_identity_map(
1639 to_locked, fragments, fragment_constituent_counts,
1640 fragment_count, to_mode, page_pool, map_action, NULL)
J-Alvescf6253e2024-01-03 13:48:48 +00001641 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001642
J-Alves460d36c2023-10-12 17:02:15 +01001643 /* Return the mode used in mapping the memory in retriever's PT. */
1644 if (response_mode != NULL) {
1645 *response_mode = to_mode;
1646 }
1647
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001648 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001649
1650out:
1651 mpool_fini(&local_page_pool);
1652
1653 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001654 * Tidy up the page table by reclaiming failed mappings (if there was an
1655 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001656 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001657 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001658
1659 return ret;
1660}
1661
Andrew Walbran996d1d12020-05-27 14:08:43 +01001662static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001663 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001664 struct ffa_memory_region_constituent **fragments,
1665 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1666 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001667{
1668 uint32_t orig_from_mode;
1669 uint32_t from_mode;
1670 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001671 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001672
Andrew Walbranca808b12020-05-15 17:22:28 +01001673 ret = ffa_relinquish_check_transition(
1674 from_locked, &orig_from_mode, fragments,
1675 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001676 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001677 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001678 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001679 }
1680
1681 /*
1682 * Create a local pool so any freed memory can't be used by another
1683 * thread. This is to ensure the original mapping can be restored if the
1684 * clear fails.
1685 */
1686 mpool_init_with_fallback(&local_page_pool, page_pool);
1687
1688 /*
1689 * First reserve all required memory for the new page table entries
1690 * without committing, to make sure the entire operation will succeed
1691 * without exhausting the page pool.
1692 */
J-Alvescf6253e2024-01-03 13:48:48 +00001693 ret = ffa_region_group_identity_map(
1694 from_locked, fragments, fragment_constituent_counts,
1695 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK, NULL);
1696 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001697 goto out;
1698 }
1699
1700 /*
1701 * Update the mapping for the sender. This won't allocate because the
1702 * transaction was already prepared above, but may free pages in the
1703 * case that a whole block is being unmapped that was previously
1704 * partially mapped.
1705 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001706 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001707 from_locked, fragments, fragment_constituent_counts,
1708 fragment_count, from_mode, &local_page_pool,
1709 MAP_ACTION_COMMIT, NULL)
1710 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001711
1712 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001713 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001714 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1715 fragment_constituent_counts,
1716 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001717 /*
1718 * On failure, roll back by returning memory to the sender. This
1719 * may allocate pages which were previously freed into
1720 * `local_page_pool` by the call above, but will never allocate
1721 * more pages than that so can never fail.
1722 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001723 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001724 from_locked, fragments,
1725 fragment_constituent_counts, fragment_count,
1726 orig_from_mode, &local_page_pool,
1727 MAP_ACTION_COMMIT, NULL)
1728 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001729
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001730 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001731 goto out;
1732 }
1733
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001734 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001735
1736out:
1737 mpool_fini(&local_page_pool);
1738
1739 /*
1740 * Tidy up the page table by reclaiming failed mappings (if there was an
1741 * error) or merging entries into blocks where possible (on success).
1742 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001743 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001744
1745 return ret;
1746}
1747
1748/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001749 * Complete a memory sending operation by checking that it is valid, updating
1750 * the sender page table, and then either marking the share state as having
1751 * completed sending (on success) or freeing it (on failure).
1752 *
1753 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1754 */
J-Alvesfdd29272022-07-19 13:16:31 +01001755struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001756 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001757 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1758 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001759{
1760 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001761 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01001762 struct ffa_value ret;
1763
1764 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001765 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001766 assert(memory_region != NULL);
1767 composite = ffa_memory_region_get_composite(memory_region, 0);
1768 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001769
1770 /* Check that state is valid in sender page table and update. */
1771 ret = ffa_send_check_update(
1772 from_locked, share_state->fragments,
1773 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001774 share_state->fragment_count, composite->page_count,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001775 share_state->share_func, memory_region, page_pool,
J-Alves460d36c2023-10-12 17:02:15 +01001776 orig_from_mode_ret, &share_state->memory_protected);
Andrew Walbranca808b12020-05-15 17:22:28 +01001777 if (ret.func != FFA_SUCCESS_32) {
1778 /*
1779 * Free share state, it failed to send so it can't be retrieved.
1780 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001781 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1782 __func__, ffa_func_name(ret.func),
1783 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001784 share_state_free(share_states, share_state, page_pool);
1785 return ret;
1786 }
1787
1788 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001789 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001790
J-Alvesee68c542020-10-29 17:48:20 +00001791 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001792}
1793
1794/**
Daniel Boulby9764ff62024-01-30 17:47:39 +00001795 * Check that the memory attributes match Hafnium expectations.
1796 * Cacheability:
1797 * - Normal Memory as `FFA_MEMORY_CACHE_WRITE_BACK`.
1798 * - Device memory as `FFA_MEMORY_DEV_NGNRNE`.
1799 *
1800 * Shareability:
1801 * - Inner Shareable.
Federico Recanatia98603a2021-12-20 18:04:03 +01001802 */
1803static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001804 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001805{
1806 enum ffa_memory_type memory_type;
1807 enum ffa_memory_cacheability cacheability;
1808 enum ffa_memory_shareability shareability;
1809
Karl Meakin84710f32023-10-12 15:14:49 +01001810 memory_type = attributes.type;
Daniel Boulby9764ff62024-01-30 17:47:39 +00001811 cacheability = attributes.cacheability;
1812 if (memory_type == FFA_MEMORY_NORMAL_MEM &&
1813 cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1814 dlog_verbose(
1815 "Normal Memory: Invalid cacheability %s, "
1816 "expected %s.\n",
1817 ffa_memory_cacheability_name(cacheability),
1818 ffa_memory_cacheability_name(
1819 FFA_MEMORY_CACHE_WRITE_BACK));
Federico Recanati3d953f32022-02-17 09:31:29 +01001820 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001821 }
Daniel Boulby9764ff62024-01-30 17:47:39 +00001822 if (memory_type == FFA_MEMORY_DEVICE_MEM &&
1823 cacheability != FFA_MEMORY_DEV_NGNRNE) {
1824 dlog_verbose(
1825 "Device Memory: Invalid cacheability %s, "
1826 "expected %s.\n",
1827 ffa_device_memory_cacheability_name(cacheability),
1828 ffa_device_memory_cacheability_name(
1829 FFA_MEMORY_DEV_NGNRNE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001830 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001831 }
1832
Karl Meakin84710f32023-10-12 15:14:49 +01001833 shareability = attributes.shareability;
Federico Recanatia98603a2021-12-20 18:04:03 +01001834 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001835 dlog_verbose("Invalid shareability %s, expected %s.\n",
1836 ffa_memory_shareability_name(shareability),
1837 ffa_memory_shareability_name(
1838 FFA_MEMORY_INNER_SHAREABLE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001839 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001840 }
1841
1842 return (struct ffa_value){.func = FFA_SUCCESS_32};
1843}
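
/*
 * Illustrative sketch (not part of this file): attributes that satisfy the
 * check above for normal memory. Field and constant names follow the
 * accesses made in ffa_memory_attributes_validate().
 *
 *	ffa_memory_attributes_t attributes = {
 *		.type = FFA_MEMORY_NORMAL_MEM,
 *		.cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
 *		.shareability = FFA_MEMORY_INNER_SHAREABLE,
 *	};
 *	assert(ffa_memory_attributes_validate(attributes).func ==
 *	       FFA_SUCCESS_32);
 */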
1844
1845/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001846 * Check that the given `memory_region` represents a valid memory send request
 1847 * of the given `share_func` type, and update the receiver permissions if
 1848 * necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001849 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001850 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001851 * not.
1852 */
J-Alves66652252022-07-06 09:49:51 +01001853struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001854 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1855 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001856 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001857{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001858 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001859 struct ffa_memory_access *receiver =
1860 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001861 uint64_t receivers_end;
1862 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001863 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001864 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001865 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001866 enum ffa_data_access data_access;
1867 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001868 enum ffa_memory_security security_state;
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001869 enum ffa_memory_type type;
Federico Recanatia98603a2021-12-20 18:04:03 +01001870 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001871 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001872 memory_region->receivers_offset +
1873 memory_region->memory_access_desc_size +
1874 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001875
1876 if (fragment_length < minimum_first_fragment_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00001877 dlog_verbose("Fragment length %u too short (min %zu).\n",
1878 fragment_length, minimum_first_fragment_length);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001879 return ffa_error(FFA_INVALID_PARAMETERS);
1880 }
1881
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001882 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1883 "struct ffa_memory_region_constituent must be 16 bytes");
1884 if (!is_aligned(fragment_length,
1885 sizeof(struct ffa_memory_region_constituent)) ||
1886 !is_aligned(memory_share_length,
1887 sizeof(struct ffa_memory_region_constituent))) {
1888 dlog_verbose(
1889 "Fragment length %u or total length %u"
1890 " is not 16-byte aligned.\n",
1891 fragment_length, memory_share_length);
1892 return ffa_error(FFA_INVALID_PARAMETERS);
1893 }
1894
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001895 if (fragment_length > memory_share_length) {
1896 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001897 "Fragment length %zu greater than total length %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001898 (size_t)fragment_length, (size_t)memory_share_length);
1899 return ffa_error(FFA_INVALID_PARAMETERS);
1900 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001901
J-Alves95df0ef2022-12-07 10:09:48 +00001902 /* The sender must match the caller. */
1903 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1904 vm_id_is_current_world(memory_region->sender)) ||
1905 (vm_id_is_current_world(from_locked.vm->id) &&
1906 memory_region->sender != from_locked.vm->id)) {
1907 dlog_verbose("Invalid memory sender ID.\n");
1908 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001909 }
1910
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001911 if (memory_region->receiver_count <= 0) {
1912 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001913 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001914 }
1915
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001916 /*
1917 * Ensure that the composite header is within the memory bounds and
1918 * doesn't overlap the first part of the message. Cast to uint64_t
1919 * to prevent overflow.
1920 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001921 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001922 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001923 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001924 min_length = receivers_end +
1925 sizeof(struct ffa_composite_memory_region) +
1926 sizeof(struct ffa_memory_region_constituent);
1927 if (min_length > memory_share_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00001928 dlog_verbose("Share too short: got %zu but minimum is %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001929 (size_t)memory_share_length, (size_t)min_length);
1930 return ffa_error(FFA_INVALID_PARAMETERS);
1931 }
1932
1933 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001934 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001935
1936 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001937 * Check that the composite memory region descriptor is after the access
1938 * descriptors, is at least 16-byte aligned, and fits in the first
1939 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001940 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001941 if ((composite_memory_region_offset < receivers_end) ||
1942 (composite_memory_region_offset % 16 != 0) ||
1943 (composite_memory_region_offset >
1944 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1945 dlog_verbose(
1946 "Invalid composite memory region descriptor offset "
Karl Meakine8937d92024-03-19 16:04:25 +00001947 "%zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001948 (size_t)composite_memory_region_offset);
1949 return ffa_error(FFA_INVALID_PARAMETERS);
1950 }
1951
1952 /*
1953 * Compute the start of the constituent regions. Already checked
1954 * to be not more than fragment_length and thus not more than
1955 * memory_share_length.
1956 */
1957 constituents_start = composite_memory_region_offset +
1958 sizeof(struct ffa_composite_memory_region);
1959 constituents_length = memory_share_length - constituents_start;
1960
1961 /*
1962 * Check that the number of constituents is consistent with the length
1963 * of the constituent region.
1964 */
1965 composite = ffa_memory_region_get_composite(memory_region, 0);
1966 if ((constituents_length %
1967 sizeof(struct ffa_memory_region_constituent) !=
1968 0) ||
1969 ((constituents_length /
1970 sizeof(struct ffa_memory_region_constituent)) !=
1971 composite->constituent_count)) {
Karl Meakine8937d92024-03-19 16:04:25 +00001972 dlog_verbose("Invalid length %zu or composite offset %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001973 (size_t)memory_share_length,
1974 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001975 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001976 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001977 if (fragment_length < memory_share_length &&
1978 fragment_length < HF_MAILBOX_SIZE) {
1979 dlog_warning(
1980 "Initial fragment length %d smaller than mailbox "
1981 "size.\n",
1982 fragment_length);
1983 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001984
Andrew Walbrana65a1322020-04-06 19:32:32 +01001985 /*
1986 * Clear is not allowed for memory sharing, as the sender still has
1987 * access to the memory.
1988 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001989 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
J-Alves95fbb312024-03-20 15:19:16 +00001990 (share_func == FFA_MEM_SHARE_32 ||
1991 share_func == FFA_MEM_SHARE_64)) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001992 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001993 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001994 }
1995
1996 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001997 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001998 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001999 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002000 }
2001
J-Alves363f5722022-04-25 17:37:37 +01002002 /* Check that the permissions are valid, for each specified receiver. */
2003 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002004 struct ffa_memory_region_attributes receiver_permissions;
2005
2006 receiver = ffa_memory_region_get_receiver(memory_region, i);
2007 assert(receiver != NULL);
2008 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01002009 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002010 receiver_permissions.permissions;
2011 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01002012
2013 if (memory_region->sender == receiver_id) {
2014 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002015 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002016 }
Federico Recanati85090c42021-12-15 13:17:54 +01002017
J-Alves363f5722022-04-25 17:37:37 +01002018 for (uint32_t j = i + 1; j < memory_region->receiver_count;
2019 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002020 struct ffa_memory_access *other_receiver =
2021 ffa_memory_region_get_receiver(memory_region,
2022 j);
2023 assert(other_receiver != NULL);
2024
J-Alves363f5722022-04-25 17:37:37 +01002025 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002026 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01002027 dlog_verbose(
2028 "Repeated receiver(%x) in memory send "
2029 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002030 other_receiver->receiver_permissions
2031 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01002032 return ffa_error(FFA_INVALID_PARAMETERS);
2033 }
2034 }
2035
2036 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002037 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01002038 dlog_verbose(
2039 "All ffa_memory_access should point to the "
2040 "same composite memory region offset.\n");
2041 return ffa_error(FFA_INVALID_PARAMETERS);
2042 }
2043
Karl Meakin84710f32023-10-12 15:14:49 +01002044 data_access = permissions.data_access;
2045 instruction_access = permissions.instruction_access;
J-Alves363f5722022-04-25 17:37:37 +01002046 if (data_access == FFA_DATA_ACCESS_RESERVED ||
2047 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
2048 dlog_verbose(
2049 "Reserved value for receiver permissions "
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002050 "(data_access = %s, instruction_access = %s)\n",
2051 ffa_data_access_name(data_access),
2052 ffa_instruction_access_name(
2053 instruction_access));
J-Alves363f5722022-04-25 17:37:37 +01002054 return ffa_error(FFA_INVALID_PARAMETERS);
2055 }
2056 if (instruction_access !=
2057 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2058 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002059 "Invalid instruction access permissions %s "
2060 "for sending memory, expected %s.\n",
2061 ffa_instruction_access_name(instruction_access),
2062 ffa_instruction_access_name(
 2063 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002064 return ffa_error(FFA_INVALID_PARAMETERS);
2065 }
J-Alves95fbb312024-03-20 15:19:16 +00002066 if (share_func == FFA_MEM_SHARE_32 ||
2067 share_func == FFA_MEM_SHARE_64) {
J-Alves363f5722022-04-25 17:37:37 +01002068 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2069 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002070 "Invalid data access permissions %s "
 2071 "for sharing memory; the sender must "
 2072 "specify the data access.\n",
 2073 ffa_data_access_name(data_access));
J-Alves363f5722022-04-25 17:37:37 +01002075 return ffa_error(FFA_INVALID_PARAMETERS);
2076 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002077 /*
2078 * According to section 10.10.3 of the FF-A v1.1 EAC0
2079 * spec, NX is required for share operations (but must
2080 * not be specified by the sender) so set it in the
2081 * copy that we store, ready to be returned to the
2082 * retriever.
2083 */
2084 if (vm_id_is_current_world(receiver_id)) {
Karl Meakin84710f32023-10-12 15:14:49 +01002085 permissions.instruction_access =
2086 FFA_INSTRUCTION_ACCESS_NX;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002087 receiver_permissions.permissions = permissions;
2088 }
J-Alves363f5722022-04-25 17:37:37 +01002089 }
J-Alves95fbb312024-03-20 15:19:16 +00002090 if ((share_func == FFA_MEM_LEND_32 ||
2091 share_func == FFA_MEM_LEND_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002092 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2093 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002094 "Invalid data access permissions %s for "
 2095 "lending memory; the sender must "
 2096 "specify the data access.\n",
 2097 ffa_data_access_name(data_access));
J-Alves363f5722022-04-25 17:37:37 +01002099 return ffa_error(FFA_INVALID_PARAMETERS);
2100 }
2101
J-Alves95fbb312024-03-20 15:19:16 +00002102 if ((share_func == FFA_MEM_DONATE_32 ||
2103 share_func == FFA_MEM_DONATE_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002104 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
2105 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002106 "Invalid data access permissions %s for "
2107 "donating memory, expected %s.\n",
2108 ffa_data_access_name(data_access),
2109 ffa_data_access_name(
2110 FFA_DATA_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002111 return ffa_error(FFA_INVALID_PARAMETERS);
2112 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002113 }
2114
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002115 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
Karl Meakin84710f32023-10-12 15:14:49 +01002116 security_state = memory_region->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002117 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2118 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002119 "Invalid security state %s for memory share operation, "
2120 "expected %s.\n",
2121 ffa_memory_security_name(security_state),
2122 ffa_memory_security_name(
2123 FFA_MEMORY_SECURITY_UNSPECIFIED));
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002124 return ffa_error(FFA_INVALID_PARAMETERS);
2125 }
2126
Federico Recanatid937f5e2021-12-20 17:38:23 +01002127 /*
J-Alves807794e2022-06-16 13:42:47 +01002128 * For a memory donate, or a lend with a single borrower, the memory
 2129 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01002130 */
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002131 type = memory_region->attributes.type;
J-Alves807794e2022-06-16 13:42:47 +01002132 if (share_func == FFA_MEM_DONATE_32 ||
J-Alves95fbb312024-03-20 15:19:16 +00002133 share_func == FFA_MEM_DONATE_64 ||
2134 ((share_func == FFA_MEM_LEND_32 || share_func == FFA_MEM_LEND_64) &&
J-Alves807794e2022-06-16 13:42:47 +01002135 memory_region->receiver_count == 1)) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002136 if (type != FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves807794e2022-06-16 13:42:47 +01002137 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002138 "Invalid memory type %s for memory share "
2139 "operation, expected %s.\n",
2140 ffa_memory_type_name(type),
2141 ffa_memory_type_name(
2142 FFA_MEMORY_NOT_SPECIFIED_MEM));
J-Alves807794e2022-06-16 13:42:47 +01002143 return ffa_error(FFA_INVALID_PARAMETERS);
2144 }
2145 } else {
2146 /*
2147 * Check that sender's memory attributes match Hafnium
2148 * expectations: Normal Memory, Inner shareable, Write-Back
2149 * Read-Allocate Write-Allocate Cacheable.
2150 */
2151 ret = ffa_memory_attributes_validate(memory_region->attributes);
2152 if (ret.func != FFA_SUCCESS_32) {
2153 return ret;
2154 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01002155 }
2156
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002157 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01002158}
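
/*
 * Illustrative layout of a minimal, valid first fragment as checked above
 * (offsets are examples, not normative):
 *
 *	[transaction descriptor header]
 *	[memory access descriptors]	at receivers_offset, receiver_count
 *					entries of memory_access_desc_size each
 *	[composite memory region]	at composite_memory_region_offset:
 *					16-byte aligned, after the access
 *					descriptors, within the first fragment
 *	[constituents]			constituents_length must be exactly
 *					constituent_count * 16 bytes
 */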
2159
2160/**
Andrew Walbranca808b12020-05-15 17:22:28 +01002161 * Gets the share state for continuing an operation to donate, lend or share
2162 * memory, and checks that it is a valid request.
2163 *
2164 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
2165 * not.
2166 */
J-Alvesfdd29272022-07-19 13:16:31 +01002167struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01002168 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01002169 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002170 struct mpool *page_pool)
2171{
2172 struct ffa_memory_share_state *share_state;
2173 struct ffa_memory_region *memory_region;
2174
Daniel Boulbya2f8c662021-11-26 17:52:53 +00002175 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01002176
2177 /*
2178 * Look up the share state by handle and make sure that the VM ID
2179 * matches.
2180 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01002181 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00002182 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002183 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002184 "Invalid handle %#lx for memory send continuation.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002185 handle);
2186 return ffa_error(FFA_INVALID_PARAMETERS);
2187 }
2188 memory_region = share_state->memory_region;
2189
J-Alvesfdd29272022-07-19 13:16:31 +01002190 if (vm_id_is_current_world(from_vm_id) &&
2191 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002192 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
2193 return ffa_error(FFA_INVALID_PARAMETERS);
2194 }
2195
2196 if (share_state->sending_complete) {
2197 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002198 "Sending of memory handle %#lx is already complete.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002199 handle);
2200 return ffa_error(FFA_INVALID_PARAMETERS);
2201 }
2202
2203 if (share_state->fragment_count == MAX_FRAGMENTS) {
2204 /*
2205 * Log a warning as this is a sign that MAX_FRAGMENTS should
2206 * probably be increased.
2207 */
2208 dlog_warning(
Karl Meakine8937d92024-03-19 16:04:25 +00002209 "Too many fragments for memory share with handle %#lx; "
Andrew Walbranca808b12020-05-15 17:22:28 +01002210 "only %d supported.\n",
2211 handle, MAX_FRAGMENTS);
2212 /* Free share state, as it's not possible to complete it. */
2213 share_state_free(share_states, share_state, page_pool);
2214 return ffa_error(FFA_NO_MEMORY);
2215 }
2216
2217 *share_state_ret = share_state;
2218
2219 return (struct ffa_value){.func = FFA_SUCCESS_32};
2220}
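
/*
 * Fragmentation flow handled by the continuation path (a summary, not code
 * from this file): when the first fragment passed to ffa_memory_send() is
 * shorter than the total length, the sender is answered with FFA_MEM_FRAG_RX
 * carrying the allocated handle, and each remaining fragment arrives through
 * FFA_MEM_FRAG_TX and is validated by ffa_memory_send_continue_validate()
 * against that same handle until sending is complete.
 */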
2221
2222/**
J-Alves95df0ef2022-12-07 10:09:48 +00002223 * Checks if there is at least one receiver from the other world.
2224 */
J-Alvesfdd29272022-07-19 13:16:31 +01002225bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00002226 struct ffa_memory_region *memory_region)
2227{
2228 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002229 struct ffa_memory_access *receiver =
2230 ffa_memory_region_get_receiver(memory_region, i);
2231 assert(receiver != NULL);
2232 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
2233
2234 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00002235 return true;
2236 }
2237 }
2238 return false;
2239}
2240
2241/**
J-Alves9da280b2022-12-21 14:55:39 +00002242 * Validates a call to donate, lend or share memory in which Hafnium is the
2243 * designated allocator of the memory handle. In practice, this also means
2244 * Hafnium is responsible for managing the state structures for the transaction.
2245 * If Hafnium is the SPMC, it should allocate the memory handle when either the
2246 * sender is an SP or there is at least one borrower that is an SP.
2247 * If Hafnium is the hypervisor, it should allocate the memory handle when
 2248 * the operation involves only NWd VMs.
2249 *
2250 * If validation goes well, Hafnium updates the stage-2 page tables of the
2251 * sender. Validation consists of checking if the message length and number of
2252 * memory region constituents match, and if the transition is valid for the
2253 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00002254 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002255 * Assumes that the caller has already found and locked the sender VM and copied
2256 * the memory region descriptor from the sender's TX buffer to a freshly
2257 * allocated page from Hafnium's internal pool. The caller must have also
2258 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002259 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002260 * This function takes ownership of the `memory_region` passed in and will free
2261 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01002262 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002263struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002264 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002265 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002266 uint32_t fragment_length, uint32_t share_func,
2267 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01002268{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002269 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002270 struct share_states_locked share_states;
2271 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01002272
2273 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002274 * If there is an error validating the `memory_region` then we need to
2275 * free it because we own it but we won't be storing it in a share state
2276 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01002277 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002278 ret = ffa_memory_send_validate(from_locked, memory_region,
2279 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01002280 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002281 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002282 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002283 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002284 }
2285
Andrew Walbrana65a1322020-04-06 19:32:32 +01002286 /* Set flag for share function, ready to be retrieved later. */
2287 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002288 case FFA_MEM_SHARE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002289 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002290 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002291 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002292 break;
J-Alves95fbb312024-03-20 15:19:16 +00002293 case FFA_MEM_LEND_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002294 case FFA_MEM_LEND_32:
2295 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002296 break;
J-Alves95fbb312024-03-20 15:19:16 +00002297 case FFA_MEM_DONATE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002298 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002299 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002300 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002301 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01002302 }
2303
Andrew Walbranca808b12020-05-15 17:22:28 +01002304 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002305 /*
2306 * Allocate a share state before updating the page table. Otherwise if
2307 * updating the page table succeeded but allocating the share state
2308 * failed then it would leave the memory in a state where nobody could
2309 * get it back.
2310 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01002311 share_state = allocate_share_state(share_states, share_func,
2312 memory_region, fragment_length,
2313 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00002314 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002315 dlog_verbose("Failed to allocate share state.\n");
2316 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01002317 ret = ffa_error(FFA_NO_MEMORY);
2318 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002319 }
2320
Andrew Walbranca808b12020-05-15 17:22:28 +01002321 if (fragment_length == memory_share_length) {
2322 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00002323 ret = ffa_memory_send_complete(
2324 from_locked, share_states, share_state, page_pool,
2325 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002326 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002327 /*
2328 * Use the sender ID from 'memory_region', which has been
2329 * validated by this point. The sender ID in FFA_MEM_FRAG_RX
2330 * is MBZ at a virtual FF-A instance, hence the 0 below.
2331 */
J-Alves19e20cf2023-08-02 12:48:55 +01002332 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002333 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2334 ? memory_region->sender
2335 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01002336 ret = (struct ffa_value){
2337 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002338 .arg1 = (uint32_t)memory_region->handle,
2339 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002340 .arg3 = fragment_length,
2341 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002342 }
2343
2344out:
2345 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002346 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002347 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002348}
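
/*
 * Example (illustrative sketch only, not a prescribed calling sequence): a
 * caller that has already copied the transaction descriptor out of the
 * sender's TX buffer into a pool-allocated page could drive a two-fragment
 * lend roughly as follows. The names `total_len`, `first_frag_len`,
 * `next_frag` and `next_frag_len` are hypothetical.
 *
 *   struct ffa_value ret = ffa_memory_send(from_locked, memory_region,
 *                                          total_len, first_frag_len,
 *                                          FFA_MEM_LEND_32, page_pool);
 *
 *   if (ret.func == FFA_MEM_FRAG_RX_32) {
 *           ffa_memory_handle_t handle =
 *                   (ffa_memory_handle_t)ret.arg1 |
 *                   ((ffa_memory_handle_t)ret.arg2 << 32);
 *
 *           ret = ffa_memory_send_continue(from_locked, next_frag,
 *                                          next_frag_len, handle,
 *                                          page_pool);
 *   }
 */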
2349
2350/**
J-Alves8505a8a2022-06-15 18:10:18 +01002351 * Continues an operation to donate, lend or share memory to a VM from the
2352 * current world. If this is the last fragment, it checks that the transition is valid
2353 * for the type of memory sending operation and updates the stage-2 page tables
2354 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002355 *
2356 * Assumes that the caller has already found and locked the sender VM and copied
2357 * the memory region descriptor from the sender's TX buffer to a freshly
2358 * allocated page from Hafnium's internal pool.
2359 *
2360 * This function takes ownership of the `fragment` passed in; it must not be
2361 * freed by the caller.
2362 */
2363struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2364 void *fragment,
2365 uint32_t fragment_length,
2366 ffa_memory_handle_t handle,
2367 struct mpool *page_pool)
2368{
2369 struct share_states_locked share_states = share_states_lock();
2370 struct ffa_memory_share_state *share_state;
2371 struct ffa_value ret;
2372 struct ffa_memory_region *memory_region;
2373
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002374 CHECK(is_aligned(fragment,
2375 alignof(struct ffa_memory_region_constituent)));
2376 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2377 0) {
2378 dlog_verbose("Fragment length %u misaligned.\n",
2379 fragment_length);
2380 ret = ffa_error(FFA_INVALID_PARAMETERS);
2381 goto out_free_fragment;
2382 }
2383
Andrew Walbranca808b12020-05-15 17:22:28 +01002384 ret = ffa_memory_send_continue_validate(share_states, handle,
2385 &share_state,
2386 from_locked.vm->id, page_pool);
2387 if (ret.func != FFA_SUCCESS_32) {
2388 goto out_free_fragment;
2389 }
2390 memory_region = share_state->memory_region;
2391
J-Alves95df0ef2022-12-07 10:09:48 +00002392 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002393 dlog_error(
2394 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002395 "other world. This should never happen, and indicates "
2396 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01002397 "EL3 code.\n");
2398 ret = ffa_error(FFA_INVALID_PARAMETERS);
2399 goto out_free_fragment;
2400 }
2401
2402 /* Add this fragment. */
2403 share_state->fragments[share_state->fragment_count] = fragment;
2404 share_state->fragment_constituent_counts[share_state->fragment_count] =
2405 fragment_length / sizeof(struct ffa_memory_region_constituent);
2406 share_state->fragment_count++;
2407
2408 /* Check whether the memory send operation is now ready to complete. */
2409 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002410 ret = ffa_memory_send_complete(
2411 from_locked, share_states, share_state, page_pool,
2412 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002413 } else {
2414 ret = (struct ffa_value){
2415 .func = FFA_MEM_FRAG_RX_32,
2416 .arg1 = (uint32_t)handle,
2417 .arg2 = (uint32_t)(handle >> 32),
2418 .arg3 = share_state_next_fragment_offset(share_states,
2419 share_state)};
2420 }
2421 goto out;
2422
2423out_free_fragment:
2424 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002425
2426out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002427 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002428 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002429}
2430
Andrew Walbranca808b12020-05-15 17:22:28 +01002431/** Clean up after the receiver has finished retrieving a memory region. */
2432static void ffa_memory_retrieve_complete(
2433 struct share_states_locked share_states,
2434 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2435{
J-Alves95fbb312024-03-20 15:19:16 +00002436 if (share_state->share_func == FFA_MEM_DONATE_32 ||
2437 share_state->share_func == FFA_MEM_DONATE_64) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002438 /*
2439 * Memory that has been donated can't be relinquished,
2440 * so no need to keep the share state around.
2441 */
2442 share_state_free(share_states, share_state, page_pool);
2443 dlog_verbose("Freed share state for donate.\n");
2444 }
2445}
2446
J-Alves2d8457f2022-10-05 11:06:41 +01002447/**
2448 * Initialises the given memory region descriptor to be used for an
2449 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2450 * fragment.
2451 * The memory region descriptor is initialized according to the retriever's
2452 * FF-A version.
2453 *
2454 * Returns true on success, or false if the given constituents won't all fit in
2455 * the first fragment.
2456 */
2457static bool ffa_retrieved_memory_region_init(
2458 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002459 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002460 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002461 ffa_memory_access_permissions_t permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002462 struct ffa_memory_access *receivers, size_t receiver_count,
2463 uint32_t memory_access_desc_size, uint32_t page_count,
2464 uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002465 const struct ffa_memory_region_constituent constituents[],
2466 uint32_t fragment_constituent_count, uint32_t *total_length,
2467 uint32_t *fragment_length)
2468{
2469 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002470 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002471 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002472 uint32_t constituents_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002473
2474 assert(response != NULL);
2475
2476 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2477 struct ffa_memory_region_v1_0 *retrieve_response =
2478 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002479 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002480
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002481 ffa_memory_region_init_header_v1_0(retrieve_response, sender,
2482 attributes, flags, handle, 0,
2483 receiver_count);
J-Alves2d8457f2022-10-05 11:06:41 +01002484
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002485 receiver = (struct ffa_memory_access_v1_0 *)
2486 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002487 receiver_count = retrieve_response->receiver_count;
2488
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002489 for (uint32_t i = 0; i < receiver_count; i++) {
2490 ffa_id_t receiver_id =
2491 receivers[i].receiver_permissions.receiver;
2492 ffa_memory_receiver_flags_t recv_flags =
2493 receivers[i].receiver_permissions.flags;
2494
2495 /*
2496 * Initialized here, as memory retrieve responses are
2497 * currently expected to specify a single borrower.
2498 */
2499 ffa_memory_access_init_v1_0(
Karl Meakin84710f32023-10-12 15:14:49 +01002500 receiver, receiver_id, permissions.data_access,
2501 permissions.instruction_access, recv_flags);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002502 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002503
2504 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002505 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002506 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2507 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002508
2509 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2510 retrieve_response, 0);
2511 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002512 struct ffa_memory_region *retrieve_response =
2513 (struct ffa_memory_region *)response;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002514 struct ffa_memory_access *retrieve_response_receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002515
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002516 ffa_memory_region_init_header(
2517 retrieve_response, sender, attributes, flags, handle, 0,
2518 receiver_count, memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002519
2520 /*
2521 * Note that `sizeof(struct ffa_memory_region)` and
2522 * `sizeof(struct ffa_memory_access)` must both be multiples of
2523 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2524 * guaranteed that the offset we calculate here is aligned to a
2525 * 64-bit boundary and so 64-bit values can be copied without
2526 * alignment faults.
2527 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002528 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002529 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002530 (uint32_t)(receiver_count *
2531 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002532
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002533 retrieve_response_receivers =
2534 ffa_memory_region_get_receiver(retrieve_response, 0);
2535 assert(retrieve_response_receivers != NULL);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002536
2537 /*
2538 * Initialized here, as memory retrieve responses currently
2539 * expect a single borrower to be specified.
2540 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002541 memcpy_s(retrieve_response_receivers,
2542 sizeof(struct ffa_memory_access) * receiver_count,
2543 receivers,
2544 sizeof(struct ffa_memory_access) * receiver_count);
2545
2546 retrieve_response_receivers->composite_memory_region_offset =
2547 composite_offset;
2548
J-Alves2d8457f2022-10-05 11:06:41 +01002549 composite_memory_region =
2550 ffa_memory_region_get_composite(retrieve_response, 0);
2551 }
2552
J-Alves2d8457f2022-10-05 11:06:41 +01002553 assert(composite_memory_region != NULL);
2554
J-Alves2d8457f2022-10-05 11:06:41 +01002555 composite_memory_region->page_count = page_count;
2556 composite_memory_region->constituent_count = total_constituent_count;
2557 composite_memory_region->reserved_0 = 0;
2558
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002559 constituents_offset =
2560 composite_offset + sizeof(struct ffa_composite_memory_region);
J-Alves2d8457f2022-10-05 11:06:41 +01002561 if (constituents_offset +
2562 fragment_constituent_count *
2563 sizeof(struct ffa_memory_region_constituent) >
2564 response_max_size) {
2565 return false;
2566 }
2567
2568 for (i = 0; i < fragment_constituent_count; ++i) {
2569 composite_memory_region->constituents[i] = constituents[i];
2570 }
2571
2572 if (total_length != NULL) {
2573 *total_length =
2574 constituents_offset +
2575 composite_memory_region->constituent_count *
2576 sizeof(struct ffa_memory_region_constituent);
2577 }
2578 if (fragment_length != NULL) {
2579 *fragment_length =
2580 constituents_offset +
2581 fragment_constituent_count *
2582 sizeof(struct ffa_memory_region_constituent);
2583 }
2584
2585 return true;
2586}
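
/*
 * Illustrative layout of the retrieve response built above for a v1.1+
 * retriever (offsets are symbolic, not to scale):
 *
 *   [ffa_memory_region header]
 *   [receiver 0 .. receiver N-1]       <- receivers_offset; each entry is
 *                                         memory_access_desc_size bytes
 *   [ffa_composite_memory_region]      <- composite_offset
 *   [constituent 0 .. constituent M-1] <- constituents_offset
 *
 * For a v1.0 retriever the same information is emitted using the
 * ffa_memory_region_v1_0 and ffa_memory_access_v1_0 layouts instead.
 */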
2587
J-Alves96de29f2022-04-26 16:05:24 +01002588/**
2589 * Validates the retrieved permissions against those specified by the lender
2590 * of the memory share operation. Optionally, it can set the permissions to be
2591 * used for the S2 mapping through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01002592 * Returns FFA_SUCCESS if all the fields are valid, or FFA_ERROR with error code:
2593 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2594 * specification for each ABI.
2595 * - FFA_DENIED -> if the permissions specified by the retriever are more
2596 * permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002597 */
J-Alvesdcad8992023-09-15 14:10:35 +01002598static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2599 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002600 enum ffa_data_access requested_data_access,
2601 enum ffa_instruction_access sent_instruction_access,
2602 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002603 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002604{
2605 switch (sent_data_access) {
2606 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2607 case FFA_DATA_ACCESS_RW:
2608 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2609 requested_data_access == FFA_DATA_ACCESS_RW) {
2610 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002611 permissions->data_access = FFA_DATA_ACCESS_RW;
J-Alves96de29f2022-04-26 16:05:24 +01002612 }
2613 break;
2614 }
2615 /* Intentional fall-through. */
2616 case FFA_DATA_ACCESS_RO:
2617 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2618 requested_data_access == FFA_DATA_ACCESS_RO) {
2619 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002620 permissions->data_access = FFA_DATA_ACCESS_RO;
J-Alves96de29f2022-04-26 16:05:24 +01002621 }
2622 break;
2623 }
2624 dlog_verbose(
2625 "Invalid data access requested; sender specified "
2626 "permissions %#x but receiver requested %#x.\n",
2627 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002628 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002629 case FFA_DATA_ACCESS_RESERVED:
2630 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2631 "checked before this point.");
2632 }
2633
J-Alvesdcad8992023-09-15 14:10:35 +01002634 /*
2635 * For operations with a single borrower, if it is an FFA_MEM_LEND
2636 * or FFA_MEM_DONATE the retriever should have specified the
2637 * instruction permissions it wishes to receive.
2638 */
2639 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002640 case FFA_MEM_SHARE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002641 case FFA_MEM_SHARE_32:
2642 if (requested_instruction_access !=
2643 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2644 dlog_verbose(
2645 "%s: for share instruction permissions must "
2646 "NOT be specified.\n",
2647 __func__);
2648 return ffa_error(FFA_INVALID_PARAMETERS);
2649 }
2650 break;
J-Alves95fbb312024-03-20 15:19:16 +00002651 case FFA_MEM_LEND_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002652 case FFA_MEM_LEND_32:
2653 /*
2654 * For operations with multiple borrowers, only XN permissions
2655 * are permitted, and both the sender and the borrowers should have used
2656 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2657 */
2658 if (multiple_borrowers) {
2659 if (requested_instruction_access !=
2660 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2661 dlog_verbose(
2662 "%s: lend/share/donate with multiple "
2663 "borrowers "
2664 "instruction permissions must NOT be "
2665 "specified.\n",
2666 __func__);
2667 return ffa_error(FFA_INVALID_PARAMETERS);
2668 }
2669 break;
2670 }
2671 /* Fall through if the operation targets a single borrower. */
J-Alves95fbb312024-03-20 15:19:16 +00002672 case FFA_MEM_DONATE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002673 case FFA_MEM_DONATE_32:
2674 if (!multiple_borrowers &&
2675 requested_instruction_access ==
2676 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2677 dlog_verbose(
2678 "%s: for lend/donate with single borrower "
2679 "instruction permissions must be speficified "
2680 "by borrower\n",
2681 __func__);
2682 return ffa_error(FFA_INVALID_PARAMETERS);
2683 }
2684 break;
2685 default:
2686 panic("%s: Wrong func id provided.\n", __func__);
2687 }
2688
J-Alves96de29f2022-04-26 16:05:24 +01002689 switch (sent_instruction_access) {
2690 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2691 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002692 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002693 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002694 permissions->instruction_access =
2695 FFA_INSTRUCTION_ACCESS_X;
J-Alves96de29f2022-04-26 16:05:24 +01002696 }
2697 break;
2698 }
J-Alvesdcad8992023-09-15 14:10:35 +01002699 /*
2700 * Fall through if requested permissions are less
2701 * permissive than those provided by the sender.
2702 */
J-Alves96de29f2022-04-26 16:05:24 +01002703 case FFA_INSTRUCTION_ACCESS_NX:
2704 if (requested_instruction_access ==
2705 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2706 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2707 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002708 permissions->instruction_access =
2709 FFA_INSTRUCTION_ACCESS_NX;
J-Alves96de29f2022-04-26 16:05:24 +01002710 }
2711 break;
2712 }
2713 dlog_verbose(
2714 "Invalid instruction access requested; sender "
2715 "specified permissions %#x but receiver requested "
2716 "%#x.\n",
2717 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002718 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002719 case FFA_INSTRUCTION_ACCESS_RESERVED:
2720 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2721 "be checked before this point.");
2722 }
2723
J-Alvesdcad8992023-09-15 14:10:35 +01002724 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002725}
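
/*
 * Illustrative examples of the checks above (hypothetical permission
 * combinations, single borrower unless stated otherwise):
 *
 *   - FFA_MEM_SHARE_32: lender sent RW/NX, borrower requests data access RW
 *     and instruction access NOT_SPECIFIED -> resolves to RW/NX, FFA_SUCCESS.
 *   - Lender sent RO data access, borrower requests RW -> FFA_DENIED, as the
 *     requested permissions are more permissive than those sent.
 *   - FFA_MEM_SHARE_32/FFA_MEM_SHARE_64 with any instruction access other
 *     than NOT_SPECIFIED in the request -> FFA_INVALID_PARAMETERS.
 */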
2726
2727/**
2728 * Validate the receivers' permissions in the retrieve request against those
2729 * specified by the lender.
2730 * Through the `permissions` argument, returns the permissions to set at S2 for
2731 * the caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002732 * The function looks into the flag to bypass multiple borrower checks:
2733 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2734 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2735 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2736 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002737 */
2738static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2739 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002740 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002741 ffa_memory_access_permissions_t *permissions,
2742 struct ffa_memory_access **receiver_ret, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002743{
2744 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002745 bool bypass_multi_receiver_check =
2746 (retrieve_request->flags &
2747 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002748 const uint32_t region_receiver_count = memory_region->receiver_count;
2749 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002750
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002751 assert(receiver_ret != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002752 assert(permissions != NULL);
2753
Karl Meakin84710f32023-10-12 15:14:49 +01002754 *permissions = (ffa_memory_access_permissions_t){0};
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002755
J-Alves3456e032023-07-20 12:20:05 +01002756 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002757 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002758 dlog_verbose(
2759 "Retrieve request should contain same list of "
2760 "borrowers, as specified by the lender.\n");
2761 return ffa_error(FFA_INVALID_PARAMETERS);
2762 }
2763 } else {
2764 if (retrieve_request->receiver_count != 1) {
2765 dlog_verbose(
2766 "Set bypass multiple borrower check, receiver "
2767 "list must be sized 1 (%x)\n",
2768 memory_region->receiver_count);
2769 return ffa_error(FFA_INVALID_PARAMETERS);
2770 }
J-Alves96de29f2022-04-26 16:05:24 +01002771 }
2772
2773 retrieve_receiver_index = retrieve_request->receiver_count;
2774
J-Alves96de29f2022-04-26 16:05:24 +01002775 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2776 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002777 struct ffa_memory_access *retrieve_request_receiver =
2778 ffa_memory_region_get_receiver(retrieve_request, i);
2779 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002780 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002781 retrieve_request_receiver->receiver_permissions
2782 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002783 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002784 retrieve_request_receiver->receiver_permissions
2785 .receiver;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002786 struct ffa_memory_access *receiver;
2787 uint32_t mem_region_receiver_index;
2788 bool permissions_RO;
2789 bool clear_memory_flags;
J-Alves96de29f2022-04-26 16:05:24 +01002790 bool found_to_id = current_receiver_id == to_vm_id;
2791
J-Alves3456e032023-07-20 12:20:05 +01002792 if (bypass_multi_receiver_check && !found_to_id) {
2793 dlog_verbose(
2794 "Bypass multiple borrower check for id %x.\n",
2795 current_receiver_id);
2796 continue;
2797 }
2798
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002799 if (retrieve_request_receiver->composite_memory_region_offset !=
2800 0U) {
2801 dlog_verbose(
2802 "Retriever specified address ranges not "
2803 "supported (got offset %d).\n",
2804 retrieve_request_receiver
2805 ->composite_memory_region_offset);
2806 return ffa_error(FFA_INVALID_PARAMETERS);
2807 }
2808
J-Alves96de29f2022-04-26 16:05:24 +01002809 /*
2810 * Find the current receiver in the transaction descriptor from
2811 * sender.
2812 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002813 mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002814 ffa_memory_region_get_receiver_index(
2815 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002816
2817 if (mem_region_receiver_index ==
2818 memory_region->receiver_count) {
2819 dlog_verbose("%s: receiver %x not found\n", __func__,
2820 current_receiver_id);
2821 return ffa_error(FFA_DENIED);
2822 }
2823
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002824 receiver = ffa_memory_region_get_receiver(
2825 memory_region, mem_region_receiver_index);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002826 assert(receiver != NULL);
2827
2828 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002829
2830 if (found_to_id) {
2831 retrieve_receiver_index = i;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002832
2833 *receiver_ret = receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002834 }
2835
2836 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002837 * Check if retrieve request memory access list is valid:
2838 * - The retrieve request complies with the specification.
2839 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002840 */
J-Alvesdcad8992023-09-15 14:10:35 +01002841 ret = ffa_memory_retrieve_is_memory_access_valid(
Karl Meakin84710f32023-10-12 15:14:49 +01002842 func_id, sent_permissions.data_access,
2843 requested_permissions.data_access,
2844 sent_permissions.instruction_access,
2845 requested_permissions.instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002846 found_to_id ? permissions : NULL,
2847 region_receiver_count > 1);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002848
J-Alvesdcad8992023-09-15 14:10:35 +01002849 if (ret.func != FFA_SUCCESS_32) {
2850 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002851 }
2852
Karl Meakin84710f32023-10-12 15:14:49 +01002853 permissions_RO =
2854 (permissions->data_access == FFA_DATA_ACCESS_RO);
J-Alvese5262372024-03-27 11:02:03 +00002855 clear_memory_flags =
2856 (retrieve_request->flags &
2857 (FFA_MEMORY_REGION_FLAG_CLEAR |
2858 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002859
J-Alves96de29f2022-04-26 16:05:24 +01002860 /*
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002861 * Can't request PM to clear memory if only provided
2862 * with RO permissions.
J-Alves96de29f2022-04-26 16:05:24 +01002863 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002864 if (found_to_id && permissions_RO && clear_memory_flags) {
J-Alves96de29f2022-04-26 16:05:24 +01002865 dlog_verbose(
2866 "Receiver has RO permissions can not request "
2867 "clear.\n");
2868 return ffa_error(FFA_DENIED);
2869 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002870
2871 /*
2872 * Check that the impdef value in the retrieve_request matches the value in
2873 * the original memory send.
2874 */
2875 if (ffa_version_from_memory_access_desc_size(
2876 memory_region->memory_access_desc_size) >=
2877 MAKE_FFA_VERSION(1, 2) &&
2878 ffa_version_from_memory_access_desc_size(
2879 retrieve_request->memory_access_desc_size) >=
2880 MAKE_FFA_VERSION(1, 2)) {
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002881 if (receiver->impdef.val[0] !=
2882 retrieve_request_receiver->impdef.val[0] ||
2883 receiver->impdef.val[1] !=
2884 retrieve_request_receiver->impdef.val[1]) {
2885 dlog_verbose(
2886 "Impdef value in memory send does not "
2887 "match retrieve request value "
Karl Meakine8937d92024-03-19 16:04:25 +00002888 "send value %#lx %#lx retrieve request "
2889 "value %#lx %#lx\n",
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002890 receiver->impdef.val[0],
2891 receiver->impdef.val[1],
2892 retrieve_request_receiver->impdef
2893 .val[0],
2894 retrieve_request_receiver->impdef
2895 .val[1]);
2896 return ffa_error(FFA_INVALID_PARAMETERS);
2897 }
2898 }
J-Alves96de29f2022-04-26 16:05:24 +01002899 }
2900
2901 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2902 dlog_verbose(
2903 "Retrieve request does not contain caller's (%x) "
2904 "permissions\n",
2905 to_vm_id);
2906 return ffa_error(FFA_INVALID_PARAMETERS);
2907 }
2908
2909 return (struct ffa_value){.func = FFA_SUCCESS_32};
2910}
2911
J-Alvesa9cd7e32022-07-01 13:49:33 +01002912/*
2913 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2914 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2915 * of a pending memory sharing operation whose allocator is the SPM, for
2916 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2917 * the memory region descriptor of the retrieve request must be zeroed with the
2918 * exception of the sender ID and handle.
2919 */
J-Alves4f0d9c12024-01-17 17:23:11 +00002920bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request,
2921 struct vm_locked to_locked)
J-Alvesa9cd7e32022-07-01 13:49:33 +01002922{
2923 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
Karl Meakin84710f32023-10-12 15:14:49 +01002924 request->attributes.shareability == 0U &&
2925 request->attributes.cacheability == 0U &&
2926 request->attributes.type == 0U &&
2927 request->attributes.security == 0U && request->flags == 0U &&
J-Alvesa9cd7e32022-07-01 13:49:33 +01002928 request->tag == 0U && request->receiver_count == 0U &&
2929 plat_ffa_memory_handle_allocated_by_current_world(
2930 request->handle);
2931}
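
/*
 * Illustrative sketch of such a request (hypothetical local variables): the
 * hypervisor populates only the handle (and, optionally, the sender) and
 * leaves everything else zeroed, so that the check above succeeds for a
 * handle allocated in the current world.
 *
 *   struct ffa_memory_region hyp_request = {0};
 *
 *   hyp_request.handle = handle;
 *   hyp_request.sender = lender_id;
 */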
2932
2933/*
2934 * Helper to reset count of fragments retrieved by the hypervisor.
2935 */
2936static void ffa_memory_retrieve_complete_from_hyp(
2937 struct ffa_memory_share_state *share_state)
2938{
2939 if (share_state->hypervisor_fragment_count ==
2940 share_state->fragment_count) {
2941 share_state->hypervisor_fragment_count = 0;
2942 }
2943}
2944
J-Alves089004f2022-07-13 14:25:44 +01002945/**
J-Alves4f0d9c12024-01-17 17:23:11 +00002946 * Prepares the return of the ffa_value for the memory retrieve response.
2947 */
2948static struct ffa_value ffa_memory_retrieve_resp(uint32_t total_length,
2949 uint32_t fragment_length)
2950{
2951 return (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
2952 .arg1 = total_length,
2953 .arg2 = fragment_length};
2954}
2955
2956/**
J-Alves089004f2022-07-13 14:25:44 +01002957 * Validate the memory region descriptor provided by the borrower in
2958 * FFA_MEM_RETRIEVE_REQ against the saved memory region provided by the lender
2959 * at the memory sharing call.
2960 */
2961static struct ffa_value ffa_memory_retrieve_validate(
J-Alves4f0d9c12024-01-17 17:23:11 +00002962 ffa_id_t to_id, struct ffa_memory_region *retrieve_request,
2963 uint32_t retrieve_request_length,
J-Alves089004f2022-07-13 14:25:44 +01002964 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2965 uint32_t share_func)
2966{
2967 ffa_memory_region_flags_t transaction_type =
2968 retrieve_request->flags &
2969 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002970 enum ffa_memory_security security_state;
J-Alves4f0d9c12024-01-17 17:23:11 +00002971 const uint64_t memory_access_desc_size =
2972 retrieve_request->memory_access_desc_size;
2973 const uint32_t expected_retrieve_request_length =
2974 retrieve_request->receivers_offset +
2975 (uint32_t)(retrieve_request->receiver_count *
2976 memory_access_desc_size);
J-Alves089004f2022-07-13 14:25:44 +01002977
2978 assert(retrieve_request != NULL);
2979 assert(memory_region != NULL);
2980 assert(receiver_index != NULL);
J-Alves089004f2022-07-13 14:25:44 +01002981
J-Alves4f0d9c12024-01-17 17:23:11 +00002982 if (retrieve_request_length != expected_retrieve_request_length) {
2983 dlog_verbose(
2984 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
2985 "but was %d.\n",
2986 expected_retrieve_request_length,
2987 retrieve_request_length);
2988 return ffa_error(FFA_INVALID_PARAMETERS);
2989 }
2990
2991 if (retrieve_request->sender != memory_region->sender) {
2992 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002993 "Sender ID in retrieve request does not match the "
J-Alves4f0d9c12024-01-17 17:23:11 +00002994 "lender of memory with handle %#lx.\n",
2995 memory_region->handle);
2996 return ffa_error(FFA_DENIED);
2997 }
2998
2999 /*
3000 * The SPMC can only process retrieve requests to memory share
3001 * operations with one borrower from the other world. It can't
3002 * determine the ID of the NWd VM that invoked the retrieve
3003 * request interface call. It relies on the hypervisor to
3004 * validate the caller's ID against that provided in the
3005 * `receivers` list of the retrieve response.
3006 * In case there is only one borrower from the NWd in the
3007 * transaction descriptor, record that in the `receiver_id` for
3008 * later use, and validate in the retrieve request message.
3009 * This limitation is due to the fact that the SPMC can't determine
3010 * the index in the memory share state structures to update.
3011 */
3012 if (to_id == HF_HYPERVISOR_VM_ID) {
3013 uint32_t other_world_count = 0;
3014
3015 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3016 struct ffa_memory_access *receiver =
3017 ffa_memory_region_get_receiver(memory_region,
3018 i);
3019 assert(receiver != NULL);
3020
3021 if (!vm_id_is_current_world(
3022 receiver->receiver_permissions.receiver)) {
3023 to_id = receiver->receiver_permissions.receiver;
3024 other_world_count++;
3025 }
3026 }
3027
3028 if (other_world_count > 1) {
3029 dlog_verbose(
3030 "Support one receiver from the other "
3031 "world.\n");
3032 return ffa_error(FFA_NOT_SUPPORTED);
3033 }
3034 }
J-Alves089004f2022-07-13 14:25:44 +01003035 /*
3036 * Check that the transaction type expected by the receiver is
3037 * correct, if it has been specified.
3038 */
3039 if (transaction_type !=
3040 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
3041 transaction_type != (memory_region->flags &
3042 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
3043 dlog_verbose(
3044 "Incorrect transaction type %#x for "
Karl Meakine8937d92024-03-19 16:04:25 +00003045 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003046 transaction_type,
3047 memory_region->flags &
3048 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
3049 retrieve_request->handle);
3050 return ffa_error(FFA_INVALID_PARAMETERS);
3051 }
3052
3053 if (retrieve_request->tag != memory_region->tag) {
3054 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003055 "Incorrect tag %lu for FFA_MEM_RETRIEVE_REQ, expected "
3056 "%lu for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003057 retrieve_request->tag, memory_region->tag,
3058 retrieve_request->handle);
3059 return ffa_error(FFA_INVALID_PARAMETERS);
3060 }
3061
J-Alves4f0d9c12024-01-17 17:23:11 +00003062 *receiver_index =
3063 ffa_memory_region_get_receiver_index(memory_region, to_id);
J-Alves089004f2022-07-13 14:25:44 +01003064
3065 if (*receiver_index == memory_region->receiver_count) {
3066 dlog_verbose(
3067 "Incorrect receiver VM ID %d for "
Karl Meakine8937d92024-03-19 16:04:25 +00003068 "FFA_MEM_RETRIEVE_REQ, for handle %#lx.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003069 to_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01003070 return ffa_error(FFA_INVALID_PARAMETERS);
3071 }
3072
3073 if ((retrieve_request->flags &
3074 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
3075 dlog_verbose(
3076 "Retriever specified 'address range alignment 'hint' "
3077 "not supported.\n");
3078 return ffa_error(FFA_INVALID_PARAMETERS);
3079 }
3080 if ((retrieve_request->flags &
3081 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
3082 dlog_verbose(
3083 "Bits 8-5 must be zero in memory region's flags "
3084 "(address range alignment hint not supported).\n");
3085 return ffa_error(FFA_INVALID_PARAMETERS);
3086 }
3087
3088 if ((retrieve_request->flags & ~0x7FF) != 0U) {
3089 dlog_verbose(
3090 "Bits 31-10 must be zero in memory region's flags.\n");
3091 return ffa_error(FFA_INVALID_PARAMETERS);
3092 }
3093
J-Alves95fbb312024-03-20 15:19:16 +00003094 if ((share_func == FFA_MEM_SHARE_32 ||
3095 share_func == FFA_MEM_SHARE_64) &&
J-Alves089004f2022-07-13 14:25:44 +01003096 (retrieve_request->flags &
3097 (FFA_MEMORY_REGION_FLAG_CLEAR |
3098 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
3099 dlog_verbose(
3100 "Memory Share operation can't clean after relinquish "
3101 "memory region.\n");
3102 return ffa_error(FFA_INVALID_PARAMETERS);
3103 }
3104
3105 /*
3106 * If the borrower needs the memory to be cleared before mapping
3107 * to its address space, the sender should have set the flag
3108 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
3109 * FFA_DENIED.
3110 */
3111 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
3112 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
3113 dlog_verbose(
3114 "Borrower needs memory cleared. Sender needs to set "
3115 "flag for clearing memory.\n");
3116 return ffa_error(FFA_DENIED);
3117 }
3118
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003119 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
Karl Meakin84710f32023-10-12 15:14:49 +01003120 security_state = retrieve_request->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003121 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
3122 dlog_verbose(
3123 "Invalid security state for memory retrieve request "
3124 "operation.\n");
3125 return ffa_error(FFA_INVALID_PARAMETERS);
3126 }
3127
J-Alves089004f2022-07-13 14:25:44 +01003128 /*
3129 * If memory type is not specified, bypass validation of memory
3130 * attributes in the retrieve request. The retriever is expecting to
3131 * obtain this information from the SPMC.
3132 */
Karl Meakin84710f32023-10-12 15:14:49 +01003133 if (retrieve_request->attributes.type == FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves089004f2022-07-13 14:25:44 +01003134 return (struct ffa_value){.func = FFA_SUCCESS_32};
3135 }
3136
3137 /*
3138 * Ensure receiver's attributes are compatible with how
3139 * Hafnium maps memory: Normal Memory, Inner shareable,
3140 * Write-Back Read-Allocate Write-Allocate Cacheable.
3141 */
3142 return ffa_memory_attributes_validate(retrieve_request->attributes);
3143}
3144
J-Alves4f0d9c12024-01-17 17:23:11 +00003145static struct ffa_value ffa_partition_retrieve_request(
3146 struct share_states_locked share_states,
3147 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3148 struct ffa_memory_region *retrieve_request,
3149 uint32_t retrieve_request_length, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003150{
Karl Meakin84710f32023-10-12 15:14:49 +01003151 ffa_memory_access_permissions_t permissions = {0};
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003152 uint32_t memory_to_mode;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003153 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003154 struct ffa_composite_memory_region *composite;
3155 uint32_t total_length;
3156 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01003157 ffa_id_t receiver_id = to_locked.vm->id;
J-Alves4f0d9c12024-01-17 17:23:11 +00003158 bool is_retrieve_complete = false;
J-Alves4f0d9c12024-01-17 17:23:11 +00003159 const uint64_t memory_access_desc_size =
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003160 retrieve_request->memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003161 uint32_t receiver_index;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003162 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003163 ffa_memory_handle_t handle = retrieve_request->handle;
Karl Meakin84710f32023-10-12 15:14:49 +01003164 ffa_memory_attributes_t attributes = {0};
J-Alves460d36c2023-10-12 17:02:15 +01003165 uint32_t retrieve_mode = 0;
J-Alves4f0d9c12024-01-17 17:23:11 +00003166 struct ffa_memory_region *memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003167
J-Alves96de29f2022-04-26 16:05:24 +01003168 if (!share_state->sending_complete) {
3169 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003170 "Memory with handle %#lx not fully sent, can't "
J-Alves96de29f2022-04-26 16:05:24 +01003171 "retrieve.\n",
3172 handle);
J-Alves4f0d9c12024-01-17 17:23:11 +00003173 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alves96de29f2022-04-26 16:05:24 +01003174 }
3175
J-Alves4f0d9c12024-01-17 17:23:11 +00003176 /*
3177 * Validate retrieve request, according to what was sent by the
3178 * sender. Function will output the `receiver_index` from the
3179 * provided memory region.
3180 */
3181 ret = ffa_memory_retrieve_validate(
3182 receiver_id, retrieve_request, retrieve_request_length,
3183 memory_region, &receiver_index, share_state->share_func);
J-Alves089004f2022-07-13 14:25:44 +01003184
J-Alves4f0d9c12024-01-17 17:23:11 +00003185 if (ret.func != FFA_SUCCESS_32) {
3186 return ret;
J-Alves089004f2022-07-13 14:25:44 +01003187 }
J-Alves96de29f2022-04-26 16:05:24 +01003188
J-Alves4f0d9c12024-01-17 17:23:11 +00003189 /*
3190 * Validate the requested permissions against the sent
3191 * permissions.
3192 * Outputs the permissions to give to retriever at S2
3193 * PTs.
3194 */
3195 ret = ffa_memory_retrieve_validate_memory_access_list(
3196 memory_region, retrieve_request, receiver_id, &permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003197 &receiver, share_state->share_func);
J-Alves4f0d9c12024-01-17 17:23:11 +00003198 if (ret.func != FFA_SUCCESS_32) {
3199 return ret;
3200 }
3201
3202 memory_to_mode = ffa_memory_permissions_to_mode(
3203 permissions, share_state->sender_orig_mode);
3204
3205 ret = ffa_retrieve_check_update(
3206 to_locked, share_state->fragments,
3207 share_state->fragment_constituent_counts,
3208 share_state->fragment_count, memory_to_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003209 share_state->share_func, false, page_pool, &retrieve_mode,
3210 share_state->memory_protected);
J-Alves4f0d9c12024-01-17 17:23:11 +00003211
3212 if (ret.func != FFA_SUCCESS_32) {
3213 return ret;
3214 }
3215
3216 share_state->retrieved_fragment_count[receiver_index] = 1;
3217
3218 is_retrieve_complete =
3219 share_state->retrieved_fragment_count[receiver_index] ==
3220 share_state->fragment_count;
3221
J-Alvesb5084cf2022-07-06 14:20:12 +01003222 /* VMs acquire the RX buffer from SPMC. */
3223 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3224
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003225 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003226 * Copy response to RX buffer of caller and deliver the message.
3227 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003228 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003229 composite = ffa_memory_region_get_composite(memory_region, 0);
J-Alves4f0d9c12024-01-17 17:23:11 +00003230
Andrew Walbranca808b12020-05-15 17:22:28 +01003231 /*
J-Alves460d36c2023-10-12 17:02:15 +01003232 * Set the security state in the memory retrieve response attributes
3233 * if specified by the target mode.
3234 */
3235 attributes = plat_ffa_memory_security_mode(memory_region->attributes,
3236 retrieve_mode);
3237
3238 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003239 * Constituents which we received in the first fragment should
3240 * always fit in the first fragment we are sending, because the
3241 * header is the same size in both cases and we have a fixed
3242 * message buffer size. So `ffa_retrieved_memory_region_init`
3243 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01003244 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003245
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003246 /* Set the permissions that were resolved during validation. */
3247 receiver->receiver_permissions.permissions = permissions;
3248
3249 /*
3250 * Prepare the memory region descriptor for the retrieve response.
3251 * Provide the pointer to the receiver tracked in the share state
3252 * structures.
3253 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003254 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01003255 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003256 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003257 memory_region->flags, handle, permissions, receiver, 1,
3258 memory_access_desc_size, composite->page_count,
3259 composite->constituent_count, share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01003260 share_state->fragment_constituent_counts[0], &total_length,
3261 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01003262
J-Alves4f0d9c12024-01-17 17:23:11 +00003263 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003264 ffa_memory_retrieve_complete(share_states, share_state,
3265 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003266 }
J-Alves4f0d9c12024-01-17 17:23:11 +00003267
3268 return ffa_memory_retrieve_resp(total_length, fragment_length);
3269}
3270
3271static struct ffa_value ffa_hypervisor_retrieve_request(
3272 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3273 struct ffa_memory_region *retrieve_request)
3274{
3275 struct ffa_value ret;
3276 struct ffa_composite_memory_region *composite;
3277 uint32_t total_length;
3278 uint32_t fragment_length;
J-Alves4f0d9c12024-01-17 17:23:11 +00003279 ffa_memory_attributes_t attributes;
J-Alves7b6ab612024-01-24 09:54:54 +00003280 uint64_t memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003281 struct ffa_memory_region *memory_region;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003282 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003283 ffa_memory_handle_t handle = retrieve_request->handle;
3284
J-Alves4f0d9c12024-01-17 17:23:11 +00003285 memory_region = share_state->memory_region;
3286
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003287 assert(to_locked.vm->id == HF_HYPERVISOR_VM_ID);
3288
J-Alves7b6ab612024-01-24 09:54:54 +00003289 switch (to_locked.vm->ffa_version) {
3290 case MAKE_FFA_VERSION(1, 2):
3291 memory_access_desc_size = sizeof(struct ffa_memory_access);
3292 break;
3293 case MAKE_FFA_VERSION(1, 0):
3294 case MAKE_FFA_VERSION(1, 1):
3295 memory_access_desc_size = sizeof(struct ffa_memory_access_v1_0);
3296 break;
3297 default:
3298 panic("version not supported: %x\n", to_locked.vm->ffa_version);
3299 }
3300
J-Alves4f0d9c12024-01-17 17:23:11 +00003301 if (share_state->hypervisor_fragment_count != 0U) {
3302 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003303 "Memory with handle %#lx already retrieved by "
J-Alves4f0d9c12024-01-17 17:23:11 +00003304 "the hypervisor.\n",
3305 handle);
3306 return ffa_error(FFA_DENIED);
3307 }
3308
3309 share_state->hypervisor_fragment_count = 1;
3310
3311 ffa_memory_retrieve_complete_from_hyp(share_state);
3312
3313 /* VMs acquire the RX buffer from SPMC. */
3314 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3315
3316 /*
3317 * Copy response to RX buffer of caller and deliver the message.
3318 * This must be done before the share_state is (possibly) freed.
3319 */
3320 composite = ffa_memory_region_get_composite(memory_region, 0);
3321
3322 /*
3323 * Constituents which we received in the first fragment should
3324 * always fit in the first fragment we are sending, because the
3325 * header is the same size in both cases and we have a fixed
3326 * message buffer size. So `ffa_retrieved_memory_region_init`
3327 * should never fail.
3328 */
3329
3330 /*
3331 * Set the security state in the memory retrieve response attributes
3332 * if specified by the target mode.
3333 */
3334 attributes = plat_ffa_memory_security_mode(
3335 memory_region->attributes, share_state->sender_orig_mode);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003336
3337 receiver = ffa_memory_region_get_receiver(memory_region, 0);
3338
J-Alves4f0d9c12024-01-17 17:23:11 +00003339 CHECK(ffa_retrieved_memory_region_init(
3340 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
3341 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003342 memory_region->flags, handle,
3343 receiver->receiver_permissions.permissions, receiver,
3344 memory_region->receiver_count, memory_access_desc_size,
J-Alves4f0d9c12024-01-17 17:23:11 +00003345 composite->page_count, composite->constituent_count,
3346 share_state->fragments[0],
3347 share_state->fragment_constituent_counts[0], &total_length,
3348 &fragment_length));
3349
3350 return ffa_memory_retrieve_resp(total_length, fragment_length);
3351}
3352
3353struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
3354 struct ffa_memory_region *retrieve_request,
3355 uint32_t retrieve_request_length,
3356 struct mpool *page_pool)
3357{
3358 ffa_memory_handle_t handle = retrieve_request->handle;
3359 struct share_states_locked share_states;
3360 struct ffa_memory_share_state *share_state;
3361 struct ffa_value ret;
3362
3363 dump_share_states();
3364
3365 share_states = share_states_lock();
3366 share_state = get_share_state(share_states, handle);
3367 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003368 dlog_verbose("Invalid handle %#lx for FFA_MEM_RETRIEVE_REQ.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003369 handle);
3370 ret = ffa_error(FFA_INVALID_PARAMETERS);
3371 goto out;
3372 }
3373
3374 if (is_ffa_hypervisor_retrieve_request(retrieve_request, to_locked)) {
3375 ret = ffa_hypervisor_retrieve_request(share_state, to_locked,
3376 retrieve_request);
3377 } else {
3378 ret = ffa_partition_retrieve_request(
3379 share_states, share_state, to_locked, retrieve_request,
3380 retrieve_request_length, page_pool);
3381 }
3382
3383 /* Track use of the RX buffer if the handling has succeeded. */
3384 if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
3385 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
3386 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
3387 }
3388
Andrew Walbranca808b12020-05-15 17:22:28 +01003389out:
3390 share_states_unlock(&share_states);
3391 dump_share_states();
3392 return ret;
3393}
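
/*
 * Example (illustrative sketch only): a caller that has copied the retrieve
 * request out of the retriever's TX buffer into `retrieve_request` could
 * invoke the function above roughly as follows. `req_length` is
 * hypothetical.
 *
 *   struct ffa_value ret = ffa_memory_retrieve(to_locked, retrieve_request,
 *                                              req_length, page_pool);
 *
 *   if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
 *           // The first response fragment is now in the retriever's RX
 *           // buffer; ret.arg1 holds the total length and ret.arg2 the
 *           // fragment length. Any remaining fragments are fetched via
 *           // FFA_MEM_FRAG_RX (see ffa_memory_retrieve_continue below).
 *   }
 */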
3394
J-Alves5da37d92022-10-24 16:33:48 +01003395/**
3396 * Determine expected fragment offset according to the FF-A version of
3397 * the caller.
3398 */
3399static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
3400 struct ffa_memory_region *memory_region,
3401 uint32_t retrieved_constituents_count, uint32_t ffa_version)
3402{
3403 uint32_t expected_fragment_offset;
3404 uint32_t composite_constituents_offset;
3405
Kathleen Capellae4fe2962023-09-01 17:08:47 -04003406 if (ffa_version >= MAKE_FFA_VERSION(1, 1)) {
J-Alves5da37d92022-10-24 16:33:48 +01003407 /*
3408 * Hafnium handles memory regions in the FF-A v1.1 format, so we
3409 * can retrieve the constituents offset from the descriptor.
3410 */
3411 composite_constituents_offset =
3412 ffa_composite_constituent_offset(memory_region, 0);
3413 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
3414 /*
3415 * If retriever is FF-A v1.0, determine the composite offset
3416 * as it is expected to have been configured in the
3417 * retrieve response.
3418 */
3419 composite_constituents_offset =
3420 sizeof(struct ffa_memory_region_v1_0) +
3421 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003422 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01003423 sizeof(struct ffa_composite_memory_region);
3424 } else {
3425 panic("%s received an invalid FF-A version.\n", __func__);
3426 }
3427
3428 expected_fragment_offset =
3429 composite_constituents_offset +
3430 retrieved_constituents_count *
3431 sizeof(struct ffa_memory_region_constituent) -
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003432 (uint32_t)(memory_region->memory_access_desc_size *
3433 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003434
3435 return expected_fragment_offset;
3436}
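
/*
 * Worked form of the computation above (symbolic, assuming a v1.1+
 * retriever): if K constituents were already retrieved in earlier fragments,
 * the next fragment is expected at
 *
 *   ffa_composite_constituent_offset(memory_region, 0)
 *       + K * sizeof(struct ffa_memory_region_constituent)
 *       - (receiver_count - 1) * memory_access_desc_size
 *
 * The subtraction compensates for the retrieve response carrying fewer
 * receiver entries than the stored memory_region when the full receiver
 * list is not reproduced for the retriever.
 */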
3437
Andrew Walbranca808b12020-05-15 17:22:28 +01003438struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3439 ffa_memory_handle_t handle,
3440 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003441 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003442 struct mpool *page_pool)
3443{
3444 struct ffa_memory_region *memory_region;
3445 struct share_states_locked share_states;
3446 struct ffa_memory_share_state *share_state;
3447 struct ffa_value ret;
3448 uint32_t fragment_index;
3449 uint32_t retrieved_constituents_count;
3450 uint32_t i;
3451 uint32_t expected_fragment_offset;
3452 uint32_t remaining_constituent_count;
3453 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003454 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003455 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003456
3457 dump_share_states();
3458
3459 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003460 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003461 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003462 dlog_verbose("Invalid handle %#lx for FFA_MEM_FRAG_RX.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01003463 handle);
3464 ret = ffa_error(FFA_INVALID_PARAMETERS);
3465 goto out;
3466 }
3467
3468 memory_region = share_state->memory_region;
3469 CHECK(memory_region != NULL);
3470
Andrew Walbranca808b12020-05-15 17:22:28 +01003471 if (!share_state->sending_complete) {
3472 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003473 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003474 "retrieve.\n",
3475 handle);
3476 ret = ffa_error(FFA_INVALID_PARAMETERS);
3477 goto out;
3478 }
3479
J-Alves59ed0042022-07-28 18:26:41 +01003480 /*
3481 * If retrieve request from the hypervisor has been initiated in the
3482 * given share_state, continue it, else assume it is a continuation of
3483 * retrieve request from a NWd VM.
3484 */
3485 continue_ffa_hyp_mem_retrieve_req =
3486 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3487 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003488 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003489
J-Alves59ed0042022-07-28 18:26:41 +01003490 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003491 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003492 memory_region, to_locked.vm->id);
3493
3494 if (receiver_index == memory_region->receiver_count) {
3495 dlog_verbose(
3496 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
Karl Meakine8937d92024-03-19 16:04:25 +00003497 "borrower in the memory sharing transaction "
3498 "(%lx)\n",
J-Alves59ed0042022-07-28 18:26:41 +01003499 to_locked.vm->id, handle);
3500 ret = ffa_error(FFA_INVALID_PARAMETERS);
3501 goto out;
3502 }
3503
3504 if (share_state->retrieved_fragment_count[receiver_index] ==
3505 0 ||
3506 share_state->retrieved_fragment_count[receiver_index] >=
3507 share_state->fragment_count) {
3508 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003509 "Retrieval of memory with handle %#lx not yet "
J-Alves59ed0042022-07-28 18:26:41 +01003510 "started or already completed (%d/%d fragments "
3511 "retrieved).\n",
3512 handle,
3513 share_state->retrieved_fragment_count
3514 [receiver_index],
3515 share_state->fragment_count);
3516 ret = ffa_error(FFA_INVALID_PARAMETERS);
3517 goto out;
3518 }
3519
3520 fragment_index =
3521 share_state->retrieved_fragment_count[receiver_index];
3522 } else {
3523 if (share_state->hypervisor_fragment_count == 0 ||
3524 share_state->hypervisor_fragment_count >=
3525 share_state->fragment_count) {
3526 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003527				"Retrieval of memory with handle %#lx not "
J-Alves59ed0042022-07-28 18:26:41 +01003528				"started by the hypervisor.\n",
3529 handle);
3530 ret = ffa_error(FFA_INVALID_PARAMETERS);
3531 goto out;
3532 }
3533
3534 if (memory_region->sender != sender_vm_id) {
3535 dlog_verbose(
3536				"Sender ID (%x) does not match the sender of "
Karl Meakine8937d92024-03-19 16:04:25 +00003537				"memory handle %#lx.\n",
J-Alves59ed0042022-07-28 18:26:41 +01003538 sender_vm_id, handle);
3539 ret = ffa_error(FFA_INVALID_PARAMETERS);
3540 goto out;
3541 }
3542
3543 fragment_index = share_state->hypervisor_fragment_count;
3544
3545 receiver_index = 0;
3546 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003547
3548 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003549 * Check that the given fragment offset is correct by counting
3550 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003551 */
3552 retrieved_constituents_count = 0;
3553 for (i = 0; i < fragment_index; ++i) {
3554 retrieved_constituents_count +=
3555 share_state->fragment_constituent_counts[i];
3556 }
J-Alvesc7484f12022-05-13 12:41:14 +01003557
3558 CHECK(memory_region->receiver_count > 0);
3559
Andrew Walbranca808b12020-05-15 17:22:28 +01003560 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003561 ffa_memory_retrieve_expected_offset_per_ffa_version(
3562 memory_region, retrieved_constituents_count,
3563 to_locked.vm->ffa_version);
3564
Andrew Walbranca808b12020-05-15 17:22:28 +01003565 if (fragment_offset != expected_fragment_offset) {
3566 dlog_verbose("Fragment offset was %d but expected %d.\n",
3567 fragment_offset, expected_fragment_offset);
3568 ret = ffa_error(FFA_INVALID_PARAMETERS);
3569 goto out;
3570 }
3571
J-Alves4f0d9c12024-01-17 17:23:11 +00003572 /*
3573	 * When Hafnium is the hypervisor, acquire the RX buffer of a VM that
3574	 * is currently owned by the SPMC.
3575 */
3576	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
J-Alves59ed0042022-07-28 18:26:41 +01003577
Andrew Walbranca808b12020-05-15 17:22:28 +01003578 remaining_constituent_count = ffa_memory_fragment_init(
3579 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3580 share_state->fragments[fragment_index],
3581 share_state->fragment_constituent_counts[fragment_index],
3582 &fragment_length);
3583 CHECK(remaining_constituent_count == 0);
J-Alves674e4de2024-01-17 16:20:32 +00003584
Andrew Walbranca808b12020-05-15 17:22:28 +01003585 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003586 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003587
J-Alves59ed0042022-07-28 18:26:41 +01003588 if (!continue_ffa_hyp_mem_retrieve_req) {
3589 share_state->retrieved_fragment_count[receiver_index]++;
3590 if (share_state->retrieved_fragment_count[receiver_index] ==
3591 share_state->fragment_count) {
3592 ffa_memory_retrieve_complete(share_states, share_state,
3593 page_pool);
3594 }
3595 } else {
3596 share_state->hypervisor_fragment_count++;
3597
3598 ffa_memory_retrieve_complete_from_hyp(share_state);
3599 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003600 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3601 .arg1 = (uint32_t)handle,
3602 .arg2 = (uint32_t)(handle >> 32),
3603 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003604
3605out:
3606 share_states_unlock(&share_states);
3607 dump_share_states();
3608 return ret;
3609}
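
/*
 * Illustrative borrower-side sketch of the fragmented retrieve flow that
 * ffa_memory_retrieve_continue() services above. It is not part of this
 * file: ffa_call(), rx_buffer, total_length, first_fragment_length and
 * process_fragment() are assumptions about the caller's environment, and
 * the register layout (w1/w2 = handle, w3 = fragment offset) follows the
 * FF-A memory protocol.
 *
 *	uint32_t offset = first_fragment_length;
 *
 *	while (offset < total_length) {
 *		struct ffa_value ret = ffa_call((struct ffa_value){
 *			.func = FFA_MEM_FRAG_RX_32,
 *			.arg1 = (uint32_t)handle,
 *			.arg2 = (uint32_t)(handle >> 32),
 *			.arg3 = offset});
 *
 *		if (ret.func != FFA_MEM_FRAG_TX_32) {
 *			break;
 *		}
 *
 *		// The next fragment is now in the caller's RX buffer and
 *		// ret.arg3 holds its length.
 *		process_fragment(rx_buffer, ret.arg3);
 *		offset += ret.arg3;
 *	}
 */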
3610
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003611struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003612 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003613 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003614{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003615 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003616 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003617 struct ffa_memory_share_state *share_state;
3618 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003619 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003620 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003621 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003622 bool receivers_relinquished_memory;
Karl Meakin84710f32023-10-12 15:14:49 +01003623 ffa_memory_access_permissions_t receiver_permissions = {0};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003624
Andrew Walbrana65a1322020-04-06 19:32:32 +01003625 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003626 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003627 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01003628 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003629 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003630 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003631 }
3632
Andrew Walbrana65a1322020-04-06 19:32:32 +01003633 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003634 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003635 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01003636 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003637 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003638 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003639 }
3640
3641 dump_share_states();
3642
3643 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003644 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003645 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003646 dlog_verbose("Invalid handle %#lx for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003647 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003648 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003649 goto out;
3650 }
3651
Andrew Walbranca808b12020-05-15 17:22:28 +01003652 if (!share_state->sending_complete) {
3653 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003654 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003655 "relinquish.\n",
3656 handle);
3657 ret = ffa_error(FFA_INVALID_PARAMETERS);
3658 goto out;
3659 }
3660
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003661 memory_region = share_state->memory_region;
3662 CHECK(memory_region != NULL);
3663
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003664 receiver_index = ffa_memory_region_get_receiver_index(
3665 memory_region, from_locked.vm->id);
J-Alves8eb19162022-04-28 10:56:48 +01003666
3667 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003668 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003669 "VM ID %d tried to relinquish memory region "
Karl Meakine8937d92024-03-19 16:04:25 +00003670			"with handle %#lx but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003671 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003672 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003673 goto out;
3674 }
3675
J-Alves8eb19162022-04-28 10:56:48 +01003676 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003677 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003678 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003679 "Memory with handle %#lx not yet fully "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003680 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003681 "receiver %x can't relinquish.\n",
3682 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003683 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003684 goto out;
3685 }
3686
J-Alves3c5b2072022-11-21 12:45:40 +00003687 /*
3688	 * Only clear the memory if the relinquish request asks for it and no
3689	 * other borrower still has the memory (or a fragment of it) retrieved.
3690 */
3691 receivers_relinquished_memory = true;
3692
3693 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3694 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003695 ffa_memory_region_get_receiver(memory_region, i);
3696 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003697 if (receiver->receiver_permissions.receiver ==
3698 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003699 receiver_permissions =
3700 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003701 continue;
3702 }
3703
3704 if (share_state->retrieved_fragment_count[i] != 0U) {
3705 receivers_relinquished_memory = false;
3706 break;
3707 }
3708 }
3709
3710 clear = receivers_relinquished_memory &&
Daniel Boulby2e14ebe2024-01-15 16:21:44 +00003711 ((relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3712 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003713
3714 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003715 * Clear is not allowed for memory that was shared, as the
3716 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003717 */
J-Alves95fbb312024-03-20 15:19:16 +00003718 if (clear && (share_state->share_func == FFA_MEM_SHARE_32 ||
3719 share_state->share_func == FFA_MEM_SHARE_64)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003720 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003721 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003722 goto out;
3723 }
3724
J-Alvesb886d492024-04-15 10:55:29 +01003725 if (clear && receiver_permissions.data_access == FFA_DATA_ACCESS_RO) {
J-Alves639ddfc2023-11-21 14:17:26 +00003726 dlog_verbose("%s: RO memory can't use clear memory flag.\n",
3727 __func__);
3728 ret = ffa_error(FFA_DENIED);
3729 goto out;
3730 }
3731
Andrew Walbranca808b12020-05-15 17:22:28 +01003732 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003733 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003734 share_state->fragment_constituent_counts,
3735 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003736
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003737 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003738 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003739 * Mark memory handle as not retrieved, so it can be
3740 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003741 */
J-Alves8eb19162022-04-28 10:56:48 +01003742 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003743 }
3744
3745out:
3746 share_states_unlock(&share_states);
3747 dump_share_states();
3748 return ret;
3749}
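
/*
 * Illustrative borrower-side sketch of the relinquish message that
 * ffa_memory_relinquish() above expects to find in the caller's TX buffer:
 * a single endpoint matching the caller, with the clear flag set only when
 * the borrower was granted RW access. It is not part of this file;
 * tx_buffer, own_id and the FFA_MEM_RELINQUISH_32 function ID are
 * assumptions about the caller's environment.
 *
 *	struct ffa_mem_relinquish *req =
 *		(struct ffa_mem_relinquish *)tx_buffer;
 *
 *	req->handle = handle;
 *	req->flags = FFA_MEMORY_REGION_FLAG_CLEAR;
 *	req->endpoint_count = 1;
 *	req->endpoints[0] = own_id;
 *
 *	ffa_call((struct ffa_value){.func = FFA_MEM_RELINQUISH_32});
 */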
3750
3751/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003752 * Validates that the reclaim transition is allowed for the given
3753 * handle, updates the page table of the reclaiming VM, and frees the
3754 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003755 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003756struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003757 ffa_memory_handle_t handle,
3758 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003759 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003760{
3761 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003762 struct ffa_memory_share_state *share_state;
3763 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003764 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003765
3766 dump_share_states();
3767
3768 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003769
Karl Meakin4a2854a2023-06-30 16:26:52 +01003770 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003771 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003772 dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003773 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003774 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003775 goto out;
3776 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003777 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003778
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003779 CHECK(memory_region != NULL);
3780
J-Alvesa9cd7e32022-07-01 13:49:33 +01003781 if (vm_id_is_current_world(to_locked.vm->id) &&
3782 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003783 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003784 "VM %#x attempted to reclaim memory handle %#lx "
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003785 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003786 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003787 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003788 goto out;
3789 }
3790
Andrew Walbranca808b12020-05-15 17:22:28 +01003791 if (!share_state->sending_complete) {
3792 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003793 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003794 "reclaim.\n",
3795 handle);
3796 ret = ffa_error(FFA_INVALID_PARAMETERS);
3797 goto out;
3798 }
3799
J-Alves752236c2022-04-28 11:07:47 +01003800 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3801 if (share_state->retrieved_fragment_count[i] != 0) {
3802 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003803 "Tried to reclaim memory handle %#lx "
J-Alves3c5b2072022-11-21 12:45:40 +00003804 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003805				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003806 handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003807 ffa_memory_region_get_receiver(memory_region, i)
3808 ->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01003809 ret = ffa_error(FFA_DENIED);
3810 goto out;
3811 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003812 }
3813
Andrew Walbranca808b12020-05-15 17:22:28 +01003814 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01003815 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003816 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003817 share_state->fragment_count, share_state->sender_orig_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003818 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool,
J-Alvesfd206052023-05-22 16:45:00 +01003819 NULL, share_state->memory_protected);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003820
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003821 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003822 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003823 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003824 }
3825
3826out:
3827 share_states_unlock(&share_states);
3828 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003829}
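
/*
 * Illustrative lender-side sketch of the reclaim handled above: once every
 * borrower has relinquished, the original sender reclaims the handle and
 * may request that the memory be zeroed. It is not part of this file;
 * ffa_call() is an assumption about the caller's environment, while
 * FFA_MEM_RECLAIM_32 and FFA_MEM_RECLAIM_CLEAR match the identifiers used
 * by ffa_memory_reclaim().
 *
 *	struct ffa_value ret = ffa_call((struct ffa_value){
 *		.func = FFA_MEM_RECLAIM_32,
 *		.arg1 = (uint32_t)handle,
 *		.arg2 = (uint32_t)(handle >> 32),
 *		.arg3 = FFA_MEM_RECLAIM_CLEAR});
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		// The pages are once again exclusively owned by the sender.
 *	}
 */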