/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/memcpy_trapped.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/ffa_memory.h"
#include "hf/arch/plat/ffa/indirect_messaging.h"
#include "hf/arch/plat/ffa/setup_and_discovery.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/panic.h"
#include "hf/plat/memory_protect.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
	struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
	struct ffa_memory_access *receiver;
	uint32_t composite_offset;

	CHECK(receiver_index < memory_region->receiver_count);

	receiver =
		ffa_memory_region_get_receiver(memory_region, receiver_index);
	CHECK(receiver != NULL);

	composite_offset = receiver->composite_memory_region_offset;

	CHECK(composite_offset != 0);

	return composite_offset + sizeof(struct ffa_composite_memory_region);
}

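/*
 * Illustrative note (not from the original source): the value returned by
 * ffa_composite_constituent_offset() is the offset of `constituents[0]`, i.e.
 * the composite descriptor's offset plus the size of its fixed header. This
 * is why allocate_share_state() below derives the first fragment's
 * constituent count as
 * (fragment_length - this offset) / sizeof(struct ffa_memory_region_constituent).
 */
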
/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle)
{
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
			     ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			return allocated_state;
		}
	}

	return NULL;
}

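/*
 * Note: `allocate_share_state` and the accessors below take a
 * `struct share_states_locked`, so the caller is expected to hold the lock
 * returned by share_states_lock() for the duration of the call; the assert on
 * `share_states.share_states` reflects this, since share_states_unlock()
 * clears that pointer.
 */
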
/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

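/*
 * Typical usage of the locking API above (illustrative sketch only, not a
 * call site from this file; `handle` is a hypothetical local):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	struct ffa_memory_share_state *share_state =
 *		get_share_state(share_states, handle);
 *	...
 *	share_states_unlock(&share_states);
 */
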
/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

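/*
 * Worked example (illustrative): if receiver 0's composite constituent offset
 * is C and two fragments carrying n0 and n1 constituents have been received so
 * far, the next fragment is expected at offset
 * C + (n0 + n1) * sizeof(struct ffa_memory_region_constituent).
 */
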
static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes (shareability = %s, cacheability = %s, "
	     "type = %s, security = %s), flags %#x, handle %#lx "
	     "tag %lu, memory access descriptor size %u, to %u "
	     "recipients [",
	     memory_region->sender,
	     ffa_memory_shareability_name(
		     memory_region->attributes.shareability),
	     ffa_memory_cacheability_name(
		     memory_region->attributes.cacheability),
	     ffa_memory_type_name(memory_region->attributes.type),
	     ffa_memory_security_name(memory_region->attributes.security),
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->memory_access_desc_size,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		if (i != 0) {
			dlog(", ");
		}
		dlog("Receiver %#x: permissions (%s, %s) (offset %u)",
		     receiver->receiver_permissions.receiver,
		     ffa_data_access_name(receiver->receiver_permissions
						  .permissions.data_access),
		     ffa_instruction_access_name(
			     receiver->receiver_permissions.permissions
				     .instruction_access),
		     receiver->composite_memory_region_offset);
		/* The impdef field is only present from v1.2 and later */
		if (ffa_version_from_memory_access_desc_size(
			    memory_region->memory_access_desc_size) >=
		    FFA_VERSION_1_2) {
			dlog(", impdef: %#lx %#lx", receiver->impdef.val[0],
			     receiver->impdef.val[1]);
		}
	}
	dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_64:
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_64:
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_64:
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#lx (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     " sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (permissions.data_access) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	default:
		panic("Unknown data access %#x\n", permissions.data_access);
	}

	switch (permissions.instruction_access) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	default:
		panic("Unknown instruction access %#x\n",
		      permissions.instruction_access);
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	mode |= default_mode & MM_MODE_D;

	return mode;
}

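/*
 * Summary of the mapping above (illustrative): RO -> MM_MODE_R,
 * RW -> MM_MODE_R | MM_MODE_W, X adds MM_MODE_X, NX adds nothing, and the
 * NOT_SPECIFIED values inherit the corresponding bits from `default_mode`.
 * The security state and device memory (MM_MODE_D) bits are always carried
 * over from `default_mode`.
 */
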
/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		dlog_verbose("%s: no constituents\n", __func__);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				dlog_verbose("%s: addresses not page-aligned\n",
					     __func__);
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				dlog_verbose(
					"%s: constituent memory range "
					"%#lx..%#lx "
					"not mapped with the same mode\n",
					__func__, begin.ipa, end.ipa);
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"%s: expected mode %#x but was %#x for "
					"%d pages at %#lx.\n",
					__func__, *orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

enum ffa_version ffa_version_from_memory_access_desc_size(
	uint32_t memory_access_desc_size)
{
	switch (memory_access_desc_size) {
	/*
	 * v1.0 and v1.1 memory access descriptors are the same size however
	 * v1.1 is the first version to include the memory access descriptor
	 * size field so return v1.1.
	 */
	case sizeof(struct ffa_memory_access_v1_0):
		return FFA_VERSION_1_1;
	case sizeof(struct ffa_memory_access):
		return FFA_VERSION_1_2;
	default:
		return 0;
	}
}

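/*
 * Note (assumption, not stated in the original source): the default return of
 * 0 is not a valid `enum ffa_version` value, so callers such as
 * receiver_size_and_offset_valid_for_version() below end up treating an
 * unrecognised descriptor size as incompatible.
 */
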
/**
 * Check if the receivers' size and offset given are valid for the sender's
 * FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
	uint32_t receivers_size, uint32_t receivers_offset,
	enum ffa_version ffa_version)
{
	/*
	 * Check that the version that the memory access descriptor size belongs
	 * to is compatible with the FF-A version we believe the sender to be.
	 */
	enum ffa_version expected_ffa_version =
		ffa_version_from_memory_access_desc_size(receivers_size);
	if (!ffa_versions_are_compatible(expected_ffa_version, ffa_version)) {
		return false;
	}

	/*
	 * Check the receivers_offset matches the version we found from the
	 * memory access descriptor size.
	 */
	switch (expected_ffa_version) {
	case FFA_VERSION_1_1:
	case FFA_VERSION_1_2:
		return receivers_offset == sizeof(struct ffa_memory_region);
	default:
		return false;
	}
}

/**
 * Check that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows, and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
				    enum ffa_version ffa_version,
				    uint32_t fragment_length,
				    bool send_transaction)
{
	uint32_t receiver_count;
	struct ffa_memory_access *receiver;
	uint32_t composite_offset_0;
	struct ffa_memory_region_v1_0 *memory_region_v1_0 =
		(struct ffa_memory_region_v1_0 *)memory_region;

	if (ffa_version == FFA_VERSION_1_0) {
		/* Check the reserved fields are 0. */
		if (memory_region_v1_0->reserved_0 != 0 ||
		    memory_region_v1_0->reserved_1 != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		receiver_count = memory_region_v1_0->receiver_count;
	} else {
		uint32_t receivers_size =
			memory_region->memory_access_desc_size;
		uint32_t receivers_offset = memory_region->receivers_offset;

		/* Check the reserved field is 0. */
		if (memory_region->reserved[0] != 0 ||
		    memory_region->reserved[1] != 0 ||
		    memory_region->reserved[2] != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		/*
		 * Check memory_access_desc_size matches the size of the struct
		 * for the sender's FF-A version.
		 */
		if (!receiver_size_and_offset_valid_for_version(
			    receivers_size, receivers_offset, ffa_version)) {
			dlog_verbose(
				"Invalid memory access descriptor size %d, "
				"or receiver offset %d, "
				"for FF-A version %#x\n",
				receivers_size, receivers_offset, ffa_version);
			return false;
		}

		receiver_count = memory_region->receiver_count;
	}

	/* Check the receiver count is not too large. */
	if (receiver_count > MAX_MEM_SHARE_RECIPIENTS || receiver_count < 1) {
		dlog_verbose(
			"Receiver count must be 0 < receiver_count <= %u, "
			"specified %u\n",
			MAX_MEM_SHARE_RECIPIENTS, receiver_count);
		return false;
	}

	/* Check values in the memory access descriptors. */
	/*
	 * The composite offset values must be the same for all receivers, so
	 * check that the first one is valid and then that they are all the
	 * same.
	 */
	receiver = ffa_version == FFA_VERSION_1_0
			   ? (struct ffa_memory_access *)&memory_region_v1_0
				     ->receivers[0]
			   : ffa_memory_region_get_receiver(memory_region, 0);
	assert(receiver != NULL);
	composite_offset_0 = receiver->composite_memory_region_offset;

	if (!send_transaction) {
		if (composite_offset_0 != 0) {
			dlog_verbose(
				"Composite memory region descriptor offset "
				"must be 0 for retrieve requests. "
				"Currently %d",
				composite_offset_0);
			return false;
		}
	} else {
		bool comp_offset_is_zero = composite_offset_0 == 0U;
		bool comp_offset_lt_transaction_descriptor_size =
			composite_offset_0 <
			(sizeof(struct ffa_memory_region) +
			 (size_t)(memory_region->memory_access_desc_size *
				  memory_region->receiver_count));
		bool comp_offset_with_comp_gt_fragment_length =
			composite_offset_0 +
				sizeof(struct ffa_composite_memory_region) >
			fragment_length;
		if (comp_offset_is_zero ||
		    comp_offset_lt_transaction_descriptor_size ||
		    comp_offset_with_comp_gt_fragment_length) {
			dlog_verbose(
				"Invalid composite memory region descriptor "
				"offset for send transaction %u\n",
				composite_offset_0);
			return false;
		}
	}

	for (size_t i = 0; i < memory_region->receiver_count; i++) {
		uint32_t composite_offset;

		if (ffa_version == FFA_VERSION_1_0) {
			struct ffa_memory_access_v1_0 *receiver_v1_0 =
				&memory_region_v1_0->receivers[i];
			/* Check reserved fields are 0 */
			if (receiver_v1_0->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. Currently "
					"receiver %zu has a reserved field "
					"with a value of %lu\n",
					i, receiver_v1_0->reserved_0);
				return false;
			}
			/*
			 * We can cast to the current version receiver as the
			 * remaining fields we are checking have the same
			 * offsets for all versions since memory access
			 * descriptors are forwards compatible.
			 */
			receiver = (struct ffa_memory_access *)receiver_v1_0;
		} else {
			receiver = ffa_memory_region_get_receiver(memory_region,
								  i);
			assert(receiver != NULL);

			if (ffa_version == FFA_VERSION_1_1) {
				/*
				 * Since the reserved field is at the end of the
				 * Endpoint Memory Access Descriptor we must
				 * cast to ffa_memory_access_v1_0 as they match.
				 * Since all fields except reserved in the
				 * Endpoint Memory Access Descriptor have the
				 * same offsets across all versions, this cast
				 * is not required when accessing other fields
				 * in the future.
				 */
				struct ffa_memory_access_v1_0 *receiver_v1_0 =
					(struct ffa_memory_access_v1_0 *)
						receiver;
				if (receiver_v1_0->reserved_0 != 0) {
					dlog_verbose(
						"Reserved field in the memory "
						"access descriptor must be "
						"zero. Currently receiver %zu "
						"has a reserved field with a "
						"value of %lu\n",
						i, receiver_v1_0->reserved_0);
					return false;
				}

			} else {
				if (receiver->reserved_0 != 0) {
					dlog_verbose(
						"Reserved field in the memory "
						"access descriptor must be "
						"zero. Currently receiver %zu "
						"has a reserved field with a "
						"value of %lu\n",
						i, receiver->reserved_0);
					return false;
				}
			}
		}

		/* Check composite offset values are equal for all receivers. */
		composite_offset = receiver->composite_memory_region_offset;
		if (composite_offset != composite_offset_0) {
			dlog_verbose(
				"Composite offset %#x differs from %#x at "
				"index %zu\n",
				composite_offset, composite_offset_0, i);
			return false;
		}
	}
	return true;
}

Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000713/**
J-Alves460d36c2023-10-12 17:02:15 +0100714 * If the receivers for the memory management operation are all from the
Daniel Boulby734981e2024-07-22 11:06:35 +0100715 * secure world, the memory is not device memory (as it isn't covered by the
716 * granule page table) and this isn't a FFA_MEM_SHARE, then request memory
717 * security state update by returning MAP_ACTION_CHECK_PROTECT.
J-Alves460d36c2023-10-12 17:02:15 +0100718 */
719static enum ffa_map_action ffa_mem_send_get_map_action(
720 bool all_receivers_from_current_world, ffa_id_t sender_id,
Daniel Boulby734981e2024-07-22 11:06:35 +0100721 uint32_t mem_func_id, bool is_normal_memory)
J-Alves460d36c2023-10-12 17:02:15 +0100722{
J-Alves95fbb312024-03-20 15:19:16 +0000723 const bool is_memory_share_abi = mem_func_id == FFA_MEM_SHARE_32 ||
724 mem_func_id == FFA_MEM_SHARE_64;
725 const bool protect_memory =
726 (!is_memory_share_abi && all_receivers_from_current_world &&
Daniel Boulby734981e2024-07-22 11:06:35 +0100727 ffa_is_vm_id(sender_id) && is_normal_memory);
J-Alves460d36c2023-10-12 17:02:15 +0100728
729 return protect_memory ? MAP_ACTION_CHECK_PROTECT : MAP_ACTION_CHECK;
730}
731
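/*
 * Illustrative example of the rule above: an FFA_MEM_LEND_32/64 or
 * FFA_MEM_DONATE_32/64 from a NWd VM (a sender with a VM ID) in which every
 * receiver is in the secure world, for normal (non-device) memory, yields
 * MAP_ACTION_CHECK_PROTECT; any FFA_MEM_SHARE_32/64, device memory, or a
 * receiver in the normal world keeps the plain MAP_ACTION_CHECK.
 */
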
/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM. It outputs the mapping action that needs to be
 * invoked for the given memory range. On memory lend/donate there
 * could be a need to protect the memory from the normal world.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode, enum ffa_map_action *map_action, bool zero)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;
	bool all_receivers_from_current_world = true;
	uint32_t receivers_count = memory_region->receiver_count;
	const bool is_memory_lend = (share_func == FFA_MEM_LEND_32) ||
				    (share_func == FFA_MEM_LEND_64);

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/*
	 * Check that the requested memory type is compatible with the memory
	 * type of the owner. Memory types follow a precedence where Normal
	 * memory is more permissive than Device memory, so device memory can
	 * only be shared as device memory.
	 */
	if (memory_region->attributes.type == FFA_MEMORY_NORMAL_MEM &&
	    (*orig_from_mode & MM_MODE_D) != 0U) {
		dlog_verbose(
			"Sending device memory as Normal memory is not "
			"allowed\n");
		return ffa_error(FFA_DENIED);
	}

	/* Device memory regions can only be lent to a single borrower. */
	if ((*orig_from_mode & MM_MODE_D) != 0U &&
	    !(is_memory_lend && receivers_count == 1)) {
		dlog_verbose(
			"Device memory can only be lent to a single borrower "
			"(mode is %#x).\n",
			*orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Memory cannot be zeroed during the lend/donate operation if the
	 * sender only has RO access.
	 */
	if ((*orig_from_mode & MM_MODE_W) == 0 && zero == true) {
		dlog_verbose(
			"Cannot zero memory when the sender doesn't have "
			"write access\n");
		return ffa_error(FFA_DENIED);
	}

	assert(receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		assert(receiver != NULL);
		ffa_memory_access_permissions_t permissions =
			receiver->receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		/*
		 * The assumption is that, at this point, an operation from an
		 * SP to a receiver VM would already have returned an
		 * FFA_ERROR.
		 */
		if (!ffa_is_vm_id(from.vm->id)) {
			assert(!ffa_is_vm_id(
				receiver->receiver_permissions.receiver));
		}

		/* Track whether all receivers are from the current world. */
		all_receivers_from_current_world =
			all_receivers_from_current_world &&
			vm_id_is_current_world(
				receiver->receiver_permissions.receiver);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	*map_action = ffa_mem_send_get_map_action(
		all_receivers_from_current_world, from.vm->id, share_func,
		(*orig_from_mode & MM_MODE_D) == 0U);

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_64:
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;
	case FFA_MEM_LEND_64:
	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;
	case FFA_MEM_SHARE_64:
	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

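/*
 * Summary of the sender-side transition above (illustrative): starting from
 * the original mode with the state bits cleared, DONATE marks the sender's
 * pages MM_MODE_INVALID | MM_MODE_UNOWNED, LEND marks them MM_MODE_INVALID,
 * and SHARE marks them MM_MODE_SHARED.
 */
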
static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode, enum ffa_map_action *map_action)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	assert(map_action != NULL);
	if (vm_id_is_current_world(from.vm->id)) {
		*map_action = MAP_ACTION_COMMIT;
	} else {
		/*
		 * No need to check the attributes of the caller.
		 * The assumption is that the retrieve request of the receiver
		 * also used MAP_ACTION_NONE, and no update was done to the
		 * page tables. When the receiver is not at the secure virtual
		 * instance (i.e. when the receiver is a VM), the SPMC doesn't
		 * manage its stage-2 translation.
		 */
		*map_action = MAP_ACTION_NONE;

		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t sender_orig_mode, uint32_t *to_mode, bool memory_protected,
	enum ffa_map_action *map_action)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Find the appropriate new mode. */
	*to_mode = sender_orig_mode;

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, it is expected that orig_to_mode overlaps
		 * with `state_mask`, as a result of the function
		 * `ffa_send_check_transition`.
		 *
		 * If Hafnium is the SPMC:
		 * - If the caller of the reclaim interface is an SP, the
		 *   memory shall have been protected throughout the flow.
		 * - If the caller of the reclaim is from the NWd, the memory
		 *   may have been protected at the time of lending/donating
		 *   the memory. In such a case, set the action to unprotect
		 *   the memory in the handling of the reclaim operation.
		 * - If Hafnium is the hypervisor, memory shall never have been
		 *   protected in memory lend/share/donate.
		 *
		 * More details in the doc comment of the function
		 * `ffa_region_group_identity_map`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
			assert(!memory_protected);
		} else if (to.vm->id == HF_OTHER_WORLD_ID &&
			   map_action != NULL && memory_protected) {
			*map_action = MAP_ACTION_COMMIT_UNPROTECT;
		}
	} else {
		if (!vm_id_is_current_world(to.vm->id)) {
			assert(map_action != NULL);
			*map_action = MAP_ACTION_NONE;
			return (struct ffa_value){.func = FFA_SUCCESS_32};
		}

		/*
		 * If the retriever is from a virtual FF-A instance:
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    !vm_is_primary(to.vm) &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}

		/*
		 * If the memory has been protected before, clear the NS bit to
		 * allow secure access from the SP.
		 */
		if (memory_protected) {
			*to_mode &= ~plat_ffa_other_world_mode();
		}
	}

	switch (share_func) {
	case FFA_MEM_DONATE_64:
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;
	case FFA_MEM_LEND_64:
	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;
	case FFA_MEM_SHARE_64:
	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/*
 * Performs the operations related to the `action` MAP_ACTION_CHECK*.
 * Returns:
 * - FFA_SUCCESS_32: if all goes well.
 * - FFA_ERROR_32: with FFA_NO_MEMORY, if there is no memory to manage
 *   the page table update. Or error code provided by the function
 *   `arch_memory_protect`.
 */
static struct ffa_value ffa_region_group_check_actions(
	struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
	struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
	bool *memory_protected)
{
	struct ffa_value ret;
	bool is_memory_protected;

	if (!vm_identity_prepare(vm_locked, pa_begin, pa_end, mode, ppool)) {
		dlog_verbose(
			"%s: memory can't be mapped to %x due to lack of "
			"memory. Base: %lx end: %lx\n",
			__func__, vm_locked.vm->id, pa_addr(pa_begin),
			pa_addr(pa_end));
		return ffa_error(FFA_NO_MEMORY);
	}

	switch (action) {
	case MAP_ACTION_CHECK:
		/* No protect requested. */
		is_memory_protected = false;
		ret = (struct ffa_value){.func = FFA_SUCCESS_32};
		break;
	case MAP_ACTION_CHECK_PROTECT: {
		paddr_t last_protected_pa = pa_init(0);

		ret = arch_memory_protect(pa_begin, pa_end, &last_protected_pa);

		is_memory_protected = (ret.func == FFA_SUCCESS_32);

		/*
		 * - If protecting the memory failed with FFA_DENIED, some
		 *   range of memory was in the wrong state. In that case, the
		 *   SPM reverts the state of the pages that were successfully
		 *   updated.
		 * - If protecting the memory failed with FFA_NOT_SUPPORTED,
		 *   the platform doesn't support the protection mechanism.
		 *   That said, it still permits the page table update to go
		 *   through. The variable `is_memory_protected` will be equal
		 *   to false.
		 * - If protecting the memory failed with
		 *   FFA_INVALID_PARAMETERS, break from the switch and return
		 *   the error.
		 */
		if (ret.func == FFA_ERROR_32) {
			assert(!is_memory_protected);
			if (ffa_error_code(ret) == FFA_DENIED &&
			    pa_addr(last_protected_pa) != (uintptr_t)0) {
				CHECK(arch_memory_unprotect(
					pa_begin,
					pa_add(last_protected_pa, PAGE_SIZE)));
			} else if (ffa_error_code(ret) == FFA_NOT_SUPPORTED) {
				ret = (struct ffa_value){
					.func = FFA_SUCCESS_32,
				};
			}
		}
	} break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}

	if (memory_protected != NULL) {
		*memory_protected = is_memory_protected;
	}

	return ret;
}

static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
					    paddr_t pa_begin, paddr_t pa_end,
					    struct mpool *ppool, uint32_t mode,
					    enum ffa_map_action action)
{
	switch (action) {
	case MAP_ACTION_COMMIT_UNPROTECT:
		/*
		 * Checking that it should succeed because SPM should be
		 * unprotecting memory that it had protected before.
		 */
		CHECK(arch_memory_unprotect(pa_begin, pa_end));
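		/* Fall through: commit the now-unprotected range below. */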
	case MAP_ACTION_COMMIT:
		vm_identity_commit(vm_locked, pa_begin, pa_end, mode, ppool,
				   NULL);
		break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}
}

/**
 * Helper function to revert a failed "Protect" action from the SPMC:
 * - `fragment_count`: should specify the number of fragments to traverse from
 *   `fragments`. This may not be the full number of fragments that are part of
 *   the share_state structure.
 * - `fragment_constituent_counts`: array holding the number of constituents
 *   per fragment.
 * - `end`: pointer to the constituent that failed the "protect" action. It
 *   shall be part of the last fragment, and it shall make the loop below break.
 */
static void ffa_region_group_fragments_revert_protect(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	const struct ffa_memory_region_constituent *end)
{
	for (uint32_t i = 0; i < fragment_count; ++i) {
		for (uint32_t j = 0; j < fragment_constituent_counts[i]; ++j) {
			struct ffa_memory_region_constituent *constituent =
				&fragments[i][j];
			size_t size = constituent->page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(constituent->address));
			paddr_t pa_end = pa_add(pa_begin, size);

			dlog_verbose("%s: reverting constituent %lx size %zx\n",
				     __func__, pa_addr(pa_begin), size);

			if (constituent == end) {
				/*
				 * The last constituent is expected to be in the
				 * last fragment.
				 */
				assert(i == fragment_count - 1);
				break;
			}

			CHECK(arch_memory_unprotect(pa_begin, pa_end));
		}
	}
}

1189/**
Jose Marinho09b1db82019-08-08 09:16:59 +01001190 * Updates a VM's page table such that the given set of physical address ranges
1191 * are mapped in the address space at the corresponding address ranges, in the
1192 * mode provided.
1193 *
 * The `enum ffa_map_action` argument determines the action taken by this
 * function:
1196 * - If action is MAP_ACTION_CHECK, the page tables will be allocated from the
1197 * mpool but no mappings will actually be updated. This function must always
1198 * be called first with action set to MAP_ACTION_CHECK to check that it will
1199 * succeed before calling ffa_region_group_identity_map with whichever one of
1200 * the remaining actions, to avoid leaving the page table in a half-updated
1201 * state.
1202 * - The action MAP_ACTION_COMMIT allocates the page tables from the mpool, and
1203 * changes the memory mappings.
 * - The action MAP_ACTION_CHECK_PROTECT extends MAP_ACTION_CHECK with an
 * invocation of the monitor to update the security state of the memory to
 * that of the SPMC.
 * - The action MAP_ACTION_COMMIT_UNPROTECT extends MAP_ACTION_COMMIT with a
 * call into the monitor to reset the security state of memory that was
 * previously protected via the MAP_ACTION_CHECK_PROTECT action.
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001210 * vm_ptable_defrag should always be called after a series of page table
1211 * updates, whether they succeed or fail.
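 *
 * Typical usage is a two-step call sequence (a minimal sketch, assuming a
 * single, already validated fragment; the local names are illustrative):
 *
 *        ret = ffa_region_group_identity_map(vm_locked, fragments,
 *                                            constituent_counts, 1, mode,
 *                                            ppool, MAP_ACTION_CHECK, NULL);
 *        if (ret.func == FFA_SUCCESS_32) {
 *                CHECK(ffa_region_group_identity_map(
 *                              vm_locked, fragments, constituent_counts, 1,
 *                              mode, ppool, MAP_ACTION_COMMIT, NULL)
 *                              .func == FFA_SUCCESS_32);
 *        }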
Jose Marinho09b1db82019-08-08 09:16:59 +01001212 *
 * If all goes well, returns FFA_SUCCESS_32; otherwise returns FFA_ERROR with
 * one of the following error codes:
 * - FFA_INVALID_PARAMETERS: a constituent falls outside the system's PA range.
 * - FFA_NO_MEMORY: there was not enough memory to allocate the page tables.
 * - FFA_DENIED: the memory could not be protected; in that case no changes are
 * made to memory mappings.
1219 */
J-Alvescf6253e2024-01-03 13:48:48 +00001220struct ffa_value ffa_region_group_identity_map(
Andrew Walbranf4b51af2020-02-03 14:44:54 +00001221 struct vm_locked vm_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001222 struct ffa_memory_region_constituent **fragments,
1223 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alvescf6253e2024-01-03 13:48:48 +00001224 uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
1225 bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001226{
Andrew Walbranca808b12020-05-15 17:22:28 +01001227 uint32_t i;
1228 uint32_t j;
J-Alvescf6253e2024-01-03 13:48:48 +00001229 struct ffa_value ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001230
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001231 if (vm_locked.vm->el0_partition) {
1232 mode |= MM_MODE_USER | MM_MODE_NG;
1233 }
1234
Andrew Walbranca808b12020-05-15 17:22:28 +01001235 /* Iterate over the memory region constituents within each fragment. */
1236 for (i = 0; i < fragment_count; ++i) {
1237 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
J-Alves063ad832023-10-03 18:05:40 +01001238 struct ffa_memory_region_constituent *constituent =
1239 &fragments[i][j];
1240 size_t size = constituent->page_count * PAGE_SIZE;
Andrew Walbranca808b12020-05-15 17:22:28 +01001241 paddr_t pa_begin =
J-Alves063ad832023-10-03 18:05:40 +01001242 pa_from_ipa(ipa_init(constituent->address));
Andrew Walbranca808b12020-05-15 17:22:28 +01001243 paddr_t pa_end = pa_add(pa_begin, size);
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001244 uint32_t pa_bits =
1245 arch_mm_get_pa_bits(arch_mm_get_pa_range());
Federico Recanati4fd065d2021-12-13 20:06:23 +01001246
1247 /*
1248 * Ensure the requested region falls into system's PA
1249 * range.
1250 */
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001251 if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
1252 ((pa_addr(pa_end) >> pa_bits) > 0)) {
Federico Recanati4fd065d2021-12-13 20:06:23 +01001253 dlog_error("Region is outside of PA Range\n");
J-Alvescf6253e2024-01-03 13:48:48 +00001254 return ffa_error(FFA_INVALID_PARAMETERS);
Federico Recanati4fd065d2021-12-13 20:06:23 +01001255 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001256
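                        /*
                         * Note: this relies on the declaration order of
                         * enum ffa_map_action, where the check-type actions
                         * precede the commit-type actions.
                         */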
J-Alvescf6253e2024-01-03 13:48:48 +00001257 if (action <= MAP_ACTION_CHECK_PROTECT) {
1258 ret = ffa_region_group_check_actions(
1259 vm_locked, pa_begin, pa_end, ppool,
1260 mode, action, memory_protected);
J-Alves063ad832023-10-03 18:05:40 +01001261
1262 if (ret.func == FFA_ERROR_32 &&
1263 ffa_error_code(ret) == FFA_DENIED) {
1264 if (memory_protected != NULL) {
1265 assert(!*memory_protected);
1266 }
1267
1268 ffa_region_group_fragments_revert_protect(
1269 fragments,
1270 fragment_constituent_counts,
1271 i + 1, constituent);
1272 break;
1273 }
J-Alvescf6253e2024-01-03 13:48:48 +00001274 } else if (action >= MAP_ACTION_COMMIT &&
1275 action < MAP_ACTION_MAX) {
1276 ffa_region_group_commit_actions(
1277 vm_locked, pa_begin, pa_end, ppool,
1278 mode, action);
1279 ret = (struct ffa_value){
1280 .func = FFA_SUCCESS_32};
1281 } else {
1282 panic("%s: Unknown ffa_map_action.\n",
1283 __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001284 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001285 }
1286 }
1287
J-Alvescf6253e2024-01-03 13:48:48 +00001288 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001289}
1290
1291/**
1292 * Clears a region of physical memory by overwriting it with zeros. The data is
1293 * flushed from the cache so the memory has been cleared across the system.
1294 */
J-Alves7db32002021-12-14 14:44:50 +00001295static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
1296 uint32_t extra_mode_attributes)
Jose Marinho09b1db82019-08-08 09:16:59 +01001297{
1298 /*
Fuad Tabbaed294af2019-12-20 10:43:01 +00001299 * TODO: change this to a CPU local single page window rather than a
Jose Marinho09b1db82019-08-08 09:16:59 +01001300 * global mapping of the whole range. Such an approach will limit
1301 * the changes to stage-1 tables and will allow only local
1302 * invalidation.
1303 */
1304 bool ret;
1305 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
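        /*
         * Map the range writable in the stage-1 page tables, preserving only
         * the security state bit (if any) from the caller-provided extra
         * attributes.
         */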
J-Alves7db32002021-12-14 14:44:50 +00001306 void *ptr = mm_identity_map(stage1_locked, begin, end,
1307 MM_MODE_W | (extra_mode_attributes &
1308 plat_ffa_other_world_mode()),
1309 ppool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001310 size_t size = pa_difference(begin, end);
1311
1312 if (!ptr) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001313 goto fail;
1314 }
1315
1316 memset_s(ptr, size, 0, size);
1317 arch_mm_flush_dcache(ptr, size);
1318 mm_unmap(stage1_locked, begin, end, ppool);
1319
1320 ret = true;
1321 goto out;
1322
1323fail:
1324 ret = false;
1325
1326out:
1327 mm_unlock_stage1(&stage1_locked);
1328
1329 return ret;
1330}
1331
1332/**
 * Clears all constituents of a memory region, across every fragment, by
 * overwriting them with zeros. The data is flushed from the cache so the
 * memory has been cleared across the system.
1335 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001336static bool ffa_clear_memory_constituents(
J-Alves7db32002021-12-14 14:44:50 +00001337 uint32_t security_state_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01001338 struct ffa_memory_region_constituent **fragments,
1339 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1340 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001341{
1342 struct mpool local_page_pool;
Andrew Walbranca808b12020-05-15 17:22:28 +01001343 uint32_t i;
Jose Marinho09b1db82019-08-08 09:16:59 +01001344 bool ret = false;
1345
1346 /*
1347 * Create a local pool so any freed memory can't be used by another
1348 * thread. This is to ensure each constituent that is mapped can be
1349 * unmapped again afterwards.
1350 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001351 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001352
Andrew Walbranca808b12020-05-15 17:22:28 +01001353 /* Iterate over the memory region constituents within each fragment. */
1354 for (i = 0; i < fragment_count; ++i) {
1355 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001356
J-Alves8457f932023-10-11 16:41:45 +01001357 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001358 size_t size = fragments[i][j].page_count * PAGE_SIZE;
1359 paddr_t begin =
1360 pa_from_ipa(ipa_init(fragments[i][j].address));
1361 paddr_t end = pa_add(begin, size);
1362
J-Alves7db32002021-12-14 14:44:50 +00001363 if (!clear_memory(begin, end, &local_page_pool,
1364 security_state_mode)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001365 /*
1366 * api_clear_memory will defrag on failure, so
1367 * no need to do it here.
1368 */
1369 goto out;
1370 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001371 }
1372 }
1373
Jose Marinho09b1db82019-08-08 09:16:59 +01001374 ret = true;
1375
1376out:
1377 mpool_fini(&local_page_pool);
1378 return ret;
1379}
1380
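/**
 * Returns true if the start of the range [begin, end] lies within
 * [in_begin, in_end), or if its end lies within (in_begin, in_end]. Callers
 * use it in both directions to detect partially overlapping ranges.
 */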
J-Alves5952d942022-12-22 16:03:00 +00001381static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
1382 ipaddr_t in_begin, ipaddr_t in_end)
1383{
1384 return (ipa_addr(begin) >= ipa_addr(in_begin) &&
1385 ipa_addr(begin) < ipa_addr(in_end)) ||
1386 (ipa_addr(end) <= ipa_addr(in_end) &&
1387 ipa_addr(end) > ipa_addr(in_begin));
1388}
1389
1390/**
 * Receives a memory range and looks for overlaps with the remaining
 * constituents of the memory share/lend/donate operation. Assumes they are
1393 * passed in order to avoid having to loop over all the elements at each call.
1394 * The function only compares the received memory ranges with those that follow
1395 * within the same fragment, and subsequent fragments from the same operation.
1396 */
1397static bool ffa_memory_check_overlap(
1398 struct ffa_memory_region_constituent **fragments,
1399 const uint32_t *fragment_constituent_counts,
1400 const uint32_t fragment_count, const uint32_t current_fragment,
1401 const uint32_t current_constituent)
1402{
1403 uint32_t i = current_fragment;
1404 uint32_t j = current_constituent;
1405 ipaddr_t current_begin = ipa_init(fragments[i][j].address);
1406 const uint32_t current_page_count = fragments[i][j].page_count;
1407 size_t current_size = current_page_count * PAGE_SIZE;
1408 ipaddr_t current_end = ipa_add(current_begin, current_size - 1);
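        /* Note: the end addresses computed here are inclusive. */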
1409
1410 if (current_size == 0 ||
1411 current_size > UINT64_MAX - ipa_addr(current_begin)) {
Karl Meakine8937d92024-03-19 16:04:25 +00001412 dlog_verbose("Invalid page count. Addr: %zx page_count: %x\n",
1413 current_begin.ipa, current_page_count);
J-Alves5952d942022-12-22 16:03:00 +00001414 return false;
1415 }
1416
1417 for (; i < fragment_count; i++) {
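                /*
                 * Within the current fragment, start comparing from the next
                 * constituent; for subsequent fragments start from the first.
                 */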
1418 j = (i == current_fragment) ? j + 1 : 0;
1419
1420 for (; j < fragment_constituent_counts[i]; j++) {
1421 ipaddr_t begin = ipa_init(fragments[i][j].address);
1422 const uint32_t page_count = fragments[i][j].page_count;
1423 size_t size = page_count * PAGE_SIZE;
1424 ipaddr_t end = ipa_add(begin, size - 1);
1425
1426 if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
1427 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001428 "Invalid page count. Addr: %lx "
J-Alves5952d942022-12-22 16:03:00 +00001429 "page_count: %x\n",
Karl Meakine8937d92024-03-19 16:04:25 +00001430 begin.ipa, page_count);
J-Alves5952d942022-12-22 16:03:00 +00001431 return false;
1432 }
1433
1434 /*
                         * Check if the current range is within begin and end,
                         * as well as the reverse. This helps optimize the loop
                         * and reduce the number of iterations.
1438 */
1439 if (is_memory_range_within(begin, end, current_begin,
1440 current_end) ||
1441 is_memory_range_within(current_begin, current_end,
1442 begin, end)) {
1443 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001444 "Overlapping memory ranges: %#lx - "
1445 "%#lx with %#lx - %#lx\n",
J-Alves5952d942022-12-22 16:03:00 +00001446 ipa_addr(begin), ipa_addr(end),
1447 ipa_addr(current_begin),
1448 ipa_addr(current_end));
1449 return true;
1450 }
1451 }
1452 }
1453
1454 return false;
1455}
1456
Jose Marinho09b1db82019-08-08 09:16:59 +01001457/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001458 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001459 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001460 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001461 *
1462 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001463 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001464 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001465 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001466 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1467 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001468 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001469 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001470 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001471 */
Daniel Boulbya76fd912024-02-22 14:22:15 +00001472static struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001473 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001474 struct ffa_memory_region_constituent **fragments,
1475 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001476 uint32_t composite_total_page_count, uint32_t share_func,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001477 struct ffa_memory_region *memory_region, struct mpool *page_pool,
1478 uint32_t *orig_from_mode_ret, bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001479{
Andrew Walbranca808b12020-05-15 17:22:28 +01001480 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001481 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001482 uint32_t orig_from_mode;
J-Alves460d36c2023-10-12 17:02:15 +01001483 uint32_t clean_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001484 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001485 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001486 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001487 uint32_t constituents_total_page_count = 0;
J-Alves460d36c2023-10-12 17:02:15 +01001488 enum ffa_map_action map_action = MAP_ACTION_CHECK;
Daniel Boulbya76fd912024-02-22 14:22:15 +00001489 bool clear = memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
Jose Marinho09b1db82019-08-08 09:16:59 +01001490
1491 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001492 * Make sure constituents are properly aligned to a 64-bit boundary. If
1493 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001494 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001495 for (i = 0; i < fragment_count; ++i) {
1496 if (!is_aligned(fragments[i], 8)) {
1497 dlog_verbose("Constituents not aligned.\n");
1498 return ffa_error(FFA_INVALID_PARAMETERS);
1499 }
J-Alves8f11cde2022-12-21 16:18:22 +00001500 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1501 constituents_total_page_count +=
1502 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001503 if (ffa_memory_check_overlap(
1504 fragments, fragment_constituent_counts,
1505 fragment_count, i, j)) {
1506 return ffa_error(FFA_INVALID_PARAMETERS);
1507 }
J-Alves8f11cde2022-12-21 16:18:22 +00001508 }
1509 }
1510
1511 if (constituents_total_page_count != composite_total_page_count) {
1512 dlog_verbose(
1513 "Composite page count differs from calculated page "
1514 "count from constituents.\n");
1515 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001516 }
1517
1518 /*
         * Check if the state transition is lawful for the sender, and ensure
         * that all constituents of the memory region being shared are in the
         * same state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001522 */
J-Alves460d36c2023-10-12 17:02:15 +01001523 ret = ffa_send_check_transition(
Daniel Boulbya76fd912024-02-22 14:22:15 +00001524 from_locked, share_func, memory_region, &orig_from_mode,
1525 fragments, fragment_constituent_counts, fragment_count,
Daniel Boulby4b846eb2024-05-23 17:32:23 +01001526 &from_mode, &map_action, clear);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001527 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001528 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001529 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001530 }
1531
Andrew Walbran37c574e2020-06-03 11:45:46 +01001532 if (orig_from_mode_ret != NULL) {
1533 *orig_from_mode_ret = orig_from_mode;
1534 }
1535
Jose Marinho09b1db82019-08-08 09:16:59 +01001536 /*
1537 * Create a local pool so any freed memory can't be used by another
1538 * thread. This is to ensure the original mapping can be restored if the
1539 * clear fails.
1540 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001541 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001542
1543 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001544 * First reserve all required memory for the new page table entries
1545 * without committing, to make sure the entire operation will succeed
1546 * without exhausting the page pool.
J-Alves460d36c2023-10-12 17:02:15 +01001547 * Provide the map_action as populated by 'ffa_send_check_transition'.
1548 * It may request memory to be protected.
Jose Marinho09b1db82019-08-08 09:16:59 +01001549 */
J-Alvescf6253e2024-01-03 13:48:48 +00001550 ret = ffa_region_group_identity_map(
1551 from_locked, fragments, fragment_constituent_counts,
J-Alves460d36c2023-10-12 17:02:15 +01001552 fragment_count, from_mode, page_pool, map_action,
1553 memory_protected);
J-Alvescf6253e2024-01-03 13:48:48 +00001554 if (ret.func == FFA_ERROR_32) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001555 goto out;
1556 }
1557
1558 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001559 * Update the mapping for the sender. This won't allocate because the
1560 * transaction was already prepared above, but may free pages in the
1561 * case that a whole block is being unmapped that was previously
1562 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001563 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001564 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001565 from_locked, fragments, fragment_constituent_counts,
1566 fragment_count, from_mode, &local_page_pool,
1567 MAP_ACTION_COMMIT, NULL)
1568 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001569
J-Alves460d36c2023-10-12 17:02:15 +01001570 /*
         * If the memory has been protected, it is now part of the secure PAS
         * (this happens for lend/donate from NWd to SWd) and `orig_from_mode`
         * has MM_MODE_NS set, so mask that bit out of `clean_mode` for the
         * SPM's S1 translation.
         * If the memory hasn't been protected, it is still in the non-secure
         * PAS (e.g. a memory share from NWd to SWd) and the SPM needs to
         * perform a non-secure memory access. In that case `clean_mode` takes
         * the same mode as `orig_from_mode`.
1579 */
1580 clean_mode = (memory_protected != NULL && *memory_protected)
1581 ? orig_from_mode & ~plat_ffa_other_world_mode()
1582 : orig_from_mode;
1583
Jose Marinho09b1db82019-08-08 09:16:59 +01001584 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves460d36c2023-10-12 17:02:15 +01001585 if (clear && !ffa_clear_memory_constituents(
1586 clean_mode, fragments, fragment_constituent_counts,
1587 fragment_count, page_pool)) {
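                /*
                 * If the memory was protected earlier in this function, the
                 * rollback must also undo that protection.
                 */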
1588 map_action = (memory_protected != NULL && *memory_protected)
1589 ? MAP_ACTION_COMMIT_UNPROTECT
1590 : MAP_ACTION_COMMIT;
1591
Jose Marinho09b1db82019-08-08 09:16:59 +01001592 /*
1593 * On failure, roll back by returning memory to the sender. This
1594 * may allocate pages which were previously freed into
1595 * `local_page_pool` by the call above, but will never allocate
1596 * more pages than that so can never fail.
1597 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001598 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001599 from_locked, fragments,
1600 fragment_constituent_counts, fragment_count,
1601 orig_from_mode, &local_page_pool,
                              map_action, NULL)
1603 .func == FFA_SUCCESS_32);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001604 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001605 goto out;
1606 }
1607
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001608 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001609
1610out:
1611 mpool_fini(&local_page_pool);
1612
1613 /*
1614 * Tidy up the page table by reclaiming failed mappings (if there was an
1615 * error) or merging entries into blocks where possible (on success).
1616 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001617 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001618
1619 return ret;
1620}
1621
1622/**
1623 * Validates and maps memory shared from one VM to another.
1624 *
1625 * This function requires the calling context to hold the <to> lock.
1626 *
1627 * Returns:
1628 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001629 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001630 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001631 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001632 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001633 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001634 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001635struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001636 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001637 struct ffa_memory_region_constituent **fragments,
1638 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001639 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
J-Alves460d36c2023-10-12 17:02:15 +01001640 struct mpool *page_pool, uint32_t *response_mode, bool memory_protected)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001641{
Andrew Walbranca808b12020-05-15 17:22:28 +01001642 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001643 uint32_t to_mode;
1644 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001645 struct ffa_value ret;
J-Alvesfd206052023-05-22 16:45:00 +01001646 enum ffa_map_action map_action = MAP_ACTION_COMMIT;
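        /*
         * May be updated by ffa_retrieve_check_transition, e.g. set to
         * MAP_ACTION_NONE when no stage-2 update is needed for the retriever.
         */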
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001647
1648 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001649 * Make sure constituents are properly aligned to a 64-bit boundary. If
1650 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001651 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001652 for (i = 0; i < fragment_count; ++i) {
1653 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001654 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001655 return ffa_error(FFA_INVALID_PARAMETERS);
1656 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001657 }
1658
1659 /*
Daniel Boulby4b846eb2024-05-23 17:32:23 +01001660 * Ensure the sender has write permissions if the memory needs to be
1661 * cleared.
1662 */
1663 if ((sender_orig_mode & MM_MODE_W) == 0 && clear == true) {
1664 dlog_verbose(
1665 "Cannot zero memory when the sender does not have "
1666 "write access\n");
1667 return ffa_error(FFA_DENIED);
1668 }
1669
1670 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001671 * Check if the state transition is lawful for the recipient, and ensure
         * that all constituents of the memory region being retrieved are in
         * the same state.
1674 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001675 ret = ffa_retrieve_check_transition(
1676 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alvesfd206052023-05-22 16:45:00 +01001677 fragment_count, sender_orig_mode, &to_mode, memory_protected,
1678 &map_action);
J-Alves460d36c2023-10-12 17:02:15 +01001679
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001680 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001681 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001682 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001683 }
1684
1685 /*
J-Alves69cdfd92024-04-26 11:40:59 +01001686 * Create a local pool so any freed memory can't be used by
1687 * another thread. This is to ensure the original mapping can be
1688 * restored if the clear fails.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001689 */
1690 mpool_init_with_fallback(&local_page_pool, page_pool);
1691
1692 /*
         * Memory retrieves by NWd VMs don't require an update to the S2 page
         * tables on the retrieve request.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001695 */
J-Alves69cdfd92024-04-26 11:40:59 +01001696 if (map_action != MAP_ACTION_NONE) {
1697 /*
1698 * First reserve all required memory for the new page table
1699 * entries in the recipient page tables without committing, to
1700 * make sure the entire operation will succeed without
1701 * exhausting the page pool.
1702 */
1703 ret = ffa_region_group_identity_map(
1704 to_locked, fragments, fragment_constituent_counts,
1705 fragment_count, to_mode, page_pool, MAP_ACTION_CHECK,
1706 NULL);
1707 if (ret.func == FFA_ERROR_32) {
1708 /* TODO: partial defrag of failed range. */
1709 goto out;
1710 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001711 }
1712
1713 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001714 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001715 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1716 fragment_constituent_counts,
1717 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001718 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001719 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001720 goto out;
1721 }
1722
J-Alves69cdfd92024-04-26 11:40:59 +01001723 if (map_action != MAP_ACTION_NONE) {
1724 /*
1725 * Complete the transfer by mapping the memory into the
1726 * recipient. This won't allocate because the transaction was
1727 * already prepared above, so it doesn't need to use the
1728 * `local_page_pool`.
1729 */
1730 CHECK(ffa_region_group_identity_map(to_locked, fragments,
1731 fragment_constituent_counts,
1732 fragment_count, to_mode,
1733 page_pool, map_action, NULL)
1734 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001735
J-Alves69cdfd92024-04-26 11:40:59 +01001736 /*
                 * Return the mode used to map the memory in the retriever's
                 * page tables.
1738 */
1739 if (response_mode != NULL) {
1740 *response_mode = to_mode;
1741 }
J-Alves460d36c2023-10-12 17:02:15 +01001742 }
1743
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001744 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001745
1746out:
1747 mpool_fini(&local_page_pool);
1748
1749 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001750 * Tidy up the page table by reclaiming failed mappings (if there was an
1751 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001752 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001753 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001754
1755 return ret;
1756}
1757
Andrew Walbran996d1d12020-05-27 14:08:43 +01001758static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001759 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001760 struct ffa_memory_region_constituent **fragments,
1761 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves69cdfd92024-04-26 11:40:59 +01001762 uint32_t sender_orig_mode, struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001763{
1764 uint32_t orig_from_mode;
J-Alves69cdfd92024-04-26 11:40:59 +01001765 uint32_t clearing_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001766 uint32_t from_mode;
1767 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001768 struct ffa_value ret;
J-Alves69cdfd92024-04-26 11:40:59 +01001769 enum ffa_map_action map_action;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001770
Andrew Walbranca808b12020-05-15 17:22:28 +01001771 ret = ffa_relinquish_check_transition(
1772 from_locked, &orig_from_mode, fragments,
J-Alves69cdfd92024-04-26 11:40:59 +01001773 fragment_constituent_counts, fragment_count, &from_mode,
1774 &map_action);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001775 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001776 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001777 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001778 }
1779
1780 /*
1781 * Create a local pool so any freed memory can't be used by another
1782 * thread. This is to ensure the original mapping can be restored if the
1783 * clear fails.
1784 */
1785 mpool_init_with_fallback(&local_page_pool, page_pool);
1786
J-Alves69cdfd92024-04-26 11:40:59 +01001787 if (map_action != MAP_ACTION_NONE) {
1788 clearing_mode = orig_from_mode;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001789
J-Alves69cdfd92024-04-26 11:40:59 +01001790 /*
1791 * First reserve all required memory for the new page table
1792 * entries without committing, to make sure the entire operation
1793 * will succeed without exhausting the page pool.
1794 */
1795 ret = ffa_region_group_identity_map(
1796 from_locked, fragments, fragment_constituent_counts,
1797 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK,
1798 NULL);
1799 if (ret.func == FFA_ERROR_32) {
1800 goto out;
1801 }
1802
1803 /*
1804 * Update the mapping for the sender. This won't allocate
1805 * because the transaction was already prepared above, but may
1806 * free pages in the case that a whole block is being unmapped
1807 * that was previously partially mapped.
1808 */
1809 CHECK(ffa_region_group_identity_map(from_locked, fragments,
1810 fragment_constituent_counts,
1811 fragment_count, from_mode,
1812 &local_page_pool,
1813 MAP_ACTION_COMMIT, NULL)
1814 .func == FFA_SUCCESS_32);
1815 } else {
1816 /*
                 * If `map_action` is MAP_ACTION_NONE, the S2 page tables were
                 * not updated on retrieve/relinquish; only the `share_state`
                 * structures were updated. As such, use the sender's original
                 * mode.
1821 */
1822 clearing_mode = sender_orig_mode;
1823 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001824
1825 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001826 if (clear &&
J-Alves69cdfd92024-04-26 11:40:59 +01001827 !ffa_clear_memory_constituents(clearing_mode, fragments,
J-Alves26483382023-04-20 12:01:49 +01001828 fragment_constituent_counts,
1829 fragment_count, page_pool)) {
J-Alves69cdfd92024-04-26 11:40:59 +01001830 if (map_action != MAP_ACTION_NONE) {
1831 /*
1832 * On failure, roll back by returning memory to the
1833 * sender. This may allocate pages which were previously
1834 * freed into `local_page_pool` by the call above, but
1835 * will never allocate more pages than that so can never
1836 * fail.
1837 */
1838 CHECK(ffa_region_group_identity_map(
1839 from_locked, fragments,
1840 fragment_constituent_counts,
1841 fragment_count, orig_from_mode,
1842 &local_page_pool, MAP_ACTION_COMMIT, NULL)
1843 .func == FFA_SUCCESS_32);
1844 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001845 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001846 goto out;
1847 }
1848
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001849 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001850
1851out:
1852 mpool_fini(&local_page_pool);
1853
1854 /*
1855 * Tidy up the page table by reclaiming failed mappings (if there was an
1856 * error) or merging entries into blocks where possible (on success).
1857 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001858 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001859
1860 return ret;
1861}
1862
1863/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001864 * Complete a memory sending operation by checking that it is valid, updating
1865 * the sender page table, and then either marking the share state as having
1866 * completed sending (on success) or freeing it (on failure).
1867 *
1868 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1869 */
J-Alvesfdd29272022-07-19 13:16:31 +01001870struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001871 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001872 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1873 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001874{
1875 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001876 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01001877 struct ffa_value ret;
1878
1879 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001880 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001881 assert(memory_region != NULL);
1882 composite = ffa_memory_region_get_composite(memory_region, 0);
1883 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001884
1885 /* Check that state is valid in sender page table and update. */
1886 ret = ffa_send_check_update(
1887 from_locked, share_state->fragments,
1888 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001889 share_state->fragment_count, composite->page_count,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001890 share_state->share_func, memory_region, page_pool,
J-Alves460d36c2023-10-12 17:02:15 +01001891 orig_from_mode_ret, &share_state->memory_protected);
Andrew Walbranca808b12020-05-15 17:22:28 +01001892 if (ret.func != FFA_SUCCESS_32) {
1893 /*
1894 * Free share state, it failed to send so it can't be retrieved.
1895 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001896 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1897 __func__, ffa_func_name(ret.func),
1898 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001899 share_state_free(share_states, share_state, page_pool);
1900 return ret;
1901 }
1902
1903 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001904 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001905
J-Alvesee68c542020-10-29 17:48:20 +00001906 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001907}
1908
1909/**
Daniel Boulby9764ff62024-01-30 17:47:39 +00001910 * Check that the memory attributes match Hafnium expectations.
1911 * Cacheability:
1912 * - Normal Memory as `FFA_MEMORY_CACHE_WRITE_BACK`.
1913 * - Device memory as `FFA_MEMORY_DEV_NGNRNE`.
1914 *
1915 * Shareability:
1916 * - Inner Shareable.
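 *
 * Returns FFA_SUCCESS if the attributes are acceptable, or an FFA_ERROR with
 * FFA_DENIED otherwise.
 *
 * For example, a sender of a share operation would typically populate the
 * attributes for normal memory as follows (a sketch; the field names match
 * those used elsewhere in this file):
 *
 *        attributes.type = FFA_MEMORY_NORMAL_MEM;
 *        attributes.cacheability = FFA_MEMORY_CACHE_WRITE_BACK;
 *        attributes.shareability = FFA_MEMORY_INNER_SHAREABLE;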
Federico Recanatia98603a2021-12-20 18:04:03 +01001917 */
1918static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001919 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001920{
1921 enum ffa_memory_type memory_type;
1922 enum ffa_memory_cacheability cacheability;
1923 enum ffa_memory_shareability shareability;
1924
Karl Meakin84710f32023-10-12 15:14:49 +01001925 memory_type = attributes.type;
Daniel Boulby9764ff62024-01-30 17:47:39 +00001926 cacheability = attributes.cacheability;
1927 if (memory_type == FFA_MEMORY_NORMAL_MEM &&
1928 cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1929 dlog_verbose(
1930 "Normal Memory: Invalid cacheability %s, "
1931 "expected %s.\n",
1932 ffa_memory_cacheability_name(cacheability),
1933 ffa_memory_cacheability_name(
1934 FFA_MEMORY_CACHE_WRITE_BACK));
Federico Recanati3d953f32022-02-17 09:31:29 +01001935 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001936 }
Daniel Boulby9764ff62024-01-30 17:47:39 +00001937 if (memory_type == FFA_MEMORY_DEVICE_MEM &&
1938 cacheability != FFA_MEMORY_DEV_NGNRNE) {
1939 dlog_verbose(
1940 "Device Memory: Invalid cacheability %s, "
1941 "expected %s.\n",
1942 ffa_device_memory_cacheability_name(cacheability),
1943 ffa_device_memory_cacheability_name(
1944 FFA_MEMORY_DEV_NGNRNE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001945 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001946 }
1947
Karl Meakin84710f32023-10-12 15:14:49 +01001948 shareability = attributes.shareability;
Federico Recanatia98603a2021-12-20 18:04:03 +01001949 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001950 dlog_verbose("Invalid shareability %s, expected %s.\n",
1951 ffa_memory_shareability_name(shareability),
1952 ffa_memory_shareability_name(
1953 FFA_MEMORY_INNER_SHAREABLE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001954 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001955 }
1956
1957 return (struct ffa_value){.func = FFA_SUCCESS_32};
1958}
1959
1960/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001961 * Check that the given `memory_region` represents a valid memory send request
 * of the given `share_func` type, and update the stored receiver permissions
 * if necessary (e.g. setting NX for share operations).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001964 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001965 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001966 * not.
1967 */
J-Alves66652252022-07-06 09:49:51 +01001968struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001969 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1970 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001971 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001972{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001973 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001974 struct ffa_memory_access *receiver =
1975 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001976 uint64_t receivers_end;
1977 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001978 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001979 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001980 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001981 enum ffa_data_access data_access;
1982 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001983 enum ffa_memory_security security_state;
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001984 enum ffa_memory_type type;
Federico Recanatia98603a2021-12-20 18:04:03 +01001985 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001986 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001987 memory_region->receivers_offset +
1988 memory_region->memory_access_desc_size +
1989 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001990
1991 if (fragment_length < minimum_first_fragment_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00001992 dlog_verbose("Fragment length %u too short (min %zu).\n",
1993 fragment_length, minimum_first_fragment_length);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001994 return ffa_error(FFA_INVALID_PARAMETERS);
1995 }
1996
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001997 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1998 "struct ffa_memory_region_constituent must be 16 bytes");
1999 if (!is_aligned(fragment_length,
2000 sizeof(struct ffa_memory_region_constituent)) ||
2001 !is_aligned(memory_share_length,
2002 sizeof(struct ffa_memory_region_constituent))) {
2003 dlog_verbose(
2004 "Fragment length %u or total length %u"
2005 " is not 16-byte aligned.\n",
2006 fragment_length, memory_share_length);
2007 return ffa_error(FFA_INVALID_PARAMETERS);
2008 }
2009
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002010 if (fragment_length > memory_share_length) {
2011 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002012 "Fragment length %zu greater than total length %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002013 (size_t)fragment_length, (size_t)memory_share_length);
2014 return ffa_error(FFA_INVALID_PARAMETERS);
2015 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002016
J-Alves95df0ef2022-12-07 10:09:48 +00002017 /* The sender must match the caller. */
2018 if ((!vm_id_is_current_world(from_locked.vm->id) &&
2019 vm_id_is_current_world(memory_region->sender)) ||
2020 (vm_id_is_current_world(from_locked.vm->id) &&
2021 memory_region->sender != from_locked.vm->id)) {
2022 dlog_verbose("Invalid memory sender ID.\n");
2023 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002024 }
2025
        if (memory_region->receiver_count == 0) {
2027 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002028 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002029 }
2030
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002031 /*
2032 * Ensure that the composite header is within the memory bounds and
2033 * doesn't overlap the first part of the message. Cast to uint64_t
2034 * to prevent overflow.
2035 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002036 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002037 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002038 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002039 min_length = receivers_end +
2040 sizeof(struct ffa_composite_memory_region) +
2041 sizeof(struct ffa_memory_region_constituent);
2042 if (min_length > memory_share_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00002043 dlog_verbose("Share too short: got %zu but minimum is %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002044 (size_t)memory_share_length, (size_t)min_length);
2045 return ffa_error(FFA_INVALID_PARAMETERS);
2046 }
2047
2048 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002049 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002050
2051 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002052 * Check that the composite memory region descriptor is after the access
2053 * descriptors, is at least 16-byte aligned, and fits in the first
2054 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01002055 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002056 if ((composite_memory_region_offset < receivers_end) ||
2057 (composite_memory_region_offset % 16 != 0) ||
2058 (composite_memory_region_offset >
2059 fragment_length - sizeof(struct ffa_composite_memory_region))) {
2060 dlog_verbose(
2061 "Invalid composite memory region descriptor offset "
Karl Meakine8937d92024-03-19 16:04:25 +00002062 "%zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002063 (size_t)composite_memory_region_offset);
2064 return ffa_error(FFA_INVALID_PARAMETERS);
2065 }
2066
2067 /*
2068 * Compute the start of the constituent regions. Already checked
2069 * to be not more than fragment_length and thus not more than
2070 * memory_share_length.
2071 */
2072 constituents_start = composite_memory_region_offset +
2073 sizeof(struct ffa_composite_memory_region);
2074 constituents_length = memory_share_length - constituents_start;
2075
2076 /*
2077 * Check that the number of constituents is consistent with the length
2078 * of the constituent region.
2079 */
2080 composite = ffa_memory_region_get_composite(memory_region, 0);
2081 if ((constituents_length %
2082 sizeof(struct ffa_memory_region_constituent) !=
2083 0) ||
2084 ((constituents_length /
2085 sizeof(struct ffa_memory_region_constituent)) !=
2086 composite->constituent_count)) {
Karl Meakine8937d92024-03-19 16:04:25 +00002087 dlog_verbose("Invalid length %zu or composite offset %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05002088 (size_t)memory_share_length,
2089 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002090 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002091 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002092 if (fragment_length < memory_share_length &&
2093 fragment_length < HF_MAILBOX_SIZE) {
2094 dlog_warning(
2095 "Initial fragment length %d smaller than mailbox "
2096 "size.\n",
2097 fragment_length);
2098 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002099
Andrew Walbrana65a1322020-04-06 19:32:32 +01002100 /*
2101 * Clear is not allowed for memory sharing, as the sender still has
2102 * access to the memory.
2103 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002104 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
J-Alves95fbb312024-03-20 15:19:16 +00002105 (share_func == FFA_MEM_SHARE_32 ||
2106 share_func == FFA_MEM_SHARE_64)) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01002107 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002108 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002109 }
2110
2111 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002112 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01002113 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002114 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002115 }
2116
J-Alves363f5722022-04-25 17:37:37 +01002117 /* Check that the permissions are valid, for each specified receiver. */
2118 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002119 struct ffa_memory_region_attributes receiver_permissions;
2120
2121 receiver = ffa_memory_region_get_receiver(memory_region, i);
2122 assert(receiver != NULL);
2123 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01002124 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002125 receiver_permissions.permissions;
2126 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01002127
2128 if (memory_region->sender == receiver_id) {
2129 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002130 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002131 }
Federico Recanati85090c42021-12-15 13:17:54 +01002132
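                /* Reject duplicate receivers in the transaction descriptor. */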
J-Alves363f5722022-04-25 17:37:37 +01002133 for (uint32_t j = i + 1; j < memory_region->receiver_count;
2134 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002135 struct ffa_memory_access *other_receiver =
2136 ffa_memory_region_get_receiver(memory_region,
2137 j);
2138 assert(other_receiver != NULL);
2139
J-Alves363f5722022-04-25 17:37:37 +01002140 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002141 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01002142 dlog_verbose(
2143 "Repeated receiver(%x) in memory send "
2144 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002145 other_receiver->receiver_permissions
2146 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01002147 return ffa_error(FFA_INVALID_PARAMETERS);
2148 }
2149 }
2150
2151 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002152 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01002153 dlog_verbose(
2154 "All ffa_memory_access should point to the "
2155 "same composite memory region offset.\n");
2156 return ffa_error(FFA_INVALID_PARAMETERS);
2157 }
2158
Karl Meakin84710f32023-10-12 15:14:49 +01002159 data_access = permissions.data_access;
2160 instruction_access = permissions.instruction_access;
J-Alves363f5722022-04-25 17:37:37 +01002161 if (data_access == FFA_DATA_ACCESS_RESERVED ||
2162 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
2163 dlog_verbose(
2164 "Reserved value for receiver permissions "
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002165 "(data_access = %s, instruction_access = %s)\n",
2166 ffa_data_access_name(data_access),
2167 ffa_instruction_access_name(
2168 instruction_access));
J-Alves363f5722022-04-25 17:37:37 +01002169 return ffa_error(FFA_INVALID_PARAMETERS);
2170 }
2171 if (instruction_access !=
2172 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2173 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002174 "Invalid instruction access permissions %s "
2175 "for sending memory, expected %s.\n",
2176 ffa_instruction_access_name(instruction_access),
2177 ffa_instruction_access_name(
Daniel Boulby91052c32024-05-21 14:09:48 +01002178 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002179 return ffa_error(FFA_INVALID_PARAMETERS);
2180 }
J-Alves95fbb312024-03-20 15:19:16 +00002181 if (share_func == FFA_MEM_SHARE_32 ||
2182 share_func == FFA_MEM_SHARE_64) {
J-Alves363f5722022-04-25 17:37:37 +01002183 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2184 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002185 "Invalid data access permissions %s "
2186 "for sharing memory, expected %s.\n",
2187 ffa_data_access_name(data_access),
2188 ffa_data_access_name(
2189 FFA_DATA_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002190 return ffa_error(FFA_INVALID_PARAMETERS);
2191 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002192 /*
2193 * According to section 10.10.3 of the FF-A v1.1 EAC0
2194 * spec, NX is required for share operations (but must
2195 * not be specified by the sender) so set it in the
2196 * copy that we store, ready to be returned to the
2197 * retriever.
2198 */
2199 if (vm_id_is_current_world(receiver_id)) {
Karl Meakin84710f32023-10-12 15:14:49 +01002200 permissions.instruction_access =
2201 FFA_INSTRUCTION_ACCESS_NX;
                                receiver->receiver_permissions.permissions =
                                        permissions;
2203 }
J-Alves363f5722022-04-25 17:37:37 +01002204 }
J-Alves95fbb312024-03-20 15:19:16 +00002205 if ((share_func == FFA_MEM_LEND_32 ||
2206 share_func == FFA_MEM_LEND_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002207 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2208 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002209 "Invalid data access permissions %s for "
2210 "lending memory, expected %s.\n",
2211 ffa_data_access_name(data_access),
2212 ffa_data_access_name(
2213 FFA_DATA_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002214 return ffa_error(FFA_INVALID_PARAMETERS);
2215 }
2216
J-Alves95fbb312024-03-20 15:19:16 +00002217 if ((share_func == FFA_MEM_DONATE_32 ||
2218 share_func == FFA_MEM_DONATE_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002219 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
2220 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002221 "Invalid data access permissions %s for "
2222 "donating memory, expected %s.\n",
2223 ffa_data_access_name(data_access),
2224 ffa_data_access_name(
2225 FFA_DATA_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002226 return ffa_error(FFA_INVALID_PARAMETERS);
2227 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002228 }
2229
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002230 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
Karl Meakin84710f32023-10-12 15:14:49 +01002231 security_state = memory_region->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002232 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2233 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002234 "Invalid security state %s for memory share operation, "
2235 "expected %s.\n",
2236 ffa_memory_security_name(security_state),
2237 ffa_memory_security_name(
2238 FFA_MEMORY_SECURITY_UNSPECIFIED));
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002239 return ffa_error(FFA_INVALID_PARAMETERS);
2240 }
2241
Federico Recanatid937f5e2021-12-20 17:38:23 +01002242 /*
         * For a memory donate, or a lend with a single borrower, the memory
         * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01002245 */
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002246 type = memory_region->attributes.type;
J-Alves807794e2022-06-16 13:42:47 +01002247 if (share_func == FFA_MEM_DONATE_32 ||
J-Alves95fbb312024-03-20 15:19:16 +00002248 share_func == FFA_MEM_DONATE_64 ||
2249 ((share_func == FFA_MEM_LEND_32 || share_func == FFA_MEM_LEND_64) &&
J-Alves807794e2022-06-16 13:42:47 +01002250 memory_region->receiver_count == 1)) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002251 if (type != FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves807794e2022-06-16 13:42:47 +01002252 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002253 "Invalid memory type %s for memory share "
2254 "operation, expected %s.\n",
2255 ffa_memory_type_name(type),
2256 ffa_memory_type_name(
2257 FFA_MEMORY_NOT_SPECIFIED_MEM));
J-Alves807794e2022-06-16 13:42:47 +01002258 return ffa_error(FFA_INVALID_PARAMETERS);
2259 }
2260 } else {
2261 /*
2262 * Check that sender's memory attributes match Hafnium
2263 * expectations: Normal Memory, Inner shareable, Write-Back
2264 * Read-Allocate Write-Allocate Cacheable.
2265 */
2266 ret = ffa_memory_attributes_validate(memory_region->attributes);
2267 if (ret.func != FFA_SUCCESS_32) {
2268 return ret;
2269 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01002270 }
2271
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002272 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01002273}
2274
2275/**
Andrew Walbranca808b12020-05-15 17:22:28 +01002276 * Gets the share state for continuing an operation to donate, lend or share
2277 * memory, and checks that it is a valid request.
2278 *
2279 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
2280 * not.
2281 */
J-Alvesfdd29272022-07-19 13:16:31 +01002282struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01002283 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01002284 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002285 struct mpool *page_pool)
2286{
2287 struct ffa_memory_share_state *share_state;
2288 struct ffa_memory_region *memory_region;
2289
Daniel Boulbya2f8c662021-11-26 17:52:53 +00002290 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01002291
2292 /*
2293 * Look up the share state by handle and make sure that the VM ID
2294 * matches.
2295 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01002296 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00002297 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002298 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002299 "Invalid handle %#lx for memory send continuation.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002300 handle);
2301 return ffa_error(FFA_INVALID_PARAMETERS);
2302 }
2303 memory_region = share_state->memory_region;
2304
J-Alvesfdd29272022-07-19 13:16:31 +01002305 if (vm_id_is_current_world(from_vm_id) &&
2306 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002307 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
2308 return ffa_error(FFA_INVALID_PARAMETERS);
2309 }
2310
2311 if (share_state->sending_complete) {
2312 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002313 "Sending of memory handle %#lx is already complete.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002314 handle);
2315 return ffa_error(FFA_INVALID_PARAMETERS);
2316 }
2317
2318 if (share_state->fragment_count == MAX_FRAGMENTS) {
2319 /*
2320 * Log a warning as this is a sign that MAX_FRAGMENTS should
2321 * probably be increased.
2322 */
2323 dlog_warning(
Karl Meakine8937d92024-03-19 16:04:25 +00002324 "Too many fragments for memory share with handle %#lx; "
Andrew Walbranca808b12020-05-15 17:22:28 +01002325 "only %d supported.\n",
2326 handle, MAX_FRAGMENTS);
2327 /* Free share state, as it's not possible to complete it. */
2328 share_state_free(share_states, share_state, page_pool);
2329 return ffa_error(FFA_NO_MEMORY);
2330 }
2331
2332 *share_state_ret = share_state;
2333
2334 return (struct ffa_value){.func = FFA_SUCCESS_32};
2335}
2336
2337/**
J-Alves95df0ef2022-12-07 10:09:48 +00002338 * Checks if there is at least one receiver from the other world.
2339 */
J-Alvesfdd29272022-07-19 13:16:31 +01002340bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00002341 struct ffa_memory_region *memory_region)
2342{
2343 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002344 struct ffa_memory_access *receiver =
2345 ffa_memory_region_get_receiver(memory_region, i);
2346 assert(receiver != NULL);
2347 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
2348
2349 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00002350 return true;
2351 }
2352 }
2353 return false;
2354}
2355
2356/**
J-Alves9da280b2022-12-21 14:55:39 +00002357 * Validates a call to donate, lend or share memory in which Hafnium is the
2358 * designated allocator of the memory handle. In practice, this also means
2359 * Hafnium is responsible for managing the state structures for the transaction.
2360 * If Hafnium is the SPMC, it should allocate the memory handle when either the
2361 * sender is an SP or there is at least one borrower that is an SP.
2362 * If Hafnium is the hypervisor, it should allocate the memory handle when
2363 * the operation involves only NWd VMs.
2364 *
2365 * If validation goes well, Hafnium updates the stage-2 page tables of the
2366 * sender. Validation consists of checking if the message length and number of
2367 * memory region constituents match, and if the transition is valid for the
2368 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00002369 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002370 * Assumes that the caller has already found and locked the sender VM and copied
2371 * the memory region descriptor from the sender's TX buffer to a freshly
2372 * allocated page from Hafnium's internal pool. The caller must have also
2373 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002374 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002375 * This function takes ownership of the `memory_region` passed in and will free
2376 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01002377 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002378struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002379 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002380 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002381 uint32_t fragment_length, uint32_t share_func,
2382 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01002383{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002384 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002385 struct share_states_locked share_states;
2386 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01002387
2388 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002389 * If there is an error validating the `memory_region` then we need to
2390 * free it because we own it but we won't be storing it in a share state
2391 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01002392 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002393 ret = ffa_memory_send_validate(from_locked, memory_region,
2394 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01002395 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002396 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002397 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002398 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002399 }
2400
Andrew Walbrana65a1322020-04-06 19:32:32 +01002401 /* Set flag for share function, ready to be retrieved later. */
2402 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002403 case FFA_MEM_SHARE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002404 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002405 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002406 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002407 break;
J-Alves95fbb312024-03-20 15:19:16 +00002408 case FFA_MEM_LEND_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002409 case FFA_MEM_LEND_32:
2410 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002411 break;
J-Alves95fbb312024-03-20 15:19:16 +00002412 case FFA_MEM_DONATE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002413 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002414 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002415 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002416 break;
Karl Meakina5ea9092024-05-28 15:40:33 +01002417 default:
2418 dlog_verbose("Unknown share func %#x (%s)\n", share_func,
2419 ffa_func_name(share_func));
2420 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01002421 }
2422
Andrew Walbranca808b12020-05-15 17:22:28 +01002423 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002424 /*
2425 * Allocate a share state before updating the page table. Otherwise if
2426 * updating the page table succeeded but allocating the share state
2427 * failed then it would leave the memory in a state where nobody could
2428 * get it back.
2429 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01002430 share_state = allocate_share_state(share_states, share_func,
2431 memory_region, fragment_length,
2432 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00002433 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002434 dlog_verbose("Failed to allocate share state.\n");
2435 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01002436 ret = ffa_error(FFA_NO_MEMORY);
2437 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002438 }
2439
Andrew Walbranca808b12020-05-15 17:22:28 +01002440 if (fragment_length == memory_share_length) {
2441 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00002442 ret = ffa_memory_send_complete(
2443 from_locked, share_states, share_state, page_pool,
2444 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002445 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002446 /*
2447		 * Use the sender ID from 'memory_region', assuming
2448		 * that at this point it has been validated:
2449		 * - MBZ at the virtual FF-A instance.
2450 */
J-Alves19e20cf2023-08-02 12:48:55 +01002451 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002452 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2453 ? memory_region->sender
2454 : 0;
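		/*
		 * More fragments are expected: reply with FFA_MEM_FRAG_RX,
		 * carrying the handle in arg1/arg2 (low/high 32 bits), the
		 * offset at which the next fragment should start (the length
		 * received so far) in arg3, and the sender ID in bits [31:16]
		 * of arg4.
		 */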
Andrew Walbranca808b12020-05-15 17:22:28 +01002455 ret = (struct ffa_value){
2456 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002457 .arg1 = (uint32_t)memory_region->handle,
2458 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002459 .arg3 = fragment_length,
2460 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002461 }
2462
2463out:
2464 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002465 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002466 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002467}
2468
2469/**
J-Alves8505a8a2022-06-15 18:10:18 +01002470 * Continues an operation to donate, lend or share memory to a VM from the
2471 * current world. If this is the last fragment, checks that the transition is
2472 * valid for the type of memory sending operation and updates the stage-2 page
2473 * tables of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002474 *
2475 * Assumes that the caller has already found and locked the sender VM and copied
2476 * the memory region descriptor from the sender's TX buffer to a freshly
2477 * allocated page from Hafnium's internal pool.
2478 *
2479 * This function takes ownership of the `fragment` passed in; it must not be
2480 * freed by the caller.
2481 */
2482struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2483 void *fragment,
2484 uint32_t fragment_length,
2485 ffa_memory_handle_t handle,
2486 struct mpool *page_pool)
2487{
2488 struct share_states_locked share_states = share_states_lock();
2489 struct ffa_memory_share_state *share_state;
2490 struct ffa_value ret;
2491 struct ffa_memory_region *memory_region;
2492
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002493 CHECK(is_aligned(fragment,
2494 alignof(struct ffa_memory_region_constituent)));
2495 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2496 0) {
2497 dlog_verbose("Fragment length %u misaligned.\n",
2498 fragment_length);
2499 ret = ffa_error(FFA_INVALID_PARAMETERS);
2500 goto out_free_fragment;
2501 }
2502
Andrew Walbranca808b12020-05-15 17:22:28 +01002503 ret = ffa_memory_send_continue_validate(share_states, handle,
2504 &share_state,
2505 from_locked.vm->id, page_pool);
2506 if (ret.func != FFA_SUCCESS_32) {
2507 goto out_free_fragment;
2508 }
2509 memory_region = share_state->memory_region;
2510
J-Alves95df0ef2022-12-07 10:09:48 +00002511 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002512 dlog_error(
2513 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002514			"other world. This should never happen, and indicates "
2515			"a bug in EL3 code.\n");
2517 ret = ffa_error(FFA_INVALID_PARAMETERS);
2518 goto out_free_fragment;
2519 }
2520
2521 /* Add this fragment. */
2522 share_state->fragments[share_state->fragment_count] = fragment;
2523 share_state->fragment_constituent_counts[share_state->fragment_count] =
2524 fragment_length / sizeof(struct ffa_memory_region_constituent);
2525 share_state->fragment_count++;
2526
2527 /* Check whether the memory send operation is now ready to complete. */
2528 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002529 ret = ffa_memory_send_complete(
2530 from_locked, share_states, share_state, page_pool,
2531 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002532 } else {
2533 ret = (struct ffa_value){
2534 .func = FFA_MEM_FRAG_RX_32,
2535 .arg1 = (uint32_t)handle,
2536 .arg2 = (uint32_t)(handle >> 32),
2537 .arg3 = share_state_next_fragment_offset(share_states,
2538 share_state)};
2539 }
2540 goto out;
2541
2542out_free_fragment:
2543 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002544
2545out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002546 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002547 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002548}
2549
Andrew Walbranca808b12020-05-15 17:22:28 +01002550/** Clean up after the receiver has finished retrieving a memory region. */
2551static void ffa_memory_retrieve_complete(
2552 struct share_states_locked share_states,
2553 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2554{
J-Alves95fbb312024-03-20 15:19:16 +00002555 if (share_state->share_func == FFA_MEM_DONATE_32 ||
2556 share_state->share_func == FFA_MEM_DONATE_64) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002557 /*
2558 * Memory that has been donated can't be relinquished,
2559 * so no need to keep the share state around.
2560 */
2561 share_state_free(share_states, share_state, page_pool);
2562 dlog_verbose("Freed share state for donate.\n");
2563 }
2564}
2565
J-Alves2d8457f2022-10-05 11:06:41 +01002566/**
2567 * Initialises the given memory region descriptor to be used for an
2568 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2569 * fragment.
2570 * The memory region descriptor is initialized according to the retriever's
2571 * FF-A version.
2572 *
2573 * Returns true on success, or false if the given constituents won't all fit in
2574 * the first fragment.
2575 */
2576static bool ffa_retrieved_memory_region_init(
Karl Meakin0e617d92024-04-05 12:55:22 +01002577 void *response, enum ffa_version ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002578 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002579 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002580 ffa_memory_access_permissions_t permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002581 struct ffa_memory_access *receivers, size_t receiver_count,
2582 uint32_t memory_access_desc_size, uint32_t page_count,
2583 uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002584 const struct ffa_memory_region_constituent constituents[],
2585 uint32_t fragment_constituent_count, uint32_t *total_length,
2586 uint32_t *fragment_length)
2587{
2588 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002589 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002590 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002591 uint32_t constituents_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002592
2593 assert(response != NULL);
2594
Karl Meakin0e617d92024-04-05 12:55:22 +01002595 if (ffa_version == FFA_VERSION_1_0) {
J-Alves2d8457f2022-10-05 11:06:41 +01002596 struct ffa_memory_region_v1_0 *retrieve_response =
2597 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002598 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002599
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002600 ffa_memory_region_init_header_v1_0(retrieve_response, sender,
2601 attributes, flags, handle, 0,
2602 receiver_count);
J-Alves2d8457f2022-10-05 11:06:41 +01002603
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002604 receiver = (struct ffa_memory_access_v1_0 *)
2605 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002606 receiver_count = retrieve_response->receiver_count;
2607
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002608 for (uint32_t i = 0; i < receiver_count; i++) {
2609 ffa_id_t receiver_id =
2610 receivers[i].receiver_permissions.receiver;
2611 ffa_memory_receiver_flags_t recv_flags =
2612 receivers[i].receiver_permissions.flags;
2613
2614 /*
2615			 * Initialized here as memory retrieve responses
2616			 * currently expect a single borrower to be specified.
2617 */
2618 ffa_memory_access_init_v1_0(
Karl Meakin84710f32023-10-12 15:14:49 +01002619 receiver, receiver_id, permissions.data_access,
2620 permissions.instruction_access, recv_flags);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002621 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002622
2623 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002624 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002625 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2626 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002627
2628 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2629 retrieve_response, 0);
2630 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002631 struct ffa_memory_region *retrieve_response =
2632 (struct ffa_memory_region *)response;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002633 struct ffa_memory_access *retrieve_response_receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002634
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002635 ffa_memory_region_init_header(
2636 retrieve_response, sender, attributes, flags, handle, 0,
2637 receiver_count, memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002638
2639 /*
2640		 * Note that `sizeof(struct ffa_memory_region)` and
2641		 * `sizeof(struct ffa_memory_access)` must both be multiples of
2642		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2643 * guaranteed that the offset we calculate here is aligned to a
2644 * 64-bit boundary and so 64-bit values can be copied without
2645 * alignment faults.
2646 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002647 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002648 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002649 (uint32_t)(receiver_count *
2650 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002651
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002652 retrieve_response_receivers =
2653 ffa_memory_region_get_receiver(retrieve_response, 0);
2654 assert(retrieve_response_receivers != NULL);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002655
2656 /*
2657		 * Initialized here as memory retrieve responses currently
2658		 * expect a single borrower to be specified.
2659 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002660 memcpy_s(retrieve_response_receivers,
2661 sizeof(struct ffa_memory_access) * receiver_count,
2662 receivers,
2663 sizeof(struct ffa_memory_access) * receiver_count);
2664
2665 retrieve_response_receivers->composite_memory_region_offset =
2666 composite_offset;
2667
J-Alves2d8457f2022-10-05 11:06:41 +01002668 composite_memory_region =
2669 ffa_memory_region_get_composite(retrieve_response, 0);
2670 }
2671
J-Alves2d8457f2022-10-05 11:06:41 +01002672 assert(composite_memory_region != NULL);
2673
J-Alves2d8457f2022-10-05 11:06:41 +01002674 composite_memory_region->page_count = page_count;
2675 composite_memory_region->constituent_count = total_constituent_count;
2676 composite_memory_region->reserved_0 = 0;
2677
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002678 constituents_offset =
2679 composite_offset + sizeof(struct ffa_composite_memory_region);
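	/*
	 * Check that the constituents of the first fragment fit within the
	 * response buffer; fail otherwise.
	 */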
J-Alves2d8457f2022-10-05 11:06:41 +01002680 if (constituents_offset +
2681 fragment_constituent_count *
2682 sizeof(struct ffa_memory_region_constituent) >
2683 response_max_size) {
2684 return false;
2685 }
2686
2687 for (i = 0; i < fragment_constituent_count; ++i) {
2688 composite_memory_region->constituents[i] = constituents[i];
2689 }
2690
2691 if (total_length != NULL) {
2692 *total_length =
2693 constituents_offset +
2694 composite_memory_region->constituent_count *
2695 sizeof(struct ffa_memory_region_constituent);
2696 }
2697 if (fragment_length != NULL) {
2698 *fragment_length =
2699 constituents_offset +
2700 fragment_constituent_count *
2701 sizeof(struct ffa_memory_region_constituent);
2702 }
2703
2704 return true;
2705}
2706
J-Alves96de29f2022-04-26 16:05:24 +01002707/**
2708 * Validates the retrieved permissions against those specified by the lender
2709 * of the memory sharing operation. Optionally sets the permissions to be used
2710 * for the S2 mapping, through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01002711 * Returns FFA_SUCCESS if all the fields are valid, or FFA_ERROR with error code:
2712 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2713 * specification for each ABI.
2714 * - FFA_DENIED -> if the permissions specified by the retriever are more
2715 * permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002716 */
J-Alvesdcad8992023-09-15 14:10:35 +01002717static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2718 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002719 enum ffa_data_access requested_data_access,
2720 enum ffa_instruction_access sent_instruction_access,
2721 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002722 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002723{
2724 switch (sent_data_access) {
2725 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2726 case FFA_DATA_ACCESS_RW:
2727 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2728 requested_data_access == FFA_DATA_ACCESS_RW) {
2729 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002730 permissions->data_access = FFA_DATA_ACCESS_RW;
J-Alves96de29f2022-04-26 16:05:24 +01002731 }
2732 break;
2733 }
2734 /* Intentional fall-through. */
2735 case FFA_DATA_ACCESS_RO:
2736 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2737 requested_data_access == FFA_DATA_ACCESS_RO) {
2738 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002739 permissions->data_access = FFA_DATA_ACCESS_RO;
J-Alves96de29f2022-04-26 16:05:24 +01002740 }
2741 break;
2742 }
2743 dlog_verbose(
2744 "Invalid data access requested; sender specified "
2745 "permissions %#x but receiver requested %#x.\n",
2746 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002747 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002748 case FFA_DATA_ACCESS_RESERVED:
2749 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2750 "checked before this point.");
2751 }
2752
J-Alvesdcad8992023-09-15 14:10:35 +01002753 /*
2754	 * For operations with a single borrower, if it is an FFA_MEM_LEND
2755	 * or FFA_MEM_DONATE the retriever should have specified the
2756	 * instruction permissions it wishes to receive.
2757 */
2758 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002759 case FFA_MEM_SHARE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002760 case FFA_MEM_SHARE_32:
2761 if (requested_instruction_access !=
2762 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2763 dlog_verbose(
2764				"%s: for share, instruction permissions must "
2765 "NOT be specified.\n",
2766 __func__);
2767 return ffa_error(FFA_INVALID_PARAMETERS);
2768 }
2769 break;
J-Alves95fbb312024-03-20 15:19:16 +00002770 case FFA_MEM_LEND_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002771 case FFA_MEM_LEND_32:
2772 /*
2773 * For operations with multiple borrowers only permit XN
2774 * permissions, and both Sender and borrower should have used
2775 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2776 */
2777 if (multiple_borrowers) {
2778 if (requested_instruction_access !=
2779 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2780 dlog_verbose(
2781					"%s: for lend/share/donate with multiple "
2782					"borrowers, "
2783					"instruction permissions must NOT be "
2784 "specified.\n",
2785 __func__);
2786 return ffa_error(FFA_INVALID_PARAMETERS);
2787 }
2788 break;
2789 }
2790 /* Fall through if the operation targets a single borrower. */
J-Alves95fbb312024-03-20 15:19:16 +00002791 case FFA_MEM_DONATE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002792 case FFA_MEM_DONATE_32:
2793 if (!multiple_borrowers &&
2794 requested_instruction_access ==
2795 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2796 dlog_verbose(
2797				"%s: for lend/donate with a single borrower, "
2798				"instruction permissions must be specified "
2799				"by the borrower.\n",
2800 __func__);
2801 return ffa_error(FFA_INVALID_PARAMETERS);
2802 }
2803 break;
2804 default:
2805 panic("%s: Wrong func id provided.\n", __func__);
2806 }
2807
J-Alves96de29f2022-04-26 16:05:24 +01002808 switch (sent_instruction_access) {
2809 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2810 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002811 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002812 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002813 permissions->instruction_access =
2814 FFA_INSTRUCTION_ACCESS_X;
J-Alves96de29f2022-04-26 16:05:24 +01002815 }
2816 break;
2817 }
J-Alvesdcad8992023-09-15 14:10:35 +01002818 /*
2819 * Fall through if requested permissions are less
2820 * permissive than those provided by the sender.
2821 */
J-Alves96de29f2022-04-26 16:05:24 +01002822 case FFA_INSTRUCTION_ACCESS_NX:
2823 if (requested_instruction_access ==
2824 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2825 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2826 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002827 permissions->instruction_access =
2828 FFA_INSTRUCTION_ACCESS_NX;
J-Alves96de29f2022-04-26 16:05:24 +01002829 }
2830 break;
2831 }
2832 dlog_verbose(
2833 "Invalid instruction access requested; sender "
2834 "specified permissions %#x but receiver requested "
2835 "%#x.\n",
2836 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002837 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002838 case FFA_INSTRUCTION_ACCESS_RESERVED:
2839 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2840 "be checked before this point.");
2841 }
2842
J-Alvesdcad8992023-09-15 14:10:35 +01002843 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002844}
2845
2846/**
2847 * Validate the receivers' permissions in the retrieve request against those
2848 * specified by the lender.
2849 * Returns, in the `permissions` argument, the permissions to set at S2 for the
2850 * caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002851 * The function looks at the flag to bypass multiple borrower checks:
2852 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2853 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2854 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2855 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002856 */
2857static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2858 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002859 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002860 ffa_memory_access_permissions_t *permissions,
2861 struct ffa_memory_access **receiver_ret, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002862{
2863 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002864 bool bypass_multi_receiver_check =
2865 (retrieve_request->flags &
2866 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002867 const uint32_t region_receiver_count = memory_region->receiver_count;
2868 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002869
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002870 assert(receiver_ret != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002871 assert(permissions != NULL);
2872
Karl Meakin84710f32023-10-12 15:14:49 +01002873 *permissions = (ffa_memory_access_permissions_t){0};
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002874
J-Alves3456e032023-07-20 12:20:05 +01002875 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002876 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002877 dlog_verbose(
2878				"Retrieve request should contain the same list "
2879				"of borrowers as specified by the lender.\n");
2880 return ffa_error(FFA_INVALID_PARAMETERS);
2881 }
2882 } else {
2883 if (retrieve_request->receiver_count != 1) {
2884 dlog_verbose(
2885				"With bypass multiple borrower check set, the "
2886				"receiver list must be of size 1 (got %x).\n",
2887				retrieve_request->receiver_count);
2888 return ffa_error(FFA_INVALID_PARAMETERS);
2889 }
J-Alves96de29f2022-04-26 16:05:24 +01002890 }
2891
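	/*
	 * Initialise to an out-of-range value; it is updated to the index of
	 * the caller's entry once that entry is found in the retrieve
	 * request's receiver list below.
	 */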
2892 retrieve_receiver_index = retrieve_request->receiver_count;
2893
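	/*
	 * Walk the receiver list of the retrieve request and validate each
	 * entry against the lender's transaction descriptor.
	 */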
J-Alves96de29f2022-04-26 16:05:24 +01002894 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2895 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002896 struct ffa_memory_access *retrieve_request_receiver =
2897 ffa_memory_region_get_receiver(retrieve_request, i);
2898 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002899 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002900 retrieve_request_receiver->receiver_permissions
2901 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002902 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002903 retrieve_request_receiver->receiver_permissions
2904 .receiver;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002905 struct ffa_memory_access *receiver;
2906 uint32_t mem_region_receiver_index;
2907 bool permissions_RO;
2908 bool clear_memory_flags;
J-Alvesf220d572024-04-24 22:15:14 +01002909 /*
2910 * If the call is at the virtual FF-A instance the caller's
2911 * ID must match an entry in the memory access list.
2912 * In the SPMC, one of the specified receivers could be from
2913 * the NWd.
2914 */
2915 bool found_to_id = vm_id_is_current_world(to_vm_id)
2916 ? (current_receiver_id == to_vm_id)
2917 : (!vm_id_is_current_world(
2918 current_receiver_id));
J-Alves96de29f2022-04-26 16:05:24 +01002919
J-Alves3456e032023-07-20 12:20:05 +01002920 if (bypass_multi_receiver_check && !found_to_id) {
2921 dlog_verbose(
2922 "Bypass multiple borrower check for id %x.\n",
2923 current_receiver_id);
2924 continue;
2925 }
2926
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002927 if (retrieve_request_receiver->composite_memory_region_offset !=
2928 0U) {
2929 dlog_verbose(
2930 "Retriever specified address ranges not "
2931 "supported (got offset %d).\n",
2932 retrieve_request_receiver
2933 ->composite_memory_region_offset);
2934 return ffa_error(FFA_INVALID_PARAMETERS);
2935 }
2936
J-Alves96de29f2022-04-26 16:05:24 +01002937 /*
2938 * Find the current receiver in the transaction descriptor from
2939 * sender.
2940 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002941 mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002942 ffa_memory_region_get_receiver_index(
2943 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002944
2945 if (mem_region_receiver_index ==
2946 memory_region->receiver_count) {
2947 dlog_verbose("%s: receiver %x not found\n", __func__,
2948 current_receiver_id);
2949 return ffa_error(FFA_DENIED);
2950 }
2951
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002952 receiver = ffa_memory_region_get_receiver(
2953 memory_region, mem_region_receiver_index);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002954 assert(receiver != NULL);
2955
2956 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002957
2958 if (found_to_id) {
2959 retrieve_receiver_index = i;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002960
2961 *receiver_ret = receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002962 }
2963
2964 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002965 * Check if retrieve request memory access list is valid:
2966 * - The retrieve request complies with the specification.
2967 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002968 */
J-Alvesdcad8992023-09-15 14:10:35 +01002969 ret = ffa_memory_retrieve_is_memory_access_valid(
Karl Meakin84710f32023-10-12 15:14:49 +01002970 func_id, sent_permissions.data_access,
2971 requested_permissions.data_access,
2972 sent_permissions.instruction_access,
2973 requested_permissions.instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002974 found_to_id ? permissions : NULL,
2975 region_receiver_count > 1);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002976
J-Alvesdcad8992023-09-15 14:10:35 +01002977 if (ret.func != FFA_SUCCESS_32) {
2978 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002979 }
2980
Karl Meakin84710f32023-10-12 15:14:49 +01002981 permissions_RO =
2982 (permissions->data_access == FFA_DATA_ACCESS_RO);
J-Alvese5262372024-03-27 11:02:03 +00002983 clear_memory_flags =
2984 (retrieve_request->flags &
2985 (FFA_MEMORY_REGION_FLAG_CLEAR |
2986 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002987
J-Alves96de29f2022-04-26 16:05:24 +01002988 /*
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002989 * Can't request PM to clear memory if only provided
2990 * with RO permissions.
J-Alves96de29f2022-04-26 16:05:24 +01002991 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002992 if (found_to_id && permissions_RO && clear_memory_flags) {
J-Alves96de29f2022-04-26 16:05:24 +01002993 dlog_verbose(
2994				"Receiver with RO permissions can not request "
2995 "clear.\n");
2996 return ffa_error(FFA_DENIED);
2997 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002998
2999 /*
3000 * Check the impdef in the retrieve_request matches the value in
3001 * the original memory send.
3002 */
3003 if (ffa_version_from_memory_access_desc_size(
3004 memory_region->memory_access_desc_size) >=
Karl Meakin0e617d92024-04-05 12:55:22 +01003005 FFA_VERSION_1_2 &&
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003006 ffa_version_from_memory_access_desc_size(
3007 retrieve_request->memory_access_desc_size) >=
Karl Meakin0e617d92024-04-05 12:55:22 +01003008 FFA_VERSION_1_2) {
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003009 if (receiver->impdef.val[0] !=
3010 retrieve_request_receiver->impdef.val[0] ||
3011 receiver->impdef.val[1] !=
3012 retrieve_request_receiver->impdef.val[1]) {
3013 dlog_verbose(
3014 "Impdef value in memory send does not "
J-Alves0a824e92024-04-26 16:20:12 +01003015					"match retrieve request: send value "
3016					"%#lx %#lx, retrieve request "
3017					"value %#lx %#lx.\n",
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003018 receiver->impdef.val[0],
3019 receiver->impdef.val[1],
3020 retrieve_request_receiver->impdef
3021 .val[0],
3022 retrieve_request_receiver->impdef
3023 .val[1]);
3024 return ffa_error(FFA_INVALID_PARAMETERS);
3025 }
3026 }
J-Alves96de29f2022-04-26 16:05:24 +01003027 }
3028
3029 if (retrieve_receiver_index == retrieve_request->receiver_count) {
3030 dlog_verbose(
3031 "Retrieve request does not contain caller's (%x) "
3032 "permissions\n",
3033 to_vm_id);
3034 return ffa_error(FFA_INVALID_PARAMETERS);
3035 }
3036
3037 return (struct ffa_value){.func = FFA_SUCCESS_32};
3038}
3039
Daniel Boulby296ee702023-11-28 13:36:55 +00003040/**
3041 * According to section 17.4.3 of the FF-A v1.2 ALP0 specification, the
3042 * hypervisor may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region
3043 * description of a pending memory sharing operation whose allocator is the SPM,
3044 * for validation purposes before forwarding an FFA_MEM_RECLAIM call. For a
3045 * hypervisor retrieve request the endpoint memory access descriptor count must
3046 * be 0 (for any other retrieve request it must be >= 1).
J-Alvesa9cd7e32022-07-01 13:49:33 +01003047 */
Daniel Boulby296ee702023-11-28 13:36:55 +00003048bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request)
J-Alvesa9cd7e32022-07-01 13:49:33 +01003049{
Daniel Boulby296ee702023-11-28 13:36:55 +00003050 return request->receiver_count == 0U;
3051}
3052
J-Alvesa9cd7e32022-07-01 13:49:33 +01003053/*
3054 * Helper to reset count of fragments retrieved by the hypervisor.
3055 */
3056static void ffa_memory_retrieve_complete_from_hyp(
3057 struct ffa_memory_share_state *share_state)
3058{
3059 if (share_state->hypervisor_fragment_count ==
3060 share_state->fragment_count) {
3061 share_state->hypervisor_fragment_count = 0;
3062 }
3063}
3064
J-Alves089004f2022-07-13 14:25:44 +01003065/**
J-Alves4f0d9c12024-01-17 17:23:11 +00003066 * Prepares the return of the ffa_value for the memory retrieve response.
3067 */
3068static struct ffa_value ffa_memory_retrieve_resp(uint32_t total_length,
3069 uint32_t fragment_length)
3070{
3071 return (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
3072 .arg1 = total_length,
3073 .arg2 = fragment_length};
3074}
3075
3076/**
J-Alves089004f2022-07-13 14:25:44 +01003077 * Validates the memory region descriptor provided by the borrower in
3078 * FFA_MEM_RETRIEVE_REQ against the saved memory region provided by the lender
3079 * at the memory sharing call.
3080 */
3081static struct ffa_value ffa_memory_retrieve_validate(
J-Alves4f0d9c12024-01-17 17:23:11 +00003082 ffa_id_t to_id, struct ffa_memory_region *retrieve_request,
3083 uint32_t retrieve_request_length,
J-Alves089004f2022-07-13 14:25:44 +01003084 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
3085 uint32_t share_func)
3086{
3087 ffa_memory_region_flags_t transaction_type =
3088 retrieve_request->flags &
3089 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003090 enum ffa_memory_security security_state;
J-Alves4f0d9c12024-01-17 17:23:11 +00003091 const uint64_t memory_access_desc_size =
3092 retrieve_request->memory_access_desc_size;
3093 const uint32_t expected_retrieve_request_length =
3094 retrieve_request->receivers_offset +
3095 (uint32_t)(retrieve_request->receiver_count *
3096 memory_access_desc_size);
J-Alves089004f2022-07-13 14:25:44 +01003097
3098 assert(retrieve_request != NULL);
3099 assert(memory_region != NULL);
3100 assert(receiver_index != NULL);
J-Alves089004f2022-07-13 14:25:44 +01003101
J-Alves4f0d9c12024-01-17 17:23:11 +00003102 if (retrieve_request_length != expected_retrieve_request_length) {
3103 dlog_verbose(
3104 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
3105 "but was %d.\n",
3106 expected_retrieve_request_length,
3107 retrieve_request_length);
3108 return ffa_error(FFA_INVALID_PARAMETERS);
3109 }
3110
3111 if (retrieve_request->sender != memory_region->sender) {
3112 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003113			"Sender %d of retrieve request for handle %#lx "
J-Alves4f0d9c12024-01-17 17:23:11 +00003114			"doesn't match the lender %d.\n",
3115			retrieve_request->sender, memory_region->handle,
			memory_region->sender);
3116 return ffa_error(FFA_DENIED);
3117 }
3118
3119 /*
3120 * The SPMC can only process retrieve requests to memory share
3121 * operations with one borrower from the other world. It can't
3122 * determine the ID of the NWd VM that invoked the retrieve
3123 * request interface call. It relies on the hypervisor to
3124 * validate the caller's ID against that provided in the
3125 * `receivers` list of the retrieve response.
3126	 * In case there is only one borrower from the NWd in the
3127	 * transaction descriptor, record it in `to_id` for later use,
3128	 * and validate it in the retrieve request message.
3129	 * This limitation is due to the fact that the SPMC can't
3130	 * determine the index to update in the memory share state
	 * structures.
3131 */
3132 if (to_id == HF_HYPERVISOR_VM_ID) {
3133 uint32_t other_world_count = 0;
3134
3135 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3136 struct ffa_memory_access *receiver =
3137 ffa_memory_region_get_receiver(retrieve_request,
J-Alvesf220d572024-04-24 22:15:14 +01003138 i);
J-Alves4f0d9c12024-01-17 17:23:11 +00003139 assert(receiver != NULL);
3140
J-Alvesf220d572024-04-24 22:15:14 +01003141 if (!vm_id_is_current_world(
3142 receiver->receiver_permissions.receiver)) {
J-Alves4f0d9c12024-01-17 17:23:11 +00003143 other_world_count++;
J-Alvesf220d572024-04-24 22:15:14 +01003144 /* Set it to be used later. */
3145 to_id = receiver->receiver_permissions.receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003146 }
3147 }
3148
3149 if (other_world_count > 1) {
3150 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003151				"Only one receiver from the other world is "
				"supported.\n");
J-Alves4f0d9c12024-01-17 17:23:11 +00003152 return ffa_error(FFA_NOT_SUPPORTED);
3153 }
3154 }
J-Alves089004f2022-07-13 14:25:44 +01003155 /*
3156 * Check that the transaction type expected by the receiver is
3157 * correct, if it has been specified.
3158 */
3159 if (transaction_type !=
3160 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
3161 transaction_type != (memory_region->flags &
3162 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
3163 dlog_verbose(
3164 "Incorrect transaction type %#x for "
Karl Meakine8937d92024-03-19 16:04:25 +00003165 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003166 transaction_type,
3167 memory_region->flags &
3168 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
3169 retrieve_request->handle);
3170 return ffa_error(FFA_INVALID_PARAMETERS);
3171 }
3172
3173 if (retrieve_request->tag != memory_region->tag) {
3174 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003175 "Incorrect tag %lu for FFA_MEM_RETRIEVE_REQ, expected "
3176 "%lu for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003177 retrieve_request->tag, memory_region->tag,
3178 retrieve_request->handle);
3179 return ffa_error(FFA_INVALID_PARAMETERS);
3180 }
3181
J-Alves4f0d9c12024-01-17 17:23:11 +00003182 *receiver_index =
3183 ffa_memory_region_get_receiver_index(memory_region, to_id);
J-Alves089004f2022-07-13 14:25:44 +01003184
3185 if (*receiver_index == memory_region->receiver_count) {
3186 dlog_verbose(
3187 "Incorrect receiver VM ID %d for "
Karl Meakine8937d92024-03-19 16:04:25 +00003188			"FFA_MEM_RETRIEVE_REQ, handle %#lx.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003189 to_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01003190 return ffa_error(FFA_INVALID_PARAMETERS);
3191 }
3192
3193 if ((retrieve_request->flags &
3194 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
3195 dlog_verbose(
3196			"Retriever specified 'address range alignment hint' "
3197			"not supported.\n");
3198 return ffa_error(FFA_INVALID_PARAMETERS);
3199 }
3200 if ((retrieve_request->flags &
3201 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
3202 dlog_verbose(
3203 "Bits 8-5 must be zero in memory region's flags "
3204 "(address range alignment hint not supported).\n");
3205 return ffa_error(FFA_INVALID_PARAMETERS);
3206 }
3207
3208 if ((retrieve_request->flags & ~0x7FF) != 0U) {
3209 dlog_verbose(
3210			"Bits 31-11 must be zero in memory region's flags.\n");
3211 return ffa_error(FFA_INVALID_PARAMETERS);
3212 }
3213
J-Alves95fbb312024-03-20 15:19:16 +00003214 if ((share_func == FFA_MEM_SHARE_32 ||
3215 share_func == FFA_MEM_SHARE_64) &&
J-Alves089004f2022-07-13 14:25:44 +01003216 (retrieve_request->flags &
3217 (FFA_MEMORY_REGION_FLAG_CLEAR |
3218 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
3219 dlog_verbose(
3220			"Memory share operation can't request clearing of "
3221			"the memory region.\n");
3222 return ffa_error(FFA_INVALID_PARAMETERS);
3223 }
3224
3225 /*
3226 * If the borrower needs the memory to be cleared before mapping
3227 * to its address space, the sender should have set the flag
3228 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
3229 * FFA_DENIED.
3230 */
3231 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
3232 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
3233 dlog_verbose(
3234 "Borrower needs memory cleared. Sender needs to set "
3235 "flag for clearing memory.\n");
3236 return ffa_error(FFA_DENIED);
3237 }
3238
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003239 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
Karl Meakin84710f32023-10-12 15:14:49 +01003240 security_state = retrieve_request->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003241 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
3242 dlog_verbose(
3243 "Invalid security state for memory retrieve request "
3244 "operation.\n");
3245 return ffa_error(FFA_INVALID_PARAMETERS);
3246 }
3247
J-Alves089004f2022-07-13 14:25:44 +01003248 /*
3249 * If memory type is not specified, bypass validation of memory
3250 * attributes in the retrieve request. The retriever is expecting to
3251 * obtain this information from the SPMC.
3252 */
Karl Meakin84710f32023-10-12 15:14:49 +01003253 if (retrieve_request->attributes.type == FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves089004f2022-07-13 14:25:44 +01003254 return (struct ffa_value){.func = FFA_SUCCESS_32};
3255 }
3256
3257 /*
3258 * Ensure receiver's attributes are compatible with how
3259 * Hafnium maps memory: Normal Memory, Inner shareable,
3260 * Write-Back Read-Allocate Write-Allocate Cacheable.
3261 */
3262 return ffa_memory_attributes_validate(retrieve_request->attributes);
3263}
3264
J-Alves3f6527c2024-04-25 17:10:57 +01003265/**
3266 * Whilst processing the retrieve request, the operation could be aborted, and
3267 * changes to page tables and the share state structures need to be reverted.
3268 */
3269static void ffa_partition_memory_retrieve_request_undo(
3270 struct vm_locked from_locked,
3271 struct ffa_memory_share_state *share_state, uint32_t receiver_index)
3272{
3273 /*
3274	 * Currently this undo path is only expected for operations involving
3275	 * the 'other_world' VM.
3276 */
3277 assert(from_locked.vm->id == HF_OTHER_WORLD_ID);
3278 assert(share_state->retrieved_fragment_count[receiver_index] > 0);
3279
3280 /* Decrement the retrieved fragment count for the given receiver. */
3281 share_state->retrieved_fragment_count[receiver_index]--;
3282}
3283
3284/**
3285 * Whilst processing an hypervisor retrieve request the operation could be
3286 * Whilst processing a hypervisor retrieve request the operation could be
3287 * fragment count retrieved by the hypervisor should be enough.
3288 */
3289static void ffa_hypervisor_memory_retrieve_request_undo(
3290 struct ffa_memory_share_state *share_state)
3291{
3292 assert(share_state->hypervisor_fragment_count > 0);
3293 share_state->hypervisor_fragment_count--;
3294}
3295
J-Alves4f0d9c12024-01-17 17:23:11 +00003296static struct ffa_value ffa_partition_retrieve_request(
3297 struct share_states_locked share_states,
3298 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3299 struct ffa_memory_region *retrieve_request,
3300 uint32_t retrieve_request_length, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003301{
Karl Meakin84710f32023-10-12 15:14:49 +01003302 ffa_memory_access_permissions_t permissions = {0};
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003303 uint32_t memory_to_mode;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003304 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003305 struct ffa_composite_memory_region *composite;
3306 uint32_t total_length;
3307 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01003308 ffa_id_t receiver_id = to_locked.vm->id;
J-Alves4f0d9c12024-01-17 17:23:11 +00003309 bool is_retrieve_complete = false;
J-Alves4f0d9c12024-01-17 17:23:11 +00003310 const uint64_t memory_access_desc_size =
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003311 retrieve_request->memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003312 uint32_t receiver_index;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003313 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003314 ffa_memory_handle_t handle = retrieve_request->handle;
Karl Meakin84710f32023-10-12 15:14:49 +01003315 ffa_memory_attributes_t attributes = {0};
J-Alves460d36c2023-10-12 17:02:15 +01003316 uint32_t retrieve_mode = 0;
J-Alves4f0d9c12024-01-17 17:23:11 +00003317 struct ffa_memory_region *memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003318
J-Alves96de29f2022-04-26 16:05:24 +01003319 if (!share_state->sending_complete) {
3320 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003321 "Memory with handle %#lx not fully sent, can't "
J-Alves96de29f2022-04-26 16:05:24 +01003322 "retrieve.\n",
3323 handle);
J-Alves4f0d9c12024-01-17 17:23:11 +00003324 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alves96de29f2022-04-26 16:05:24 +01003325 }
3326
J-Alves4f0d9c12024-01-17 17:23:11 +00003327 /*
3328 * Validate retrieve request, according to what was sent by the
3329 * sender. Function will output the `receiver_index` from the
3330 * provided memory region.
3331 */
3332 ret = ffa_memory_retrieve_validate(
3333 receiver_id, retrieve_request, retrieve_request_length,
3334 memory_region, &receiver_index, share_state->share_func);
J-Alves089004f2022-07-13 14:25:44 +01003335
J-Alves4f0d9c12024-01-17 17:23:11 +00003336 if (ret.func != FFA_SUCCESS_32) {
3337 return ret;
J-Alves089004f2022-07-13 14:25:44 +01003338 }
J-Alves96de29f2022-04-26 16:05:24 +01003339
J-Alves4f0d9c12024-01-17 17:23:11 +00003340 /*
3341 * Validate the requested permissions against the sent
3342 * permissions.
3343	 * Outputs the permissions to give to the retriever in its
3344	 * stage-2 page tables.
3345 */
3346 ret = ffa_memory_retrieve_validate_memory_access_list(
3347 memory_region, retrieve_request, receiver_id, &permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003348 &receiver, share_state->share_func);
J-Alves4f0d9c12024-01-17 17:23:11 +00003349 if (ret.func != FFA_SUCCESS_32) {
3350 return ret;
3351 }
3352
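	/*
	 * Translate the validated FF-A permissions into the stage-2 mode
	 * bits used to map the region in the retriever's address space.
	 */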
3353 memory_to_mode = ffa_memory_permissions_to_mode(
3354 permissions, share_state->sender_orig_mode);
3355
Daniel Boulby6e261362024-06-13 16:53:00 +01003356 /*
3357	 * Check the requested memory type is compatible with the memory type
3358	 * of the owner, i.e. it follows the memory type precedence where
3359	 * Normal memory is more permissive than Device memory, and therefore
3360	 * Device memory can only be shared as Device memory.
3361 */
3362 if (retrieve_request->attributes.type == FFA_MEMORY_NORMAL_MEM &&
3363 ((share_state->sender_orig_mode & MM_MODE_D) != 0U ||
3364 memory_region->attributes.type == FFA_MEMORY_DEVICE_MEM)) {
3365 dlog_verbose(
3366 "Retrieving device memory as Normal memory is not "
3367 "allowed\n");
3368 return ffa_error(FFA_DENIED);
3369 }
3370
J-Alves4f0d9c12024-01-17 17:23:11 +00003371 ret = ffa_retrieve_check_update(
3372 to_locked, share_state->fragments,
3373 share_state->fragment_constituent_counts,
3374 share_state->fragment_count, memory_to_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003375 share_state->share_func, false, page_pool, &retrieve_mode,
3376 share_state->memory_protected);
J-Alves4f0d9c12024-01-17 17:23:11 +00003377
3378 if (ret.func != FFA_SUCCESS_32) {
3379 return ret;
3380 }
3381
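	/*
	 * The first fragment has now been retrieved by this receiver; the
	 * retrieve is complete once its retrieved fragment count matches the
	 * number of fragments in the transaction.
	 */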
3382 share_state->retrieved_fragment_count[receiver_index] = 1;
3383
3384 is_retrieve_complete =
3385 share_state->retrieved_fragment_count[receiver_index] ==
3386 share_state->fragment_count;
3387
J-Alvesb5084cf2022-07-06 14:20:12 +01003388 /* VMs acquire the RX buffer from SPMC. */
3389 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3390
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003391 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003392 * Copy response to RX buffer of caller and deliver the message.
3393 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003394 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003395 composite = ffa_memory_region_get_composite(memory_region, 0);
J-Alves4f0d9c12024-01-17 17:23:11 +00003396
Andrew Walbranca808b12020-05-15 17:22:28 +01003397 /*
J-Alves460d36c2023-10-12 17:02:15 +01003398 * Set the security state in the memory retrieve response attributes
3399 * if specified by the target mode.
3400 */
Karl Meakin3d32eef2024-11-25 16:40:09 +00003401 attributes = plat_ffa_memory_add_security_bit_from_mode(
3402 memory_region->attributes, retrieve_mode);
J-Alves460d36c2023-10-12 17:02:15 +01003403
3404 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003405 * Constituents which we received in the first fragment should
3406 * always fit in the first fragment we are sending, because the
3407 * header is the same size in both cases and we have a fixed
3408 * message buffer size. So `ffa_retrieved_memory_region_init`
3409 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01003410 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003411
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003412 /* Provide the permissions that had been provided. */
3413 receiver->receiver_permissions.permissions = permissions;
3414
3415 /*
3416 * Prepare the memory region descriptor for the retrieve response.
3417 * Provide the pointer to the receiver tracked in the share state
J-Alves7b9cc432024-04-04 10:57:17 +01003418 * structures.
3419 * At this point the retrieve request descriptor from the partition
3420 * has been processed. The `retrieve_request` is expected to be in
3421 * a region that is handled by the SPMC/Hyp. Reuse the same buffer to
3422 * prepare the retrieve response before copying it to the RX buffer of
3423 * the caller.
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003424 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003425 CHECK(ffa_retrieved_memory_region_init(
J-Alves7b9cc432024-04-04 10:57:17 +01003426 retrieve_request, to_locked.vm->ffa_version, HF_MAILBOX_SIZE,
3427 memory_region->sender, attributes, memory_region->flags, handle,
3428 permissions, receiver, 1, memory_access_desc_size,
3429 composite->page_count, composite->constituent_count,
3430 share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01003431 share_state->fragment_constituent_counts[0], &total_length,
3432 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01003433
J-Alves7b9cc432024-04-04 10:57:17 +01003434 /*
3435 * Copy the message from the buffer into the partition's mailbox.
3436	 * The operation might fail unexpectedly due to a change in the
3437	 * physical address space (PAS), or improper values for the sizes of
3438	 * the structures.
3438 */
3439 if (!memcpy_trapped(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3440 retrieve_request, fragment_length)) {
3441 dlog_error(
3442 "%s: aborted the copy of response to RX buffer of "
3443 "%x.\n",
3444 __func__, to_locked.vm->id);
J-Alves3f6527c2024-04-25 17:10:57 +01003445
3446 ffa_partition_memory_retrieve_request_undo(
3447 to_locked, share_state, receiver_index);
3448
J-Alves7b9cc432024-04-04 10:57:17 +01003449 return ffa_error(FFA_ABORTED);
3450 }
3451
J-Alves4f0d9c12024-01-17 17:23:11 +00003452 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003453 ffa_memory_retrieve_complete(share_states, share_state,
3454 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003455 }
J-Alves4f0d9c12024-01-17 17:23:11 +00003456
3457 return ffa_memory_retrieve_resp(total_length, fragment_length);
3458}
3459
3460static struct ffa_value ffa_hypervisor_retrieve_request(
3461 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3462 struct ffa_memory_region *retrieve_request)
3463{
3464 struct ffa_value ret;
3465 struct ffa_composite_memory_region *composite;
3466 uint32_t total_length;
3467 uint32_t fragment_length;
J-Alves4f0d9c12024-01-17 17:23:11 +00003468 ffa_memory_attributes_t attributes;
J-Alves7b6ab612024-01-24 09:54:54 +00003469 uint64_t memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003470 struct ffa_memory_region *memory_region;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003471 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003472 ffa_memory_handle_t handle = retrieve_request->handle;
3473
J-Alves4f0d9c12024-01-17 17:23:11 +00003474 memory_region = share_state->memory_region;
3475
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003476 assert(to_locked.vm->id == HF_HYPERVISOR_VM_ID);
3477
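	/*
	 * The size of each memory access descriptor emitted in the response
	 * depends on the caller's FF-A version: v1.2 callers use the current
	 * `ffa_memory_access` layout, while v1.0/v1.1 callers use the legacy
	 * `ffa_memory_access_v1_0` layout.
	 */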
J-Alves7b6ab612024-01-24 09:54:54 +00003478 switch (to_locked.vm->ffa_version) {
Karl Meakin0e617d92024-04-05 12:55:22 +01003479 case FFA_VERSION_1_2:
J-Alves7b6ab612024-01-24 09:54:54 +00003480 memory_access_desc_size = sizeof(struct ffa_memory_access);
3481 break;
Karl Meakin0e617d92024-04-05 12:55:22 +01003482 case FFA_VERSION_1_0:
3483 case FFA_VERSION_1_1:
J-Alves7b6ab612024-01-24 09:54:54 +00003484 memory_access_desc_size = sizeof(struct ffa_memory_access_v1_0);
3485 break;
3486 default:
3487 panic("version not supported: %x\n", to_locked.vm->ffa_version);
3488 }
3489
J-Alves4f0d9c12024-01-17 17:23:11 +00003490 if (share_state->hypervisor_fragment_count != 0U) {
3491 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003492 "Memory with handle %#lx already retrieved by "
J-Alves4f0d9c12024-01-17 17:23:11 +00003493 "the hypervisor.\n",
3494 handle);
3495 return ffa_error(FFA_DENIED);
3496 }
3497
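	/*
	 * Mark the hypervisor retrieve as in progress: the first fragment is
	 * about to be delivered, and this count is used both to reject a
	 * duplicate hypervisor retrieve and to serve FFA_MEM_FRAG_RX
	 * continuations.
	 */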
3498 share_state->hypervisor_fragment_count = 1;
3499
	/* VMs acquire the RX buffer from the SPMC. */
3501 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3502
3503 /*
3504 * Copy response to RX buffer of caller and deliver the message.
3505 * This must be done before the share_state is (possibly) freed.
3506 */
3507 composite = ffa_memory_region_get_composite(memory_region, 0);
3508
3509 /*
3510 * Constituents which we received in the first fragment should
3511 * always fit in the first fragment we are sending, because the
3512 * header is the same size in both cases and we have a fixed
3513 * message buffer size. So `ffa_retrieved_memory_region_init`
3514 * should never fail.
3515 */
3516
3517 /*
3518 * Set the security state in the memory retrieve response attributes
3519 * if specified by the target mode.
3520 */
Karl Meakin3d32eef2024-11-25 16:40:09 +00003521 attributes = plat_ffa_memory_add_security_bit_from_mode(
J-Alves4f0d9c12024-01-17 17:23:11 +00003522 memory_region->attributes, share_state->sender_orig_mode);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003523
3524 receiver = ffa_memory_region_get_receiver(memory_region, 0);
3525
J-Alves7b9cc432024-04-04 10:57:17 +01003526 /*
	 * At this point the `retrieve_request` is expected to be in memory
	 * managed by the hypervisor.
3529 */
J-Alves4f0d9c12024-01-17 17:23:11 +00003530 CHECK(ffa_retrieved_memory_region_init(
J-Alves7b9cc432024-04-04 10:57:17 +01003531 retrieve_request, to_locked.vm->ffa_version, HF_MAILBOX_SIZE,
3532 memory_region->sender, attributes, memory_region->flags, handle,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003533 receiver->receiver_permissions.permissions, receiver,
3534 memory_region->receiver_count, memory_access_desc_size,
J-Alves4f0d9c12024-01-17 17:23:11 +00003535 composite->page_count, composite->constituent_count,
3536 share_state->fragments[0],
3537 share_state->fragment_constituent_counts[0], &total_length,
3538 &fragment_length));
3539
	/*
	 * Copy the message from the buffer into the hypervisor's mailbox.
	 * The operation might fail unexpectedly due to a change in the PAS,
	 * or due to improper values for the sizes of the structures.
	 */
3545 if (!memcpy_trapped(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3546 retrieve_request, fragment_length)) {
3547 dlog_error(
			"%s: aborted copying the response to the RX buffer "
			"of %#x.\n",
3550 __func__, to_locked.vm->id);
J-Alves3f6527c2024-04-25 17:10:57 +01003551
3552 ffa_hypervisor_memory_retrieve_request_undo(share_state);
3553
J-Alves7b9cc432024-04-04 10:57:17 +01003554 return ffa_error(FFA_ABORTED);
3555 }
3556
J-Alves3f6527c2024-04-25 17:10:57 +01003557 ffa_memory_retrieve_complete_from_hyp(share_state);
3558
J-Alves4f0d9c12024-01-17 17:23:11 +00003559 return ffa_memory_retrieve_resp(total_length, fragment_length);
3560}
3561
3562struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
3563 struct ffa_memory_region *retrieve_request,
3564 uint32_t retrieve_request_length,
3565 struct mpool *page_pool)
3566{
3567 ffa_memory_handle_t handle = retrieve_request->handle;
3568 struct share_states_locked share_states;
3569 struct ffa_memory_share_state *share_state;
3570 struct ffa_value ret;
3571
3572 dump_share_states();
3573
3574 share_states = share_states_lock();
3575 share_state = get_share_state(share_states, handle);
3576 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003577 dlog_verbose("Invalid handle %#lx for FFA_MEM_RETRIEVE_REQ.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003578 handle);
3579 ret = ffa_error(FFA_INVALID_PARAMETERS);
3580 goto out;
3581 }
3582
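	/*
	 * A hypervisor retrieve request, used to fetch the description of the
	 * transaction rather than to map the memory, is handled separately
	 * from a retrieve request issued by a partition.
	 */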
Daniel Boulby296ee702023-11-28 13:36:55 +00003583 if (is_ffa_hypervisor_retrieve_request(retrieve_request)) {
J-Alves4f0d9c12024-01-17 17:23:11 +00003584 ret = ffa_hypervisor_retrieve_request(share_state, to_locked,
3585 retrieve_request);
3586 } else {
3587 ret = ffa_partition_retrieve_request(
3588 share_states, share_state, to_locked, retrieve_request,
3589 retrieve_request_length, page_pool);
3590 }
3591
3592 /* Track use of the RX buffer if the handling has succeeded. */
3593 if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
3594 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
3595 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
3596 }
3597
Andrew Walbranca808b12020-05-15 17:22:28 +01003598out:
3599 share_states_unlock(&share_states);
3600 dump_share_states();
3601 return ret;
3602}
3603
J-Alves5da37d92022-10-24 16:33:48 +01003604/**
3605 * Determine expected fragment offset according to the FF-A version of
3606 * the caller.
3607 */
3608static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
3609 struct ffa_memory_region *memory_region,
Karl Meakin0e617d92024-04-05 12:55:22 +01003610 uint32_t retrieved_constituents_count, enum ffa_version ffa_version)
J-Alves5da37d92022-10-24 16:33:48 +01003611{
3612 uint32_t expected_fragment_offset;
3613 uint32_t composite_constituents_offset;
3614
Karl Meakin0e617d92024-04-05 12:55:22 +01003615 if (ffa_version >= FFA_VERSION_1_1) {
		/*
		 * Hafnium stores memory regions internally in the FF-A v1.1
		 * format, so the constituents offset can be read directly
		 * from the descriptor.
		 */
3620 composite_constituents_offset =
3621 ffa_composite_constituent_offset(memory_region, 0);
Karl Meakin0e617d92024-04-05 12:55:22 +01003622 } else if (ffa_version == FFA_VERSION_1_0) {
		/*
		 * If the retriever is FF-A v1.0, compute the composite offset
		 * as it would have been laid out in the retrieve response.
		 */
3628 composite_constituents_offset =
3629 sizeof(struct ffa_memory_region_v1_0) +
3630 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003631 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01003632 sizeof(struct ffa_composite_memory_region);
3633 } else {
3634 panic("%s received an invalid FF-A version.\n", __func__);
3635 }
3636
3637 expected_fragment_offset =
3638 composite_constituents_offset +
3639 retrieved_constituents_count *
3640 sizeof(struct ffa_memory_region_constituent) -
Karl Meakin66a38bd2024-05-28 16:00:56 +01003641 (size_t)(memory_region->memory_access_desc_size *
3642 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003643
3644 return expected_fragment_offset;
3645}
3646
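/**
 * Handles an FFA_MEM_FRAG_RX continuation of a memory retrieve operation,
 * returning the next fragment of constituents via FFA_MEM_FRAG_TX.
 *
 * Illustrative borrower-side loop (a hedged sketch; the wrapper names are
 * hypothetical and not part of this file):
 *
 *	ret = ffa_mem_retrieve_req(...);	// FFA_MEM_RETRIEVE_RESP
 *	offset = ret.fragment_length;
 *	while (offset < ret.total_length) {
 *		frag = ffa_mem_frag_rx(handle, offset);	// served below
 *		offset += frag.fragment_length;
 *	}
 */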
Andrew Walbranca808b12020-05-15 17:22:28 +01003647struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3648 ffa_memory_handle_t handle,
3649 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003650 ffa_id_t sender_vm_id,
J-Alvesc3fd9752024-04-04 11:45:33 +01003651 void *retrieve_continue_page,
Andrew Walbranca808b12020-05-15 17:22:28 +01003652 struct mpool *page_pool)
3653{
3654 struct ffa_memory_region *memory_region;
3655 struct share_states_locked share_states;
3656 struct ffa_memory_share_state *share_state;
3657 struct ffa_value ret;
3658 uint32_t fragment_index;
3659 uint32_t retrieved_constituents_count;
3660 uint32_t i;
3661 uint32_t expected_fragment_offset;
3662 uint32_t remaining_constituent_count;
3663 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003664 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003665 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003666
3667 dump_share_states();
3668
3669 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003670 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003671 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003672 dlog_verbose("Invalid handle %#lx for FFA_MEM_FRAG_RX.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01003673 handle);
3674 ret = ffa_error(FFA_INVALID_PARAMETERS);
3675 goto out;
3676 }
3677
3678 memory_region = share_state->memory_region;
3679 CHECK(memory_region != NULL);
3680
Andrew Walbranca808b12020-05-15 17:22:28 +01003681 if (!share_state->sending_complete) {
3682 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003683 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003684 "retrieve.\n",
3685 handle);
3686 ret = ffa_error(FFA_INVALID_PARAMETERS);
3687 goto out;
3688 }
3689
	/*
	 * If a retrieve request from the hypervisor has been initiated for
	 * the given share_state, continue it; otherwise assume this is the
	 * continuation of a retrieve request from a partition.
	 */
3695 continue_ffa_hyp_mem_retrieve_req =
3696 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3697 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003698 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003699
J-Alves59ed0042022-07-28 18:26:41 +01003700 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003701 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003702 memory_region, to_locked.vm->id);
3703
3704 if (receiver_index == memory_region->receiver_count) {
3705 dlog_verbose(
3706 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
				"borrower in the memory sharing transaction "
				"(%#lx).\n",
J-Alves59ed0042022-07-28 18:26:41 +01003709 to_locked.vm->id, handle);
3710 ret = ffa_error(FFA_INVALID_PARAMETERS);
3711 goto out;
3712 }
3713
J-Alvesc3fd9752024-04-04 11:45:33 +01003714 fragment_index =
3715 share_state->retrieved_fragment_count[receiver_index];
3716
3717 if (fragment_index == 0 ||
3718 fragment_index >= share_state->fragment_count) {
J-Alves59ed0042022-07-28 18:26:41 +01003719 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003720 "Retrieval of memory with handle %#lx not yet "
J-Alves59ed0042022-07-28 18:26:41 +01003721 "started or already completed (%d/%d fragments "
3722 "retrieved).\n",
3723 handle,
3724 share_state->retrieved_fragment_count
3725 [receiver_index],
3726 share_state->fragment_count);
3727 ret = ffa_error(FFA_INVALID_PARAMETERS);
3728 goto out;
3729 }
J-Alves59ed0042022-07-28 18:26:41 +01003730 } else {
J-Alvesc3fd9752024-04-04 11:45:33 +01003731 fragment_index = share_state->hypervisor_fragment_count;
3732
3733 if (fragment_index == 0 ||
3734 fragment_index >= share_state->fragment_count) {
J-Alves59ed0042022-07-28 18:26:41 +01003735 dlog_verbose(
				"Retrieval of memory with handle %#lx not "
				"started by the hypervisor.\n",
3738 handle);
3739 ret = ffa_error(FFA_INVALID_PARAMETERS);
3740 goto out;
3741 }
3742
3743 if (memory_region->sender != sender_vm_id) {
3744 dlog_verbose(
3745 "Sender ID (%x) is not as expected for memory "
Karl Meakine8937d92024-03-19 16:04:25 +00003746 "handle %lx\n",
J-Alves59ed0042022-07-28 18:26:41 +01003747 sender_vm_id, handle);
3748 ret = ffa_error(FFA_INVALID_PARAMETERS);
3749 goto out;
3750 }
3751
J-Alves59ed0042022-07-28 18:26:41 +01003752 receiver_index = 0;
3753 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003754
3755 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003756 * Check that the given fragment offset is correct by counting
3757 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003758 */
3759 retrieved_constituents_count = 0;
3760 for (i = 0; i < fragment_index; ++i) {
3761 retrieved_constituents_count +=
3762 share_state->fragment_constituent_counts[i];
3763 }
J-Alvesc7484f12022-05-13 12:41:14 +01003764
3765 CHECK(memory_region->receiver_count > 0);
3766
Andrew Walbranca808b12020-05-15 17:22:28 +01003767 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003768 ffa_memory_retrieve_expected_offset_per_ffa_version(
3769 memory_region, retrieved_constituents_count,
3770 to_locked.vm->ffa_version);
3771
Andrew Walbranca808b12020-05-15 17:22:28 +01003772 if (fragment_offset != expected_fragment_offset) {
3773 dlog_verbose("Fragment offset was %d but expected %d.\n",
3774 fragment_offset, expected_fragment_offset);
3775 ret = ffa_error(FFA_INVALID_PARAMETERS);
3776 goto out;
3777 }
3778
	/*
	 * When Hafnium is the hypervisor, acquire the RX buffer of the VM,
	 * which is currently owned by the SPMC.
	 */
	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
J-Alves59ed0042022-07-28 18:26:41 +01003784
Andrew Walbranca808b12020-05-15 17:22:28 +01003785 remaining_constituent_count = ffa_memory_fragment_init(
J-Alvesc3fd9752024-04-04 11:45:33 +01003786 (struct ffa_memory_region_constituent *)retrieve_continue_page,
3787 HF_MAILBOX_SIZE, share_state->fragments[fragment_index],
Andrew Walbranca808b12020-05-15 17:22:28 +01003788 share_state->fragment_constituent_counts[fragment_index],
3789 &fragment_length);
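	/*
	 * Each stored fragment was originally received in a single
	 * mailbox-sized message, so re-packing it into a buffer of
	 * HF_MAILBOX_SIZE is not expected to leave any constituents over.
	 */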
3790 CHECK(remaining_constituent_count == 0);
J-Alves674e4de2024-01-17 16:20:32 +00003791
	/*
	 * Return FFA_ERROR(FFA_ABORTED) in case the access to the partition's
	 * RX buffer results in a GPF exception. This can happen, for either a
	 * VM retrieve request or a hypervisor retrieve request, if the PAS
	 * has been changed externally.
	 */
3798 if (!memcpy_trapped(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3799 retrieve_continue_page, fragment_length)) {
3800 dlog_error(
3801 "%s: aborted copying fragment to RX buffer of %#x.\n",
3802 __func__, to_locked.vm->id);
3803 ret = ffa_error(FFA_ABORTED);
3804 goto out;
3805 }
3806
Andrew Walbranca808b12020-05-15 17:22:28 +01003807 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003808 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003809
J-Alves59ed0042022-07-28 18:26:41 +01003810 if (!continue_ffa_hyp_mem_retrieve_req) {
3811 share_state->retrieved_fragment_count[receiver_index]++;
3812 if (share_state->retrieved_fragment_count[receiver_index] ==
3813 share_state->fragment_count) {
3814 ffa_memory_retrieve_complete(share_states, share_state,
3815 page_pool);
3816 }
3817 } else {
3818 share_state->hypervisor_fragment_count++;
3819
3820 ffa_memory_retrieve_complete_from_hyp(share_state);
3821 }
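	/*
	 * Reply with FFA_MEM_FRAG_TX: the handle is split across the first
	 * two arguments (low then high 32 bits) and the length of the copied
	 * fragment is returned in the third.
	 */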
Andrew Walbranca808b12020-05-15 17:22:28 +01003822 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3823 .arg1 = (uint32_t)handle,
3824 .arg2 = (uint32_t)(handle >> 32),
3825 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003826
3827out:
3828 share_states_unlock(&share_states);
3829 dump_share_states();
3830 return ret;
3831}
3832
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003833struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003834 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003835 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003836{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003837 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003838 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003839 struct ffa_memory_share_state *share_state;
3840 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003841 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003842 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003843 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003844 bool receivers_relinquished_memory;
Karl Meakin84710f32023-10-12 15:14:49 +01003845 ffa_memory_access_permissions_t receiver_permissions = {0};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003846
Andrew Walbrana65a1322020-04-06 19:32:32 +01003847 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003848 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003849 "Stream endpoints not supported (got %d endpoints on "
3850 "FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003851 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003852 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003853 }
3854
J-Alvesbd060342024-04-26 18:44:31 +01003855 if (vm_id_is_current_world(from_locked.vm->id) &&
3856 relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003857 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003858 "VM ID %d in relinquish message doesn't match calling "
3859 "VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003860 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003861 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003862 }
3863
3864 dump_share_states();
3865
3866 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003867 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003868 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003869 dlog_verbose("Invalid handle %#lx for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003870 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003871 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003872 goto out;
3873 }
3874
Andrew Walbranca808b12020-05-15 17:22:28 +01003875 if (!share_state->sending_complete) {
3876 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003877 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003878 "relinquish.\n",
3879 handle);
3880 ret = ffa_error(FFA_INVALID_PARAMETERS);
3881 goto out;
3882 }
3883
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003884 memory_region = share_state->memory_region;
3885 CHECK(memory_region != NULL);
3886
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003887 receiver_index = ffa_memory_region_get_receiver_index(
J-Alvesbd060342024-04-26 18:44:31 +01003888 memory_region, relinquish_request->endpoints[0]);
J-Alves8eb19162022-04-28 10:56:48 +01003889
3890 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003891 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003892 "VM ID %d tried to relinquish memory region "
			"with handle %#lx but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003894 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003895 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003896 goto out;
3897 }
3898
J-Alves8eb19162022-04-28 10:56:48 +01003899 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003900 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003901 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003902 "Memory with handle %#lx not yet fully retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003903 "receiver %x can't relinquish.\n",
3904 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003905 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003906 goto out;
3907 }
3908
	/*
	 * The memory is only cleared once no other borrower still has it
	 * retrieved, and only if clearing was requested, either in this
	 * relinquish call or in a borrower's retrieve request.
	 */
3913 receivers_relinquished_memory = true;
3914
3915 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3916 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003917 ffa_memory_region_get_receiver(memory_region, i);
3918 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003919 if (receiver->receiver_permissions.receiver ==
3920 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003921 receiver_permissions =
3922 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003923 continue;
3924 }
3925
3926 if (share_state->retrieved_fragment_count[i] != 0U) {
3927 receivers_relinquished_memory = false;
3928 break;
3929 }
3930 }
3931
3932 clear = receivers_relinquished_memory &&
Daniel Boulby2e14ebe2024-01-15 16:21:44 +00003933 ((relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3934 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003935
3936 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003937 * Clear is not allowed for memory that was shared, as the
3938 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003939 */
J-Alves95fbb312024-03-20 15:19:16 +00003940 if (clear && (share_state->share_func == FFA_MEM_SHARE_32 ||
3941 share_state->share_func == FFA_MEM_SHARE_64)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003942 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003943 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003944 goto out;
3945 }
3946
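	/*
	 * A borrower that was only granted read-only access cannot request
	 * the memory to be cleared on relinquish.
	 */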
J-Alvesb886d492024-04-15 10:55:29 +01003947 if (clear && receiver_permissions.data_access == FFA_DATA_ACCESS_RO) {
		dlog_verbose("%s: RO memory can't use the clear memory flag.\n",
3949 __func__);
3950 ret = ffa_error(FFA_DENIED);
3951 goto out;
3952 }
3953
Andrew Walbranca808b12020-05-15 17:22:28 +01003954 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003955 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003956 share_state->fragment_constituent_counts,
J-Alves69cdfd92024-04-26 11:40:59 +01003957 share_state->fragment_count, share_state->sender_orig_mode,
3958 page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003959
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003960 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003961 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003962 * Mark memory handle as not retrieved, so it can be
3963 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003964 */
J-Alves8eb19162022-04-28 10:56:48 +01003965 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003966 }
3967
3968out:
3969 share_states_unlock(&share_states);
3970 dump_share_states();
3971 return ret;
3972}
3973
3974/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003975 * Validates that the reclaim transition is allowed for the given
3976 * handle, updates the page table of the reclaiming VM, and frees the
3977 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003978 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003979struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003980 ffa_memory_handle_t handle,
3981 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003982 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003983{
3984 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003985 struct ffa_memory_share_state *share_state;
3986 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003987 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003988
3989 dump_share_states();
3990
3991 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003992
Karl Meakin4a2854a2023-06-30 16:26:52 +01003993 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003994 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003995 dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003996 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003997 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003998 goto out;
3999 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01004000 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004001
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004002 CHECK(memory_region != NULL);
4003
J-Alvesa9cd7e32022-07-01 13:49:33 +01004004 if (vm_id_is_current_world(to_locked.vm->id) &&
4005 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004006 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00004007 "VM %#x attempted to reclaim memory handle %#lx "
Olivier Deprezf92e5d42020-11-13 16:00:54 +01004008 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004009 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01004010 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004011 goto out;
4012 }
4013
Andrew Walbranca808b12020-05-15 17:22:28 +01004014 if (!share_state->sending_complete) {
4015 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00004016 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01004017 "reclaim.\n",
4018 handle);
4019 ret = ffa_error(FFA_INVALID_PARAMETERS);
4020 goto out;
4021 }
4022
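	/*
	 * Reclaim is only allowed once every borrower has relinquished the
	 * memory, i.e. no receiver has any retrieved fragments outstanding.
	 */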
J-Alves752236c2022-04-28 11:07:47 +01004023 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
4024 if (share_state->retrieved_fragment_count[i] != 0) {
J-Alves9bbcb872024-04-25 17:19:00 +01004025 struct ffa_memory_access *receiver =
4026 ffa_memory_region_get_receiver(memory_region,
4027 i);
4028
4029 assert(receiver != NULL);
4030 (void)receiver;
J-Alves752236c2022-04-28 11:07:47 +01004031 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01004032 "Tried to reclaim memory handle %#lx that has "
				"not been relinquished by all borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01004034 handle,
J-Alves9bbcb872024-04-25 17:19:00 +01004035 receiver->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01004036 ret = ffa_error(FFA_DENIED);
4037 goto out;
4038 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004039 }
4040
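	/*
	 * Restore the memory to the sender: revert the page tables to the
	 * sender's original mode, clearing the memory first if requested via
	 * the reclaim flags.
	 */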
Andrew Walbranca808b12020-05-15 17:22:28 +01004041 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01004042 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01004043 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00004044 share_state->fragment_count, share_state->sender_orig_mode,
J-Alves460d36c2023-10-12 17:02:15 +01004045 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool,
J-Alvesfd206052023-05-22 16:45:00 +01004046 NULL, share_state->memory_protected);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004047
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01004048 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004049 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00004050 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00004051 }
4052
4053out:
4054 share_states_unlock(&share_states);
4055 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01004056}