/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/memcpy_trapped.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/panic.h"
#include "hf/plat/memory_protect.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
	struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
	struct ffa_memory_access *receiver;
	uint32_t composite_offset;

	CHECK(receiver_index < memory_region->receiver_count);

	receiver =
		ffa_memory_region_get_receiver(memory_region, receiver_index);
	CHECK(receiver != NULL);

	composite_offset = receiver->composite_memory_region_offset;

	CHECK(composite_offset != 0);

	return composite_offset + sizeof(struct ffa_composite_memory_region);
}

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
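
/*
 * Usage sketch (illustrative, not upstream documentation): for handles
 * allocated by this world, handle creation and index extraction are expected
 * to be inverses, which get_share_state() below relies on:
 *
 *	ffa_memory_handle_t handle = plat_ffa_memory_handle_make(5);
 *	assert(ffa_memory_handle_get_index(handle) == 5);
 */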

/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle)
{
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
			     ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			return allocated_state;
		}
	}

	return NULL;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
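
/*
 * Usage sketch (illustrative, not a normative contract): callers pair
 * share_states_lock()/share_states_unlock() around share state accesses:
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	struct ffa_memory_share_state *share_state =
 *		get_share_state(share_states, handle);
 *
 *	if (share_state != NULL) {
 *		(operate on the share state while the lock is held)
 *	}
 *	share_states_unlock(&share_states);
 */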

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_get_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}
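
/*
 * Worked example for the check above (illustrative numbers): if the composite
 * declares constituent_count = 16 and two fragments carrying 10 and 6
 * constituents have been received, 10 + 6 == 16 and sending is complete; with
 * fragments of 10 and 4 constituents it is not.
 */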

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}
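
/*
 * Worked example (illustrative numbers): with fragments carrying 10 and 6
 * constituents received so far, the next fragment is expected at
 * ffa_composite_constituent_offset(memory_region, 0) +
 * 16 * sizeof(struct ffa_memory_region_constituent) bytes into the
 * transaction descriptor.
 */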

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes (shareability = %s, cacheability = %s, "
	     "type = %s, security = %s), flags %#x, handle %#lx "
	     "tag %lu, memory access descriptor size %u, to %u "
	     "recipients [",
	     memory_region->sender,
	     ffa_memory_shareability_name(
		     memory_region->attributes.shareability),
	     ffa_memory_cacheability_name(
		     memory_region->attributes.cacheability),
	     ffa_memory_type_name(memory_region->attributes.type),
	     ffa_memory_security_name(memory_region->attributes.security),
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->memory_access_desc_size,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		if (i != 0) {
			dlog(", ");
		}
		dlog("Receiver %#x: permissions (%s, %s) (offset %u)",
		     receiver->receiver_permissions.receiver,
		     ffa_data_access_name(receiver->receiver_permissions
						  .permissions.data_access),
		     ffa_instruction_access_name(
			     receiver->receiver_permissions.permissions
				     .instruction_access),
		     receiver->composite_memory_region_offset);
		/* The impdef field is only present in v1.2 and later. */
		if (ffa_version_from_memory_access_desc_size(
			    memory_region->memory_access_desc_size) >=
		    FFA_VERSION_1_2) {
			dlog(", impdef: %#lx %#lx", receiver->impdef.val[0],
			     receiver->impdef.val[1]);
		}
	}
	dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_64:
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_64:
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_64:
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#lx (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (permissions.data_access) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (permissions.instruction_access) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}

/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		dlog_verbose("%s: no constituents\n", __func__);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				dlog_verbose("%s: addresses not page-aligned\n",
					     __func__);
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				dlog_verbose(
					"%s: constituent memory range "
					"%#lx..%#lx "
					"not mapped with the same mode\n",
					__func__, begin.ipa, end.ipa);
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the
			 * same mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"%s: expected mode %#x but was %#x "
					"for %d pages at %#lx.\n",
					__func__, *orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

enum ffa_version ffa_version_from_memory_access_desc_size(
	uint32_t memory_access_desc_size)
{
	switch (memory_access_desc_size) {
	/*
	 * v1.0 and v1.1 memory access descriptors are the same size; however,
	 * v1.1 is the first version to include the memory access descriptor
	 * size field, so return v1.1.
	 */
	case sizeof(struct ffa_memory_access_v1_0):
		return FFA_VERSION_1_1;
	case sizeof(struct ffa_memory_access):
		return FFA_VERSION_1_2;
	}
	return 0;
}

/**
 * Check that the receiver size and offset given are valid for the sender's
 * FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
	uint32_t receivers_size, uint32_t receivers_offset,
	enum ffa_version ffa_version)
{
	/*
	 * Check that the version that the memory access descriptor size
	 * belongs to is compatible with the FF-A version we believe the
	 * sender to be using.
	 */
	enum ffa_version expected_ffa_version =
		ffa_version_from_memory_access_desc_size(receivers_size);
	if (!ffa_versions_are_compatible(expected_ffa_version, ffa_version)) {
		return false;
	}

	/*
	 * Check that the receivers_offset matches the version we found from
	 * the memory access descriptor size.
	 */
	switch (expected_ffa_version) {
	case FFA_VERSION_1_1:
	case FFA_VERSION_1_2:
		return receivers_offset == sizeof(struct ffa_memory_region);
	default:
		return false;
	}
}

/**
 * Check that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows, and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
				    enum ffa_version ffa_version,
				    uint32_t fragment_length,
				    bool send_transaction)
{
	uint32_t receiver_count;
	struct ffa_memory_access *receiver;
	uint32_t composite_offset_0;
	struct ffa_memory_region_v1_0 *memory_region_v1_0 =
		(struct ffa_memory_region_v1_0 *)memory_region;

	if (ffa_version == FFA_VERSION_1_0) {
		/* Check the reserved fields are 0. */
		if (memory_region_v1_0->reserved_0 != 0 ||
		    memory_region_v1_0->reserved_1 != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		receiver_count = memory_region_v1_0->receiver_count;
	} else {
		uint32_t receivers_size =
			memory_region->memory_access_desc_size;
		uint32_t receivers_offset = memory_region->receivers_offset;

		/* Check the reserved fields are 0. */
		if (memory_region->reserved[0] != 0 ||
		    memory_region->reserved[1] != 0 ||
		    memory_region->reserved[2] != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		/*
		 * Check memory_access_desc_size matches the size of the struct
		 * for the sender's FF-A version.
		 */
		if (!receiver_size_and_offset_valid_for_version(
			    receivers_size, receivers_offset, ffa_version)) {
			dlog_verbose(
				"Invalid memory access descriptor size %d "
				"or receiver offset %d "
				"for FF-A version %#x\n",
				receivers_size, receivers_offset, ffa_version);
			return false;
		}

		receiver_count = memory_region->receiver_count;
	}

	/* Check the receiver count is not too large. */
	if (receiver_count > MAX_MEM_SHARE_RECIPIENTS || receiver_count < 1) {
		dlog_verbose(
			"Receiver count must satisfy "
			"0 < receiver_count <= %u; %u was specified.\n",
			MAX_MEM_SHARE_RECIPIENTS, receiver_count);
		return false;
	}

	/*
	 * Check values in the memory access descriptors.
	 * The composite offset values must be the same for all receivers, so
	 * check that the first one is valid and then that they are all the
	 * same.
	 */
	receiver = ffa_version == FFA_VERSION_1_0
			   ? (struct ffa_memory_access *)&memory_region_v1_0
				      ->receivers[0]
			   : ffa_memory_region_get_receiver(memory_region, 0);
	assert(receiver != NULL);
	composite_offset_0 = receiver->composite_memory_region_offset;

	if (!send_transaction) {
		if (composite_offset_0 != 0) {
			dlog_verbose(
				"Composite memory region descriptor offset "
				"must be 0 for retrieve requests; "
				"currently %d.\n",
				composite_offset_0);
			return false;
		}
	} else {
		bool comp_offset_is_zero = composite_offset_0 == 0U;
		bool comp_offset_lt_transaction_descriptor_size =
			composite_offset_0 <
			(sizeof(struct ffa_memory_region) +
			 (uint32_t)(memory_region->memory_access_desc_size *
				    memory_region->receiver_count));
		bool comp_offset_with_comp_gt_fragment_length =
			composite_offset_0 +
				sizeof(struct ffa_composite_memory_region) >
			fragment_length;
		if (comp_offset_is_zero ||
		    comp_offset_lt_transaction_descriptor_size ||
		    comp_offset_with_comp_gt_fragment_length) {
			dlog_verbose(
				"Invalid composite memory region descriptor "
				"offset for send transaction %u\n",
				composite_offset_0);
			return false;
		}
	}

	for (size_t i = 0; i < memory_region->receiver_count; i++) {
		uint32_t composite_offset;

		if (ffa_version == FFA_VERSION_1_0) {
			struct ffa_memory_access_v1_0 *receiver_v1_0 =
				&memory_region_v1_0->receivers[i];
			/* Check reserved fields are 0. */
			if (receiver_v1_0->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. Currently "
					"receiver %zu has a reserved field "
					"with a value of %lu\n",
					i, receiver_v1_0->reserved_0);
				return false;
			}
			/*
			 * We can cast to the current version receiver as the
			 * remaining fields we are checking have the same
			 * offsets for all versions since memory access
			 * descriptors are forwards compatible.
			 */
			receiver = (struct ffa_memory_access *)receiver_v1_0;
		} else {
			receiver = ffa_memory_region_get_receiver(memory_region,
								  i);
			assert(receiver != NULL);

			if (receiver->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. Currently "
					"receiver %zu has a reserved field "
					"with a value of %lu\n",
					i, receiver->reserved_0);
				return false;
			}
		}

		/* Check composite offset values are equal for all receivers. */
		composite_offset = receiver->composite_memory_region_offset;
		if (composite_offset != composite_offset_0) {
			dlog_verbose(
				"Composite offset %x differs from first "
				"receiver's offset %x\n",
				composite_offset, composite_offset_0);
			return false;
		}
	}
	return true;
}

/**
 * If the receivers for the memory management operation are all from the
 * secure world and this isn't an FFA_MEM_SHARE, then request a memory
 * security state update by returning MAP_ACTION_CHECK_PROTECT.
 */
static enum ffa_map_action ffa_mem_send_get_map_action(
	bool all_receivers_from_current_world, ffa_id_t sender_id,
	uint32_t mem_func_id)
{
	const bool is_memory_share_abi = mem_func_id == FFA_MEM_SHARE_32 ||
					 mem_func_id == FFA_MEM_SHARE_64;
	const bool protect_memory =
		(!is_memory_share_abi && all_receivers_from_current_world &&
		 ffa_is_vm_id(sender_id));

	return protect_memory ? MAP_ACTION_CHECK_PROTECT : MAP_ACTION_CHECK;
}
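
/*
 * Decision summary for the helper above (an interpretation of its logic, not
 * normative documentation):
 *
 *	FFA_MEM_SHARE_32/64, any sender          -> MAP_ACTION_CHECK
 *	FFA_MEM_LEND/DONATE, sender is an SP     -> MAP_ACTION_CHECK
 *	FFA_MEM_LEND/DONATE, sender is a VM and
 *	all receivers are in the current world   -> MAP_ACTION_CHECK_PROTECT
 */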
700
701/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000702 * Verify that all pages have the same mode, that the starting mode
703 * constitutes a valid state and obtain the next mode to apply
J-Alves460d36c2023-10-12 17:02:15 +0100704 * to the sending VM. It outputs the mapping action that needs to be
705 * invoked for the given memory range. On memory lend/donate there
706 * could be a need to protect the memory from the normal world.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000707 *
708 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100709 * 1) FFA_DENIED if a state transition was not found;
710 * 2) FFA_DENIED if the pages being shared do not have the same mode within
Andrew Walbrana65a1322020-04-06 19:32:32 +0100711 * the <from> VM;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100712 * 3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
Andrew Walbrana65a1322020-04-06 19:32:32 +0100713 * aligned;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100714 * 4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
715 * Or FFA_SUCCESS on success.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000716 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100717static struct ffa_value ffa_send_check_transition(
Andrew Walbrana65a1322020-04-06 19:32:32 +0100718 struct vm_locked from, uint32_t share_func,
Daniel Boulbya76fd912024-02-22 14:22:15 +0000719 struct ffa_memory_region *memory_region, uint32_t *orig_from_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +0100720 struct ffa_memory_region_constituent **fragments,
721 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
Daniel Boulby4b846eb2024-05-23 17:32:23 +0100722 uint32_t *from_mode, enum ffa_map_action *map_action, bool zero)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000723{
724 const uint32_t state_mask =
725 MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100726 struct ffa_value ret;
J-Alves460d36c2023-10-12 17:02:15 +0100727 bool all_receivers_from_current_world = true;
Daniel Boulbya76fd912024-02-22 14:22:15 +0000728 uint32_t receivers_count = memory_region->receiver_count;
J-Alves95fbb312024-03-20 15:19:16 +0000729 const bool is_memory_lend = (share_func == FFA_MEM_LEND_32) ||
730 (share_func == FFA_MEM_LEND_64);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000731
Andrew Walbranca808b12020-05-15 17:22:28 +0100732 ret = constituents_get_mode(from, orig_from_mode, fragments,
733 fragment_constituent_counts,
734 fragment_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100735 if (ret.func != FFA_SUCCESS_32) {
Olivier Depreze7eb1682022-03-16 17:09:03 +0100736 dlog_verbose("Inconsistent modes.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +0100737 return ret;
Andrew Scullb5f49e02019-10-02 13:20:47 +0100738 }
739
Daniel Boulby63af1fa2024-03-18 14:17:31 +0000740 /* Device memory regions can only be lent a single borrower. */
Daniel Boulby9764ff62024-01-30 17:47:39 +0000741 if ((*orig_from_mode & MM_MODE_D) != 0U &&
J-Alves95fbb312024-03-20 15:19:16 +0000742 !(is_memory_lend && receivers_count == 1)) {
Daniel Boulby9764ff62024-01-30 17:47:39 +0000743 dlog_verbose(
Daniel Boulby63af1fa2024-03-18 14:17:31 +0000744 "Device memory can only be lent to a single borrower "
745 "(mode is %#x).\n",
Daniel Boulby9764ff62024-01-30 17:47:39 +0000746 *orig_from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100747 return ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000748 }
749
750 /*
751 * Ensure the sender is the owner and has exclusive access to the
752 * memory.
753 */
754 if ((*orig_from_mode & state_mask) != 0) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100755 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +0100756 }
757
Daniel Boulby4b846eb2024-05-23 17:32:23 +0100758 /*
759 * Memory cannot be zeroed during the lend/donate operation if the
760 * sender only has RO access.
761 */
762 if ((*orig_from_mode & MM_MODE_W) == 0 && zero == true) {
763 dlog_verbose(
764 "Cannot zero memory when the sender doesn't have "
765 "write access\n");
766 return ffa_error(FFA_DENIED);
767 }
768
Daniel Boulbya76fd912024-02-22 14:22:15 +0000769 assert(receivers_count > 0U);
J-Alves7cd5eb32020-10-16 19:06:10 +0100770
J-Alves363f5722022-04-25 17:37:37 +0100771 for (uint32_t i = 0U; i < receivers_count; i++) {
Daniel Boulbya76fd912024-02-22 14:22:15 +0000772 struct ffa_memory_access *receiver =
773 ffa_memory_region_get_receiver(memory_region, i);
774 assert(receiver != NULL);
J-Alves363f5722022-04-25 17:37:37 +0100775 ffa_memory_access_permissions_t permissions =
Daniel Boulbya76fd912024-02-22 14:22:15 +0000776 receiver->receiver_permissions.permissions;
J-Alves363f5722022-04-25 17:37:37 +0100777 uint32_t required_from_mode = ffa_memory_permissions_to_mode(
778 permissions, *orig_from_mode);
779
J-Alves788b4492023-04-18 14:01:23 +0100780 /*
781 * The assumption is that at this point, the operation from
782 * SP to a receiver VM, should have returned an FFA_ERROR
783 * already.
784 */
785 if (!ffa_is_vm_id(from.vm->id)) {
786 assert(!ffa_is_vm_id(
Daniel Boulbya76fd912024-02-22 14:22:15 +0000787 receiver->receiver_permissions.receiver));
J-Alves788b4492023-04-18 14:01:23 +0100788 }
789
J-Alves460d36c2023-10-12 17:02:15 +0100790 /* Track if all senders are from current world. */
791 all_receivers_from_current_world =
792 all_receivers_from_current_world &&
793 vm_id_is_current_world(
Daniel Boulbya76fd912024-02-22 14:22:15 +0000794 receiver->receiver_permissions.receiver);
J-Alves460d36c2023-10-12 17:02:15 +0100795
J-Alves363f5722022-04-25 17:37:37 +0100796 if ((*orig_from_mode & required_from_mode) !=
797 required_from_mode) {
798 dlog_verbose(
799 "Sender tried to send memory with permissions "
J-Alves788b4492023-04-18 14:01:23 +0100800 "which required mode %#x but only had %#x "
801 "itself.\n",
J-Alves363f5722022-04-25 17:37:37 +0100802 required_from_mode, *orig_from_mode);
803 return ffa_error(FFA_DENIED);
804 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000805 }
806
J-Alves460d36c2023-10-12 17:02:15 +0100807 *map_action = ffa_mem_send_get_map_action(
808 all_receivers_from_current_world, from.vm->id, share_func);
809
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000810 /* Find the appropriate new mode. */
811 *from_mode = ~state_mask & *orig_from_mode;
Andrew Walbrane7ad3c02019-12-24 17:03:04 +0000812 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +0000813 case FFA_MEM_DONATE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100814 case FFA_MEM_DONATE_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000815 *from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
Jose Marinho75509b42019-04-09 09:34:59 +0100816 break;
J-Alves95fbb312024-03-20 15:19:16 +0000817 case FFA_MEM_LEND_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100818 case FFA_MEM_LEND_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000819 *from_mode |= MM_MODE_INVALID;
Andrew Walbran648fc3e2019-10-22 16:23:05 +0100820 break;
J-Alves95fbb312024-03-20 15:19:16 +0000821 case FFA_MEM_SHARE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100822 case FFA_MEM_SHARE_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000823 *from_mode |= MM_MODE_SHARED;
Jose Marinho56c25732019-05-20 09:48:53 +0100824 break;
825
Jose Marinho75509b42019-04-09 09:34:59 +0100826 default:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100827 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho75509b42019-04-09 09:34:59 +0100828 }
829
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100830 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000831}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *  the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *  aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode, bool memory_protected,
	enum ffa_map_action *map_action)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, orig_to_mode is expected to overlap with
		 * `state_mask`, as a result of the function
		 * `ffa_send_check_transition`.
		 *
		 * If Hafnium is the SPMC:
		 * - If the caller of the reclaim interface is an SP, the
		 * memory shall have been protected throughout the flow.
		 * - If the caller of the reclaim is from the NWd, the memory
		 * may have been protected at the time of lending/donating
		 * the memory. In such a case, set the action to unprotect
		 * memory in the handling of the reclaim operation.
		 * - If Hafnium is the hypervisor, memory shall never have
		 * been protected in memory lend/share/donate.
		 *
		 * More details in the doc comment of the function
		 * `ffa_region_group_identity_map`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
			assert(!memory_protected);
		} else if (to.vm->id == HF_OTHER_WORLD_ID &&
			   map_action != NULL && memory_protected) {
			*map_action = MAP_ACTION_COMMIT_UNPROTECT;
		}
	} else {
		/*
		 * If the retriever is from a virtual FF-A instance:
		 * ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}

		/*
		 * If the memory has been protected before, clear the NS bit
		 * to allow secure access from the SP.
		 */
		if (memory_protected) {
			*to_mode &= ~plat_ffa_other_world_mode();
		}
	}

	switch (share_func) {
	case FFA_MEM_DONATE_64:
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;
	case FFA_MEM_LEND_64:
	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;
	case FFA_MEM_SHARE_64:
	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/*
 * Performs the operations related to the `action` MAP_ACTION_CHECK*.
 * Returns:
 * - FFA_SUCCESS_32: if all goes well.
 * - FFA_ERROR_32: with FFA_NO_MEMORY if there is no memory to manage
 * the page table update, or with the error code provided by the function
 * `arch_memory_protect`.
 */
static struct ffa_value ffa_region_group_check_actions(
	struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
	struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
	bool *memory_protected)
{
	struct ffa_value ret;
	bool is_memory_protected;

	if (!vm_identity_prepare(vm_locked, pa_begin, pa_end, mode, ppool)) {
		dlog_verbose(
			"%s: memory can't be mapped to %x due to lack of "
			"memory. Base: %lx end: %lx\n",
			__func__, vm_locked.vm->id, pa_addr(pa_begin),
			pa_addr(pa_end));
		return ffa_error(FFA_NO_MEMORY);
	}

	switch (action) {
	case MAP_ACTION_CHECK:
		/* No protect requested. */
		is_memory_protected = false;
		ret = (struct ffa_value){.func = FFA_SUCCESS_32};
		break;
	case MAP_ACTION_CHECK_PROTECT: {
		paddr_t last_protected_pa = pa_init(0);

		ret = arch_memory_protect(pa_begin, pa_end, &last_protected_pa);

		is_memory_protected = (ret.func == FFA_SUCCESS_32);

		/*
		 * - If protecting the memory failed with FFA_DENIED, some
		 * range of memory was in the wrong state. In that case, the
		 * SPM reverts the state of the pages that were successfully
		 * updated.
		 * - If protecting the memory failed with FFA_NOT_SUPPORTED,
		 * the platform doesn't support the protection mechanism, but
		 * the page table update is still permitted to go through. The
		 * variable `is_memory_protected` will be equal to false.
		 * - If protecting the memory failed with
		 * FFA_INVALID_PARAMETERS, break from the switch and return
		 * the error.
		 */
		if (ret.func == FFA_ERROR_32) {
			assert(!is_memory_protected);
			if (ffa_error_code(ret) == FFA_DENIED &&
			    pa_addr(last_protected_pa) != (uintptr_t)0) {
				CHECK(arch_memory_unprotect(
					pa_begin,
					pa_add(last_protected_pa, PAGE_SIZE)));
			} else if (ffa_error_code(ret) == FFA_NOT_SUPPORTED) {
				ret = (struct ffa_value){
					.func = FFA_SUCCESS_32,
				};
			}
		}
	} break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}

	if (memory_protected != NULL) {
		*memory_protected = is_memory_protected;
	}

	return ret;
}

static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
					    paddr_t pa_begin, paddr_t pa_end,
					    struct mpool *ppool, uint32_t mode,
					    enum ffa_map_action action)
{
	switch (action) {
	case MAP_ACTION_COMMIT_UNPROTECT:
		/*
		 * This is checked to succeed because the SPM should only be
		 * unprotecting memory that it protected before.
		 */
		CHECK(arch_memory_unprotect(pa_begin, pa_end));
		/* Fall through: the unprotect variant also commits. */
	case MAP_ACTION_COMMIT:
		vm_identity_commit(vm_locked, pa_begin, pa_end, mode, ppool,
				   NULL);
		break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}
}

/**
 * Helper function to revert a failed "Protect" action from the SPMC:
 * - `fragment_count`: should specify the number of fragments to traverse from
 * `fragments`. This may not be the full amount of fragments that are part of
 * the share_state structure.
 * - `fragment_constituent_counts`: array holding the amount of constituents
 * per fragment.
 * - `end`: pointer to the constituent that failed the "protect" action. It
 * shall be part of the last fragment, and it shall make the loop below break.
 */
static void ffa_region_group_fragments_revert_protect(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	const struct ffa_memory_region_constituent *end)
{
	for (uint32_t i = 0; i < fragment_count; ++i) {
		for (uint32_t j = 0; j < fragment_constituent_counts[i]; ++j) {
			struct ffa_memory_region_constituent *constituent =
				&fragments[i][j];
			size_t size = constituent->page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(constituent->address));
			paddr_t pa_end = pa_add(pa_begin, size);

			dlog_verbose("%s: reverting fragment %lx size %zx\n",
				     __func__, pa_addr(pa_begin), size);

			if (constituent == end) {
				/*
				 * The last constituent is expected to be in
				 * the last fragment.
				 */
				assert(i == fragment_count - 1);
				break;
			}

			CHECK(arch_memory_unprotect(pa_begin, pa_end));
		}
	}
}
1127
1128/**
Jose Marinho09b1db82019-08-08 09:16:59 +01001129 * Updates a VM's page table such that the given set of physical address ranges
1130 * are mapped in the address space at the corresponding address ranges, in the
1131 * mode provided.
1132 *
J-Alves0a83dc22023-05-05 09:50:37 +01001133 * The enum ffa_map_action determines the action taken from a call to the
1134 * function below:
1135 * - If action is MAP_ACTION_CHECK, the page tables will be allocated from the
1136 * mpool but no mappings will actually be updated. This function must always
1137 * be called first with action set to MAP_ACTION_CHECK to check that it will
1138 * succeed before calling ffa_region_group_identity_map with whichever one of
1139 * the remaining actions, to avoid leaving the page table in a half-updated
1140 * state.
1141 * - The action MAP_ACTION_COMMIT allocates the page tables from the mpool, and
1142 * changes the memory mappings.
J-Alvescf6253e2024-01-03 13:48:48 +00001143 * - The action MAP_ACTION_CHECK_PROTECT extends the MAP_ACTION_CHECK with an
1144 * invocation to the monitor to update the security state of the memory,
1145 * to that of the SPMC.
1146 * - The action MAP_ACTION_COMMIT_UNPROTECT extends the MAP_ACTION_COMMIT
1147 * with a call into the monitor, to reset the security state of memory
1148 * that has priorly been mapped with the MAP_ACTION_CHECK_PROTECT action.
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001149 * vm_ptable_defrag should always be called after a series of page table
1150 * updates, whether they succeed or fail.
Jose Marinho09b1db82019-08-08 09:16:59 +01001151 *
J-Alvescf6253e2024-01-03 13:48:48 +00001152 * If all goes well, returns FFA_SUCCESS_32; or FFA_ERROR, with following
1153 * error codes:
1154 * - FFA_INVALID_PARAMETERS: invalid range of memory.
1155 * - FFA_DENIED:
1156 *
Jose Marinho09b1db82019-08-08 09:16:59 +01001157 * made to memory mappings.
1158 */
J-Alvescf6253e2024-01-03 13:48:48 +00001159struct ffa_value ffa_region_group_identity_map(
Andrew Walbranf4b51af2020-02-03 14:44:54 +00001160 struct vm_locked vm_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001161 struct ffa_memory_region_constituent **fragments,
1162 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alvescf6253e2024-01-03 13:48:48 +00001163 uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
1164 bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001165{
Andrew Walbranca808b12020-05-15 17:22:28 +01001166 uint32_t i;
1167 uint32_t j;
J-Alvescf6253e2024-01-03 13:48:48 +00001168 struct ffa_value ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001169
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001170 if (vm_locked.vm->el0_partition) {
1171 mode |= MM_MODE_USER | MM_MODE_NG;
1172 }
1173
Andrew Walbranca808b12020-05-15 17:22:28 +01001174 /* Iterate over the memory region constituents within each fragment. */
1175 for (i = 0; i < fragment_count; ++i) {
1176 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
J-Alves063ad832023-10-03 18:05:40 +01001177 struct ffa_memory_region_constituent *constituent =
1178 &fragments[i][j];
1179 size_t size = constituent->page_count * PAGE_SIZE;
Andrew Walbranca808b12020-05-15 17:22:28 +01001180 paddr_t pa_begin =
J-Alves063ad832023-10-03 18:05:40 +01001181 pa_from_ipa(ipa_init(constituent->address));
Andrew Walbranca808b12020-05-15 17:22:28 +01001182 paddr_t pa_end = pa_add(pa_begin, size);
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001183 uint32_t pa_bits =
1184 arch_mm_get_pa_bits(arch_mm_get_pa_range());
Federico Recanati4fd065d2021-12-13 20:06:23 +01001185
1186 /*
1187 * Ensure the requested region falls into system's PA
1188 * range.
1189 */
Jens Wiklander4f1880c2022-10-19 17:00:14 +02001190 if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
1191 ((pa_addr(pa_end) >> pa_bits) > 0)) {
Federico Recanati4fd065d2021-12-13 20:06:23 +01001192 dlog_error("Region is outside of PA Range\n");
J-Alvescf6253e2024-01-03 13:48:48 +00001193 return ffa_error(FFA_INVALID_PARAMETERS);
Federico Recanati4fd065d2021-12-13 20:06:23 +01001194 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001195
J-Alvescf6253e2024-01-03 13:48:48 +00001196 if (action <= MAP_ACTION_CHECK_PROTECT) {
1197 ret = ffa_region_group_check_actions(
1198 vm_locked, pa_begin, pa_end, ppool,
1199 mode, action, memory_protected);
J-Alves063ad832023-10-03 18:05:40 +01001200
1201 if (ret.func == FFA_ERROR_32 &&
1202 ffa_error_code(ret) == FFA_DENIED) {
1203 if (memory_protected != NULL) {
1204 assert(!*memory_protected);
1205 }
1206
1207 ffa_region_group_fragments_revert_protect(
1208 fragments,
1209 fragment_constituent_counts,
1210 i + 1, constituent);
1211 break;
1212 }
J-Alvescf6253e2024-01-03 13:48:48 +00001213 } else if (action >= MAP_ACTION_COMMIT &&
1214 action < MAP_ACTION_MAX) {
1215 ffa_region_group_commit_actions(
1216 vm_locked, pa_begin, pa_end, ppool,
1217 mode, action);
1218 ret = (struct ffa_value){
1219 .func = FFA_SUCCESS_32};
1220 } else {
1221 panic("%s: Unknown ffa_map_action.\n",
1222 __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001223 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001224 }
1225 }
1226
J-Alvescf6253e2024-01-03 13:48:48 +00001227 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001228}
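
/*
 * Illustrative sketch, not part of the upstream sources: the canonical
 * two-phase use of ffa_region_group_identity_map() described above. The
 * wrapper name `example_remap_fragments` is hypothetical; the point is
 * that MAP_ACTION_CHECK must succeed before MAP_ACTION_COMMIT is
 * attempted, so a failed update never leaves the page tables
 * half-updated.
 */
static struct ffa_value example_remap_fragments(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool)
{
	/* Phase 1: reserve page-table memory; no mappings are changed. */
	struct ffa_value ret = ffa_region_group_identity_map(
		vm_locked, fragments, fragment_constituent_counts,
		fragment_count, mode, ppool, MAP_ACTION_CHECK, NULL);

	if (ret.func == FFA_ERROR_32) {
		return ret;
	}

	/* Phase 2: commit; cannot fail, as phase 1 reserved everything. */
	CHECK(ffa_region_group_identity_map(
		      vm_locked, fragments, fragment_constituent_counts,
		      fragment_count, mode, ppool, MAP_ACTION_COMMIT, NULL)
		      .func == FFA_SUCCESS_32);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}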
1229
1230/**
1231 * Clears a region of physical memory by overwriting it with zeros. The data is
1232 * flushed from the cache so the memory has been cleared across the system.
1233 */
J-Alves7db32002021-12-14 14:44:50 +00001234static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
1235 uint32_t extra_mode_attributes)
Jose Marinho09b1db82019-08-08 09:16:59 +01001236{
1237 /*
Fuad Tabbaed294af2019-12-20 10:43:01 +00001238 * TODO: change this to a CPU local single page window rather than a
Jose Marinho09b1db82019-08-08 09:16:59 +01001239 * global mapping of the whole range. Such an approach will limit
1240 * the changes to stage-1 tables and will allow only local
1241 * invalidation.
1242 */
1243 bool ret;
1244 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
J-Alves7db32002021-12-14 14:44:50 +00001245 void *ptr = mm_identity_map(stage1_locked, begin, end,
1246 MM_MODE_W | (extra_mode_attributes &
1247 plat_ffa_other_world_mode()),
1248 ppool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001249 size_t size = pa_difference(begin, end);
1250
1251 if (!ptr) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001252 goto fail;
1253 }
1254
1255 memset_s(ptr, size, 0, size);
1256 arch_mm_flush_dcache(ptr, size);
1257 mm_unmap(stage1_locked, begin, end, ppool);
1258
1259 ret = true;
1260 goto out;
1261
1262fail:
1263 ret = false;
1264
1265out:
1266 mm_unlock_stage1(&stage1_locked);
1267
1268 return ret;
1269}
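
/*
 * Illustrative sketch (hypothetical helper, not upstream code): zeroing a
 * single page via clear_memory(). The page is temporarily mapped into the
 * hypervisor's stage-1 tables, zeroed, flushed from the data cache and
 * unmapped again. Passing 0 for the extra mode attributes assumes the
 * page lives in the current world's PAS.
 */
static bool example_clear_one_page(paddr_t page_begin, struct mpool *ppool)
{
	return clear_memory(page_begin, pa_add(page_begin, PAGE_SIZE), ppool,
			    0);
}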
1270
1271/**
1272 * Clears all constituents across the given fragments by overwriting them
1273 * with zeros. The data is flushed from the cache so the memory has been
1274 * cleared across the system.
1274 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001275static bool ffa_clear_memory_constituents(
J-Alves7db32002021-12-14 14:44:50 +00001276 uint32_t security_state_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01001277 struct ffa_memory_region_constituent **fragments,
1278 const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1279 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001280{
1281 struct mpool local_page_pool;
Andrew Walbranca808b12020-05-15 17:22:28 +01001282 uint32_t i;
Jose Marinho09b1db82019-08-08 09:16:59 +01001283 bool ret = false;
1284
1285 /*
1286 * Create a local pool so any freed memory can't be used by another
1287 * thread. This is to ensure each constituent that is mapped can be
1288 * unmapped again afterwards.
1289 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001290 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001291
Andrew Walbranca808b12020-05-15 17:22:28 +01001292 /* Iterate over the memory region constituents within each fragment. */
1293 for (i = 0; i < fragment_count; ++i) {
1294 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001295
J-Alves8457f932023-10-11 16:41:45 +01001296 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001297 size_t size = fragments[i][j].page_count * PAGE_SIZE;
1298 paddr_t begin =
1299 pa_from_ipa(ipa_init(fragments[i][j].address));
1300 paddr_t end = pa_add(begin, size);
1301
J-Alves7db32002021-12-14 14:44:50 +00001302 if (!clear_memory(begin, end, &local_page_pool,
1303 security_state_mode)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001304 /*
1305 * clear_memory() will defrag on failure, so
1306 * no need to do it here.
1307 */
1308 goto out;
1309 }
Jose Marinho09b1db82019-08-08 09:16:59 +01001310 }
1311 }
1312
Jose Marinho09b1db82019-08-08 09:16:59 +01001313 ret = true;
1314
1315out:
1316 mpool_fini(&local_page_pool);
1317 return ret;
1318}
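
/*
 * Illustrative sketch (hypothetical helper, not upstream code) of the
 * local-pool idiom used by ffa_clear_memory_constituents() and the other
 * update paths in this file: allocations are served from a private pool
 * with the global pool as fallback, so entries freed mid-operation cannot
 * be taken by another thread before a potential rollback needs them again.
 */
static void example_local_pool_scope(struct mpool *page_pool)
{
	struct mpool local_page_pool;

	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* ... page table updates that must be able to roll back ... */

	/* Return any entries still held locally to the fallback pool. */
	mpool_fini(&local_page_pool);
}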
1319
J-Alves5952d942022-12-22 16:03:00 +00001320static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
1321 ipaddr_t in_begin, ipaddr_t in_end)
1322{
1323 return (ipa_addr(begin) >= ipa_addr(in_begin) &&
1324 ipa_addr(begin) < ipa_addr(in_end)) ||
1325 (ipa_addr(end) <= ipa_addr(in_end) &&
1326 ipa_addr(end) > ipa_addr(in_begin));
1327}
1328
1329/**
1330 * Receives a memory range and looks for overlaps with the remaining
1331 * constituents of the memory share/lend/donate operation. Constituents are
1332 * assumed to be traversed in order, so that each call need not loop over
1333 * all the elements: the function only compares the given memory range with
1334 * those that follow it within the same fragment, and with subsequent
1335 * fragments from the same operation.
1335 */
1336static bool ffa_memory_check_overlap(
1337 struct ffa_memory_region_constituent **fragments,
1338 const uint32_t *fragment_constituent_counts,
1339 const uint32_t fragment_count, const uint32_t current_fragment,
1340 const uint32_t current_constituent)
1341{
1342 uint32_t i = current_fragment;
1343 uint32_t j = current_constituent;
1344 ipaddr_t current_begin = ipa_init(fragments[i][j].address);
1345 const uint32_t current_page_count = fragments[i][j].page_count;
1346 size_t current_size = current_page_count * PAGE_SIZE;
1347 ipaddr_t current_end = ipa_add(current_begin, current_size - 1);
1348
1349 if (current_size == 0 ||
1350 current_size > UINT64_MAX - ipa_addr(current_begin)) {
Karl Meakine8937d92024-03-19 16:04:25 +00001351 dlog_verbose("Invalid page count. Addr: %lx page_count: %x\n",
1352 current_begin.ipa, current_page_count);
J-Alves5952d942022-12-22 16:03:00 +00001353 return false;
1354 }
1355
1356 for (; i < fragment_count; i++) {
1357 j = (i == current_fragment) ? j + 1 : 0;
1358
1359 for (; j < fragment_constituent_counts[i]; j++) {
1360 ipaddr_t begin = ipa_init(fragments[i][j].address);
1361 const uint32_t page_count = fragments[i][j].page_count;
1362 size_t size = page_count * PAGE_SIZE;
1363 ipaddr_t end = ipa_add(begin, size - 1);
1364
1365 if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
1366 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001367 "Invalid page count. Addr: %lx "
J-Alves5952d942022-12-22 16:03:00 +00001368 "page_count: %x\n",
Karl Meakine8937d92024-03-19 16:04:25 +00001369 begin.ipa, page_count);
J-Alves5952d942022-12-22 16:03:00 +00001370 return false;
1371 }
1372
1373 /*
1374 * Check if the current range is within begin and end,
1375 * as well as the reverse; checking both directions lets
1376 * a single forward pass detect any overlap.
1377 */
1378 if (is_memory_range_within(begin, end, current_begin,
1379 current_end) ||
1380 is_memory_range_within(current_begin, current_end,
1381 begin, end)) {
1382 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001383 "Overlapping memory ranges: %#lx - "
1384 "%#lx with %#lx - %#lx\n",
J-Alves5952d942022-12-22 16:03:00 +00001385 ipa_addr(begin), ipa_addr(end),
1386 ipa_addr(current_begin),
1387 ipa_addr(current_end));
1388 return true;
1389 }
1390 }
1391 }
1392
1393 return false;
1394}
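
/*
 * Illustrative sketch (hypothetical helper, not upstream code) showing how
 * ffa_memory_check_overlap() is meant to be driven: each constituent is
 * compared only against the constituents that follow it, so one forward
 * pass over a single-fragment transaction detects any pairwise overlap.
 */
static bool example_single_fragment_overlaps(
	struct ffa_memory_region_constituent *constituents,
	uint32_t constituent_count)
{
	struct ffa_memory_region_constituent *fragments[] = {constituents};
	const uint32_t fragment_constituent_counts[] = {constituent_count};

	for (uint32_t j = 0; j < constituent_count; j++) {
		if (ffa_memory_check_overlap(fragments,
					     fragment_constituent_counts,
					     1, 0, j)) {
			return true;
		}
	}

	return false;
}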
1395
Jose Marinho09b1db82019-08-08 09:16:59 +01001396/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001397 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001398 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001399 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001400 *
1401 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001402 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001403 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001404 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001405 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1406 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001407 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001408 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001409 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001410 */
Daniel Boulbya76fd912024-02-22 14:22:15 +00001411static struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001412 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001413 struct ffa_memory_region_constituent **fragments,
1414 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001415 uint32_t composite_total_page_count, uint32_t share_func,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001416 struct ffa_memory_region *memory_region, struct mpool *page_pool,
1417 uint32_t *orig_from_mode_ret, bool *memory_protected)
Jose Marinho09b1db82019-08-08 09:16:59 +01001418{
Andrew Walbranca808b12020-05-15 17:22:28 +01001419 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001420 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001421 uint32_t orig_from_mode;
J-Alves460d36c2023-10-12 17:02:15 +01001422 uint32_t clean_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001423 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001424 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001425 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001426 uint32_t constituents_total_page_count = 0;
J-Alves460d36c2023-10-12 17:02:15 +01001427 enum ffa_map_action map_action = MAP_ACTION_CHECK;
Daniel Boulbya76fd912024-02-22 14:22:15 +00001428 bool clear = memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR;
Jose Marinho09b1db82019-08-08 09:16:59 +01001429
1430 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001431 * Make sure constituents are properly aligned to a 64-bit boundary. If
1432 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001433 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001434 for (i = 0; i < fragment_count; ++i) {
1435 if (!is_aligned(fragments[i], 8)) {
1436 dlog_verbose("Constituents not aligned.\n");
1437 return ffa_error(FFA_INVALID_PARAMETERS);
1438 }
J-Alves8f11cde2022-12-21 16:18:22 +00001439 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1440 constituents_total_page_count +=
1441 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001442 if (ffa_memory_check_overlap(
1443 fragments, fragment_constituent_counts,
1444 fragment_count, i, j)) {
1445 return ffa_error(FFA_INVALID_PARAMETERS);
1446 }
J-Alves8f11cde2022-12-21 16:18:22 +00001447 }
1448 }
1449
1450 if (constituents_total_page_count != composite_total_page_count) {
1451 dlog_verbose(
1452 "Composite page count differs from calculated page "
1453 "count from constituents.\n");
1454 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001455 }
1456
1457 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001458 * Check if the state transition is lawful for the sender, and ensure
1459 * that all constituents of a memory region being shared are in the
1460 * same state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001461 */
J-Alves460d36c2023-10-12 17:02:15 +01001462 ret = ffa_send_check_transition(
Daniel Boulbya76fd912024-02-22 14:22:15 +00001463 from_locked, share_func, memory_region, &orig_from_mode,
1464 fragments, fragment_constituent_counts, fragment_count,
Daniel Boulby4b846eb2024-05-23 17:32:23 +01001465 &from_mode, &map_action, clear);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001466 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001467 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001468 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001469 }
1470
Andrew Walbran37c574e2020-06-03 11:45:46 +01001471 if (orig_from_mode_ret != NULL) {
1472 *orig_from_mode_ret = orig_from_mode;
1473 }
1474
Jose Marinho09b1db82019-08-08 09:16:59 +01001475 /*
1476 * Create a local pool so any freed memory can't be used by another
1477 * thread. This is to ensure the original mapping can be restored if the
1478 * clear fails.
1479 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001480 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001481
1482 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001483 * First reserve all required memory for the new page table entries
1484 * without committing, to make sure the entire operation will succeed
1485 * without exhausting the page pool.
J-Alves460d36c2023-10-12 17:02:15 +01001486 * Provide the map_action as populated by 'ffa_send_check_transition'.
1487 * It may request memory to be protected.
Jose Marinho09b1db82019-08-08 09:16:59 +01001488 */
J-Alvescf6253e2024-01-03 13:48:48 +00001489 ret = ffa_region_group_identity_map(
1490 from_locked, fragments, fragment_constituent_counts,
J-Alves460d36c2023-10-12 17:02:15 +01001491 fragment_count, from_mode, page_pool, map_action,
1492 memory_protected);
J-Alvescf6253e2024-01-03 13:48:48 +00001493 if (ret.func == FFA_ERROR_32) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001494 goto out;
1495 }
1496
1497 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001498 * Update the mapping for the sender. This won't allocate because the
1499 * transaction was already prepared above, but may free pages in the
1500 * case that a whole block is being unmapped that was previously
1501 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001502 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001503 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001504 from_locked, fragments, fragment_constituent_counts,
1505 fragment_count, from_mode, &local_page_pool,
1506 MAP_ACTION_COMMIT, NULL)
1507 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001508
J-Alves460d36c2023-10-12 17:02:15 +01001509 /*
1510 * If the memory has been protected, it is now part of the secure PAS
1511 * (this happens for lend/donate from NWd to SWd). `orig_from_mode`
1512 * then has MM_MODE_NS set, so mask it out in `clean_mode` for the
1513 * SPM's S1 translation.
1514 * If the memory hasn't been protected, it remains in the non-secure
1515 * PAS (e.g. memory share from NWd to SWd) and the SPM needs to
1516 * perform a non-secure memory access; in that case `clean_mode` takes
1517 * the same mode as `orig_from_mode`.
1518 */
1519 clean_mode = (memory_protected != NULL && *memory_protected)
1520 ? orig_from_mode & ~plat_ffa_other_world_mode()
1521 : orig_from_mode;
1522
Jose Marinho09b1db82019-08-08 09:16:59 +01001523 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves460d36c2023-10-12 17:02:15 +01001524 if (clear && !ffa_clear_memory_constituents(
1525 clean_mode, fragments, fragment_constituent_counts,
1526 fragment_count, page_pool)) {
1527 map_action = (memory_protected != NULL && *memory_protected)
1528 ? MAP_ACTION_COMMIT_UNPROTECT
1529 : MAP_ACTION_COMMIT;
1530
Jose Marinho09b1db82019-08-08 09:16:59 +01001531 /*
1532 * On failure, roll back by returning memory to the sender. This
1533 * may allocate pages which were previously freed into
1534 * `local_page_pool` by the call above, but will never allocate
1535 * more pages than that so can never fail.
1536 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001537 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001538 from_locked, fragments,
1539 fragment_constituent_counts, fragment_count,
1540 orig_from_mode, &local_page_pool,
1541 MAP_ACTION_COMMIT, NULL)
1542 .func == FFA_SUCCESS_32);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001543 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001544 goto out;
1545 }
1546
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001547 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001548
1549out:
1550 mpool_fini(&local_page_pool);
1551
1552 /*
1553 * Tidy up the page table by reclaiming failed mappings (if there was an
1554 * error) or merging entries into blocks where possible (on success).
1555 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001556 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001557
1558 return ret;
1559}
1560
1561/**
1562 * Validates and maps memory shared from one VM to another.
1563 *
1564 * This function requires the calling context to hold the <to> lock.
1565 *
1566 * Returns:
1567 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001568 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001569 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001570 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001571 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001572 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001573 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001574struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001575 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001576 struct ffa_memory_region_constituent **fragments,
1577 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001578 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
J-Alves460d36c2023-10-12 17:02:15 +01001579 struct mpool *page_pool, uint32_t *response_mode, bool memory_protected)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001580{
Andrew Walbranca808b12020-05-15 17:22:28 +01001581 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001582 uint32_t to_mode;
1583 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001584 struct ffa_value ret;
J-Alvesfd206052023-05-22 16:45:00 +01001585 enum ffa_map_action map_action = MAP_ACTION_COMMIT;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001586
1587 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001588 * Make sure constituents are properly aligned to a 64-bit boundary. If
1589 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001590 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001591 for (i = 0; i < fragment_count; ++i) {
1592 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001593 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001594 return ffa_error(FFA_INVALID_PARAMETERS);
1595 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001596 }
1597
1598 /*
Daniel Boulby4b846eb2024-05-23 17:32:23 +01001599 * Ensure the sender has write permissions if the memory needs to be
1600 * cleared.
1601 */
1602 if ((sender_orig_mode & MM_MODE_W) == 0 && clear == true) {
1603 dlog_verbose(
1604 "Cannot zero memory when the sender does not have "
1605 "write access\n");
1606 return ffa_error(FFA_DENIED);
1607 }
1608
1609 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001610 * Check if the state transition is lawful for the recipient, and ensure
1611 * that all constituents of the memory region being retrieved are in the
1612 * same state.
1613 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001614 ret = ffa_retrieve_check_transition(
1615 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alvesfd206052023-05-22 16:45:00 +01001616 fragment_count, sender_orig_mode, &to_mode, memory_protected,
1617 &map_action);
J-Alves460d36c2023-10-12 17:02:15 +01001618
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001619 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001620 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001621 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001622 }
1623
1624 /*
1625 * Create a local pool so any freed memory can't be used by another
1626 * thread. This is to ensure the original mapping can be restored if the
1627 * clear fails.
1628 */
1629 mpool_init_with_fallback(&local_page_pool, page_pool);
1630
1631 /*
1632 * First reserve all required memory for the new page table entries in
1633 * the recipient page tables without committing, to make sure the entire
1634 * operation will succeed without exhausting the page pool.
1635 */
J-Alvescf6253e2024-01-03 13:48:48 +00001636 ret = ffa_region_group_identity_map(
1637 to_locked, fragments, fragment_constituent_counts,
1638 fragment_count, to_mode, page_pool, MAP_ACTION_CHECK, NULL);
1639 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001640 /* TODO: partial defrag of failed range. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001641 goto out;
1642 }
1643
1644 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001645 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001646 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1647 fragment_constituent_counts,
1648 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001649 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001650 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001651 goto out;
1652 }
1653
Jose Marinho09b1db82019-08-08 09:16:59 +01001654 /*
1655 * Complete the transfer by mapping the memory into the recipient. This
1656 * won't allocate because the transaction was already prepared above, so
1657 * it doesn't need to use the `local_page_pool`.
1658 */
J-Alvesfd206052023-05-22 16:45:00 +01001659 CHECK(ffa_region_group_identity_map(
1660 to_locked, fragments, fragment_constituent_counts,
1661 fragment_count, to_mode, page_pool, map_action, NULL)
J-Alvescf6253e2024-01-03 13:48:48 +00001662 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001663
J-Alves460d36c2023-10-12 17:02:15 +01001664 /* Return the mode used in mapping the memory in retriever's PT. */
1665 if (response_mode != NULL) {
1666 *response_mode = to_mode;
1667 }
1668
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001669 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001670
1671out:
1672 mpool_fini(&local_page_pool);
1673
1674 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001675 * Tidy up the page table by reclaiming failed mappings (if there was an
1676 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001677 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001678 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001679
1680 return ret;
1681}
1682
Andrew Walbran996d1d12020-05-27 14:08:43 +01001683static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001684 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001685 struct ffa_memory_region_constituent **fragments,
1686 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1687 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001688{
1689 uint32_t orig_from_mode;
1690 uint32_t from_mode;
1691 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001692 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001693
Andrew Walbranca808b12020-05-15 17:22:28 +01001694 ret = ffa_relinquish_check_transition(
1695 from_locked, &orig_from_mode, fragments,
1696 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001697 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001698 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001699 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001700 }
1701
1702 /*
1703 * Create a local pool so any freed memory can't be used by another
1704 * thread. This is to ensure the original mapping can be restored if the
1705 * clear fails.
1706 */
1707 mpool_init_with_fallback(&local_page_pool, page_pool);
1708
1709 /*
1710 * First reserve all required memory for the new page table entries
1711 * without committing, to make sure the entire operation will succeed
1712 * without exhausting the page pool.
1713 */
J-Alvescf6253e2024-01-03 13:48:48 +00001714 ret = ffa_region_group_identity_map(
1715 from_locked, fragments, fragment_constituent_counts,
1716 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK, NULL);
1717 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001718 goto out;
1719 }
1720
1721 /*
1722 * Update the mapping for the sender. This won't allocate because the
1723 * transaction was already prepared above, but may free pages in the
1724 * case that a whole block is being unmapped that was previously
1725 * partially mapped.
1726 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001727 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001728 from_locked, fragments, fragment_constituent_counts,
1729 fragment_count, from_mode, &local_page_pool,
1730 MAP_ACTION_COMMIT, NULL)
1731 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001732
1733 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001734 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001735 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1736 fragment_constituent_counts,
1737 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001738 /*
1739 * On failure, roll back by returning memory to the sender. This
1740 * may allocate pages which were previously freed into
1741 * `local_page_pool` by the call above, but will never allocate
1742 * more pages than that so can never fail.
1743 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001744 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001745 from_locked, fragments,
1746 fragment_constituent_counts, fragment_count,
1747 orig_from_mode, &local_page_pool,
1748 MAP_ACTION_COMMIT, NULL)
1749 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001750
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001751 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001752 goto out;
1753 }
1754
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001755 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001756
1757out:
1758 mpool_fini(&local_page_pool);
1759
1760 /*
1761 * Tidy up the page table by reclaiming failed mappings (if there was an
1762 * error) or merging entries into blocks where possible (on success).
1763 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001764 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001765
1766 return ret;
1767}
1768
1769/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001770 * Complete a memory sending operation by checking that it is valid, updating
1771 * the sender page table, and then either marking the share state as having
1772 * completed sending (on success) or freeing it (on failure).
1773 *
1774 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1775 */
J-Alvesfdd29272022-07-19 13:16:31 +01001776struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001777 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001778 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1779 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001780{
1781 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001782 struct ffa_composite_memory_region *composite;
Andrew Walbranca808b12020-05-15 17:22:28 +01001783 struct ffa_value ret;
1784
1785 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001786 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001787 assert(memory_region != NULL);
1788 composite = ffa_memory_region_get_composite(memory_region, 0);
1789 assert(composite != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001790
1791 /* Check that state is valid in sender page table and update. */
1792 ret = ffa_send_check_update(
1793 from_locked, share_state->fragments,
1794 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001795 share_state->fragment_count, composite->page_count,
Daniel Boulbya76fd912024-02-22 14:22:15 +00001796 share_state->share_func, memory_region, page_pool,
J-Alves460d36c2023-10-12 17:02:15 +01001797 orig_from_mode_ret, &share_state->memory_protected);
Andrew Walbranca808b12020-05-15 17:22:28 +01001798 if (ret.func != FFA_SUCCESS_32) {
1799 /*
1800 * Free share state, it failed to send so it can't be retrieved.
1801 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001802 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1803 __func__, ffa_func_name(ret.func),
1804 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001805 share_state_free(share_states, share_state, page_pool);
1806 return ret;
1807 }
1808
1809 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001810 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001811
J-Alvesee68c542020-10-29 17:48:20 +00001812 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001813}
1814
1815/**
Daniel Boulby9764ff62024-01-30 17:47:39 +00001816 * Check that the memory attributes match Hafnium expectations.
1817 * Cacheability:
1818 * - Normal Memory as `FFA_MEMORY_CACHE_WRITE_BACK`.
1819 * - Device memory as `FFA_MEMORY_DEV_NGNRNE`.
1820 *
1821 * Shareability:
1822 * - Inner Shareable.
Federico Recanatia98603a2021-12-20 18:04:03 +01001823 */
1824static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001825 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001826{
1827 enum ffa_memory_type memory_type;
1828 enum ffa_memory_cacheability cacheability;
1829 enum ffa_memory_shareability shareability;
1830
Karl Meakin84710f32023-10-12 15:14:49 +01001831 memory_type = attributes.type;
Daniel Boulby9764ff62024-01-30 17:47:39 +00001832 cacheability = attributes.cacheability;
1833 if (memory_type == FFA_MEMORY_NORMAL_MEM &&
1834 cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1835 dlog_verbose(
1836 "Normal Memory: Invalid cacheability %s, "
1837 "expected %s.\n",
1838 ffa_memory_cacheability_name(cacheability),
1839 ffa_memory_cacheability_name(
1840 FFA_MEMORY_CACHE_WRITE_BACK));
Federico Recanati3d953f32022-02-17 09:31:29 +01001841 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001842 }
Daniel Boulby9764ff62024-01-30 17:47:39 +00001843 if (memory_type == FFA_MEMORY_DEVICE_MEM &&
1844 cacheability != FFA_MEMORY_DEV_NGNRNE) {
1845 dlog_verbose(
1846 "Device Memory: Invalid cacheability %s, "
1847 "expected %s.\n",
1848 ffa_device_memory_cacheability_name(cacheability),
1849 ffa_device_memory_cacheability_name(
1850 FFA_MEMORY_DEV_NGNRNE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001851 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001852 }
1853
Karl Meakin84710f32023-10-12 15:14:49 +01001854 shareability = attributes.shareability;
Federico Recanatia98603a2021-12-20 18:04:03 +01001855 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001856 dlog_verbose("Invalid shareability %s, expected %s.\n",
1857 ffa_memory_shareability_name(shareability),
1858 ffa_memory_shareability_name(
1859 FFA_MEMORY_INNER_SHAREABLE));
Federico Recanati3d953f32022-02-17 09:31:29 +01001860 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001861 }
1862
1863 return (struct ffa_value){.func = FFA_SUCCESS_32};
1864}
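
/*
 * Illustrative sketch, not upstream code: attributes that satisfy
 * ffa_memory_attributes_validate() for normal memory, assuming the
 * bit-field layout implied by the accesses above (type, cacheability,
 * shareability).
 */
static ffa_memory_attributes_t example_valid_normal_memory_attributes(void)
{
	return (ffa_memory_attributes_t){
		.type = FFA_MEMORY_NORMAL_MEM,
		.cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
		.shareability = FFA_MEMORY_INNER_SHAREABLE,
	};
}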
1865
1866/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001867 * Check that the given `memory_region` represents a valid memory send request
1868 * of the given `share_func` type, and update the stored receiver
1869 * permissions if necessary (e.g. setting NX for shared memory).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001870 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001871 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001872 * not.
1873 */
J-Alves66652252022-07-06 09:49:51 +01001874struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001875 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1876 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001877 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001878{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001879 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001880 struct ffa_memory_access *receiver =
1881 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001882 uint64_t receivers_end;
1883 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001884 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001885 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001886 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001887 enum ffa_data_access data_access;
1888 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001889 enum ffa_memory_security security_state;
Karl Meakinf98b2aa2023-10-12 16:09:59 +01001890 enum ffa_memory_type type;
Federico Recanatia98603a2021-12-20 18:04:03 +01001891 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001892 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001893 memory_region->receivers_offset +
1894 memory_region->memory_access_desc_size +
1895 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001896
1897 if (fragment_length < minimum_first_fragment_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00001898 dlog_verbose("Fragment length %u too short (min %zu).\n",
1899 fragment_length, minimum_first_fragment_length);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001900 return ffa_error(FFA_INVALID_PARAMETERS);
1901 }
1902
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001903 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1904 "struct ffa_memory_region_constituent must be 16 bytes");
1905 if (!is_aligned(fragment_length,
1906 sizeof(struct ffa_memory_region_constituent)) ||
1907 !is_aligned(memory_share_length,
1908 sizeof(struct ffa_memory_region_constituent))) {
1909 dlog_verbose(
1910 "Fragment length %u or total length %u"
1911 " is not 16-byte aligned.\n",
1912 fragment_length, memory_share_length);
1913 return ffa_error(FFA_INVALID_PARAMETERS);
1914 }
1915
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001916 if (fragment_length > memory_share_length) {
1917 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001918 "Fragment length %zu greater than total length %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001919 (size_t)fragment_length, (size_t)memory_share_length);
1920 return ffa_error(FFA_INVALID_PARAMETERS);
1921 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001922
J-Alves95df0ef2022-12-07 10:09:48 +00001923 /* The sender must match the caller. */
1924 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1925 vm_id_is_current_world(memory_region->sender)) ||
1926 (vm_id_is_current_world(from_locked.vm->id) &&
1927 memory_region->sender != from_locked.vm->id)) {
1928 dlog_verbose("Invalid memory sender ID.\n");
1929 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001930 }
1931
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001932 if (memory_region->receiver_count == 0) {
1933 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001934 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001935 }
1936
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001937 /*
1938 * Ensure that the composite header is within the memory bounds and
1939 * doesn't overlap the first part of the message. Cast to uint64_t
1940 * to prevent overflow.
1941 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001942 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001943 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001944 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001945 min_length = receivers_end +
1946 sizeof(struct ffa_composite_memory_region) +
1947 sizeof(struct ffa_memory_region_constituent);
1948 if (min_length > memory_share_length) {
Karl Meakine8937d92024-03-19 16:04:25 +00001949 dlog_verbose("Share too short: got %zu but minimum is %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001950 (size_t)memory_share_length, (size_t)min_length);
1951 return ffa_error(FFA_INVALID_PARAMETERS);
1952 }
1953
1954 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001955 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001956
1957 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001958 * Check that the composite memory region descriptor is after the access
1959 * descriptors, is at least 16-byte aligned, and fits in the first
1960 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001961 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001962 if ((composite_memory_region_offset < receivers_end) ||
1963 (composite_memory_region_offset % 16 != 0) ||
1964 (composite_memory_region_offset >
1965 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1966 dlog_verbose(
1967 "Invalid composite memory region descriptor offset "
Karl Meakine8937d92024-03-19 16:04:25 +00001968 "%zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001969 (size_t)composite_memory_region_offset);
1970 return ffa_error(FFA_INVALID_PARAMETERS);
1971 }
1972
1973 /*
1974 * Compute the start of the constituent regions. Already checked
1975 * to be not more than fragment_length and thus not more than
1976 * memory_share_length.
1977 */
1978 constituents_start = composite_memory_region_offset +
1979 sizeof(struct ffa_composite_memory_region);
1980 constituents_length = memory_share_length - constituents_start;
1981
1982 /*
1983 * Check that the number of constituents is consistent with the length
1984 * of the constituent region.
1985 */
1986 composite = ffa_memory_region_get_composite(memory_region, 0);
1987 if ((constituents_length %
1988 sizeof(struct ffa_memory_region_constituent) !=
1989 0) ||
1990 ((constituents_length /
1991 sizeof(struct ffa_memory_region_constituent)) !=
1992 composite->constituent_count)) {
Karl Meakine8937d92024-03-19 16:04:25 +00001993 dlog_verbose("Invalid length %zu or composite offset %zu.\n",
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001994 (size_t)memory_share_length,
1995 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001996 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001997 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001998 if (fragment_length < memory_share_length &&
1999 fragment_length < HF_MAILBOX_SIZE) {
2000 dlog_warning(
2001 "Initial fragment length %d smaller than mailbox "
2002 "size.\n",
2003 fragment_length);
2004 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002005
Andrew Walbrana65a1322020-04-06 19:32:32 +01002006 /*
2007 * Clear is not allowed for memory sharing, as the sender still has
2008 * access to the memory.
2009 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002010 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
J-Alves95fbb312024-03-20 15:19:16 +00002011 (share_func == FFA_MEM_SHARE_32 ||
2012 share_func == FFA_MEM_SHARE_64)) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01002013 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002014 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002015 }
2016
2017 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002018 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01002019 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002020 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002021 }
2022
J-Alves363f5722022-04-25 17:37:37 +01002023 /* Check that the permissions are valid for each specified receiver. */
2024 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002025 struct ffa_memory_region_attributes receiver_permissions;
2026
2027 receiver = ffa_memory_region_get_receiver(memory_region, i);
2028 assert(receiver != NULL);
2029 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01002030 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002031 receiver_permissions.permissions;
2032 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01002033
2034 if (memory_region->sender == receiver_id) {
2035 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002036 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002037 }
Federico Recanati85090c42021-12-15 13:17:54 +01002038
J-Alves363f5722022-04-25 17:37:37 +01002039 for (uint32_t j = i + 1; j < memory_region->receiver_count;
2040 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002041 struct ffa_memory_access *other_receiver =
2042 ffa_memory_region_get_receiver(memory_region,
2043 j);
2044 assert(other_receiver != NULL);
2045
J-Alves363f5722022-04-25 17:37:37 +01002046 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002047 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01002048 dlog_verbose(
2049 "Repeated receiver(%x) in memory send "
2050 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002051 other_receiver->receiver_permissions
2052 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01002053 return ffa_error(FFA_INVALID_PARAMETERS);
2054 }
2055 }
2056
2057 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002058 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01002059 dlog_verbose(
2060 "All ffa_memory_access should point to the "
2061 "same composite memory region offset.\n");
2062 return ffa_error(FFA_INVALID_PARAMETERS);
2063 }
2064
Karl Meakin84710f32023-10-12 15:14:49 +01002065 data_access = permissions.data_access;
2066 instruction_access = permissions.instruction_access;
J-Alves363f5722022-04-25 17:37:37 +01002067 if (data_access == FFA_DATA_ACCESS_RESERVED ||
2068 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
2069 dlog_verbose(
2070 "Reserved value for receiver permissions "
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002071 "(data_access = %s, instruction_access = %s)\n",
2072 ffa_data_access_name(data_access),
2073 ffa_instruction_access_name(
2074 instruction_access));
J-Alves363f5722022-04-25 17:37:37 +01002075 return ffa_error(FFA_INVALID_PARAMETERS);
2076 }
2077 if (instruction_access !=
2078 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2079 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002080 "Invalid instruction access permissions %s "
2081 "for sending memory, expected %s.\n",
2082 ffa_instruction_access_name(instruction_access),
2083 ffa_instruction_access_name(
2084 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002085 return ffa_error(FFA_INVALID_PARAMETERS);
2086 }
J-Alves95fbb312024-03-20 15:19:16 +00002087 if (share_func == FFA_MEM_SHARE_32 ||
2088 share_func == FFA_MEM_SHARE_64) {
J-Alves363f5722022-04-25 17:37:37 +01002089 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2090 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002091 "Invalid data access permissions %s "
2092 "for sharing memory: the sender must "
2093 "specify RO or RW access.\n",
2094 ffa_data_access_name(data_access));
J-Alves363f5722022-04-25 17:37:37 +01002096 return ffa_error(FFA_INVALID_PARAMETERS);
2097 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002098 /*
2099 * According to section 10.10.3 of the FF-A v1.1 EAC0
2100 * spec, NX is required for share operations (but must
2101 * not be specified by the sender) so set it in the
2102 * copy that we store, ready to be returned to the
2103 * retriever.
2104 */
2105 if (vm_id_is_current_world(receiver_id)) {
Karl Meakin84710f32023-10-12 15:14:49 +01002106 permissions.instruction_access =
2107 FFA_INSTRUCTION_ACCESS_NX;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002108 receiver_permissions.permissions = permissions;
2109 }
J-Alves363f5722022-04-25 17:37:37 +01002110 }
J-Alves95fbb312024-03-20 15:19:16 +00002111 if ((share_func == FFA_MEM_LEND_32 ||
2112 share_func == FFA_MEM_LEND_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002113 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
2114 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002115 "Invalid data access permissions %s for "
2116 "lending memory: the sender must specify "
2117 "RO or RW access.\n",
2118 ffa_data_access_name(data_access));
J-Alves363f5722022-04-25 17:37:37 +01002120 return ffa_error(FFA_INVALID_PARAMETERS);
2121 }
2122
J-Alves95fbb312024-03-20 15:19:16 +00002123 if ((share_func == FFA_MEM_DONATE_32 ||
2124 share_func == FFA_MEM_DONATE_64) &&
J-Alves363f5722022-04-25 17:37:37 +01002125 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
2126 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002127 "Invalid data access permissions %s for "
2128 "donating memory, expected %s.\n",
2129 ffa_data_access_name(data_access),
2130 ffa_data_access_name(
2131 FFA_DATA_ACCESS_NOT_SPECIFIED));
J-Alves363f5722022-04-25 17:37:37 +01002132 return ffa_error(FFA_INVALID_PARAMETERS);
2133 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01002134 }
2135
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002136 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
Karl Meakin84710f32023-10-12 15:14:49 +01002137 security_state = memory_region->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002138 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2139 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002140 "Invalid security state %s for memory share operation, "
2141 "expected %s.\n",
2142 ffa_memory_security_name(security_state),
2143 ffa_memory_security_name(
2144 FFA_MEMORY_SECURITY_UNSPECIFIED));
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002145 return ffa_error(FFA_INVALID_PARAMETERS);
2146 }
2147
Federico Recanatid937f5e2021-12-20 17:38:23 +01002148 /*
J-Alves807794e2022-06-16 13:42:47 +01002149 * For a memory donate, or a lend with a single borrower, the memory
2150 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01002151 */
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002152 type = memory_region->attributes.type;
J-Alves807794e2022-06-16 13:42:47 +01002153 if (share_func == FFA_MEM_DONATE_32 ||
J-Alves95fbb312024-03-20 15:19:16 +00002154 share_func == FFA_MEM_DONATE_64 ||
2155 ((share_func == FFA_MEM_LEND_32 || share_func == FFA_MEM_LEND_64) &&
J-Alves807794e2022-06-16 13:42:47 +01002156 memory_region->receiver_count == 1)) {
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002157 if (type != FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves807794e2022-06-16 13:42:47 +01002158 dlog_verbose(
Karl Meakinf98b2aa2023-10-12 16:09:59 +01002159 "Invalid memory type %s for memory share "
2160 "operation, expected %s.\n",
2161 ffa_memory_type_name(type),
2162 ffa_memory_type_name(
2163 FFA_MEMORY_NOT_SPECIFIED_MEM));
J-Alves807794e2022-06-16 13:42:47 +01002164 return ffa_error(FFA_INVALID_PARAMETERS);
2165 }
2166 } else {
2167 /*
2168 * Check that sender's memory attributes match Hafnium
2169 * expectations: Normal Memory, Inner shareable, Write-Back
2170 * Read-Allocate Write-Allocate Cacheable.
2171 */
2172 ret = ffa_memory_attributes_validate(memory_region->attributes);
2173 if (ret.func != FFA_SUCCESS_32) {
2174 return ret;
2175 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01002176 }
2177
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002178 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01002179}
2180
2181/**
Andrew Walbranca808b12020-05-15 17:22:28 +01002182 * Gets the share state for continuing an operation to donate, lend or share
2183 * memory, and checks that it is a valid request.
2184 *
2185 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
2186 * not.
2187 */
J-Alvesfdd29272022-07-19 13:16:31 +01002188struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01002189 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01002190 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002191 struct mpool *page_pool)
2192{
2193 struct ffa_memory_share_state *share_state;
2194 struct ffa_memory_region *memory_region;
2195
Daniel Boulbya2f8c662021-11-26 17:52:53 +00002196 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01002197
2198 /*
2199 * Look up the share state by handle and make sure that the VM ID
2200 * matches.
2201 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01002202 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00002203 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002204 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002205 "Invalid handle %#lx for memory send continuation.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002206 handle);
2207 return ffa_error(FFA_INVALID_PARAMETERS);
2208 }
2209 memory_region = share_state->memory_region;
2210
J-Alvesfdd29272022-07-19 13:16:31 +01002211 if (vm_id_is_current_world(from_vm_id) &&
2212 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002213 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
2214 return ffa_error(FFA_INVALID_PARAMETERS);
2215 }
2216
2217 if (share_state->sending_complete) {
2218 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00002219 "Sending of memory handle %#lx is already complete.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01002220 handle);
2221 return ffa_error(FFA_INVALID_PARAMETERS);
2222 }
2223
2224 if (share_state->fragment_count == MAX_FRAGMENTS) {
2225 /*
2226 * Log a warning as this is a sign that MAX_FRAGMENTS should
2227 * probably be increased.
2228 */
2229 dlog_warning(
Karl Meakine8937d92024-03-19 16:04:25 +00002230 "Too many fragments for memory share with handle %#lx; "
Andrew Walbranca808b12020-05-15 17:22:28 +01002231 "only %d supported.\n",
2232 handle, MAX_FRAGMENTS);
2233 /* Free share state, as it's not possible to complete it. */
2234 share_state_free(share_states, share_state, page_pool);
2235 return ffa_error(FFA_NO_MEMORY);
2236 }
2237
2238 *share_state_ret = share_state;
2239
2240 return (struct ffa_value){.func = FFA_SUCCESS_32};
2241}
2242
2243/**
J-Alves95df0ef2022-12-07 10:09:48 +00002244 * Checks if there is at least one receiver from the other world.
2245 */
J-Alvesfdd29272022-07-19 13:16:31 +01002246bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00002247 struct ffa_memory_region *memory_region)
2248{
2249 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002250 struct ffa_memory_access *receiver =
2251 ffa_memory_region_get_receiver(memory_region, i);
2252 assert(receiver != NULL);
2253 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
2254
2255 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00002256 return true;
2257 }
2258 }
2259 return false;
2260}
2261
2262/**
J-Alves9da280b2022-12-21 14:55:39 +00002263 * Validates a call to donate, lend or share memory in which Hafnium is the
2264 * designated allocator of the memory handle. In practice, this also means
2265 * Hafnium is responsible for managing the state structures for the transaction.
2266 * If Hafnium is the SPMC, it should allocate the memory handle when either the
2267 * sender is an SP or there is at least one borrower that is an SP.
2268 * If Hafnium is the hypervisor, it should allocate the memory handle when
2269 * the operation involves only NWd VMs.
2270 *
2271 * If validation goes well, Hafnium updates the stage-2 page tables of the
2272 * sender. Validation consists of checking if the message length and number of
2273 * memory region constituents match, and if the transition is valid for the
2274 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00002275 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002276 * Assumes that the caller has already found and locked the sender VM and copied
2277 * the memory region descriptor from the sender's TX buffer to a freshly
2278 * allocated page from Hafnium's internal pool. The caller must have also
2279 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002280 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002281 * This function takes ownership of the `memory_region` passed in and will free
2282 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01002283 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002284struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002285 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002286 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002287 uint32_t fragment_length, uint32_t share_func,
2288 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01002289{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002290 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002291 struct share_states_locked share_states;
2292 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01002293
2294 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002295 * If there is an error validating the `memory_region` then we need to
2296 * free it because we own it but we won't be storing it in a share state
2297 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01002298 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002299 ret = ffa_memory_send_validate(from_locked, memory_region,
2300 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01002301 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002302 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002303 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002304 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002305 }
2306
Andrew Walbrana65a1322020-04-06 19:32:32 +01002307 /* Set flag for share function, ready to be retrieved later. */
2308 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002309 case FFA_MEM_SHARE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002310 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002311 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002312 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002313 break;
J-Alves95fbb312024-03-20 15:19:16 +00002314 case FFA_MEM_LEND_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002315 case FFA_MEM_LEND_32:
2316 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002317 break;
J-Alves95fbb312024-03-20 15:19:16 +00002318 case FFA_MEM_DONATE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002319 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002320 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002321 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002322 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01002323 }
2324
Andrew Walbranca808b12020-05-15 17:22:28 +01002325 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002326 /*
2327 * Allocate a share state before updating the page table. Otherwise if
2328 * updating the page table succeeded but allocating the share state
2329 * failed then it would leave the memory in a state where nobody could
2330 * get it back.
2331 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01002332 share_state = allocate_share_state(share_states, share_func,
2333 memory_region, fragment_length,
2334 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00002335 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002336 dlog_verbose("Failed to allocate share state.\n");
2337 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01002338 ret = ffa_error(FFA_NO_MEMORY);
2339 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002340 }
2341
Andrew Walbranca808b12020-05-15 17:22:28 +01002342 if (fragment_length == memory_share_length) {
2343 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00002344 ret = ffa_memory_send_complete(
2345 from_locked, share_states, share_state, page_pool,
2346 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002347 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002348 /*
2349		 * Use the sender ID from 'memory_region', assuming that
2350		 * at this point it has been validated:
2351		 * - MBZ at a virtual FF-A instance.
2352 */
J-Alves19e20cf2023-08-02 12:48:55 +01002353 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002354 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2355 ? memory_region->sender
2356 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01002357 ret = (struct ffa_value){
2358 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002359 .arg1 = (uint32_t)memory_region->handle,
2360 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002361 .arg3 = fragment_length,
2362 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002363 }
2364
2365out:
2366 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002367 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002368 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002369}
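/*
 * Control-flow sketch (illustrative, not part of this file): a sender whose
 * descriptor does not fit in a single message drives the fragmentation
 * protocol that ffa_memory_send() starts above. The handle is reassembled
 * from arg1/arg2 exactly as packed in the FFA_MEM_FRAG_RX return value.
 *
 *	struct ffa_value ret = ffa_memory_send(from_locked, region,
 *					       total_len, frag_len,
 *					       share_func, page_pool);
 *	while (ret.func == FFA_MEM_FRAG_RX_32) {
 *		ffa_memory_handle_t handle =
 *			(uint32_t)ret.arg1 | ((uint64_t)ret.arg2 << 32);
 *		// The sender copies the next fragment to its TX buffer and
 *		// invokes FFA_MEM_FRAG_TX with `handle`; Hafnium then routes
 *		// it to ffa_memory_send_continue() below.
 *	}
 */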
2370
2371/**
J-Alves8505a8a2022-06-15 18:10:18 +01002372 * Continues an operation to donate, lend or share memory to a VM from the current
2373 * world. If this is the last fragment then checks that the transition is valid
2374 * for the type of memory sending operation and updates the stage-2 page tables
2375 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002376 *
2377 * Assumes that the caller has already found and locked the sender VM and copied
2378 * the memory region descriptor from the sender's TX buffer to a freshly
2379 * allocated page from Hafnium's internal pool.
2380 *
2381 * This function takes ownership of the `fragment` passed in; it must not be
2382 * freed by the caller.
2383 */
2384struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2385 void *fragment,
2386 uint32_t fragment_length,
2387 ffa_memory_handle_t handle,
2388 struct mpool *page_pool)
2389{
2390 struct share_states_locked share_states = share_states_lock();
2391 struct ffa_memory_share_state *share_state;
2392 struct ffa_value ret;
2393 struct ffa_memory_region *memory_region;
2394
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002395 CHECK(is_aligned(fragment,
2396 alignof(struct ffa_memory_region_constituent)));
2397 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2398 0) {
2399 dlog_verbose("Fragment length %u misaligned.\n",
2400 fragment_length);
2401 ret = ffa_error(FFA_INVALID_PARAMETERS);
2402 goto out_free_fragment;
2403 }
2404
Andrew Walbranca808b12020-05-15 17:22:28 +01002405 ret = ffa_memory_send_continue_validate(share_states, handle,
2406 &share_state,
2407 from_locked.vm->id, page_pool);
2408 if (ret.func != FFA_SUCCESS_32) {
2409 goto out_free_fragment;
2410 }
2411 memory_region = share_state->memory_region;
2412
J-Alves95df0ef2022-12-07 10:09:48 +00002413 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002414 dlog_error(
2415 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002416 "other world. This should never happen, and indicates "
2417 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01002418 "EL3 code.\n");
2419 ret = ffa_error(FFA_INVALID_PARAMETERS);
2420 goto out_free_fragment;
2421 }
2422
2423 /* Add this fragment. */
2424 share_state->fragments[share_state->fragment_count] = fragment;
2425 share_state->fragment_constituent_counts[share_state->fragment_count] =
2426 fragment_length / sizeof(struct ffa_memory_region_constituent);
2427 share_state->fragment_count++;
2428
2429 /* Check whether the memory send operation is now ready to complete. */
2430 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002431 ret = ffa_memory_send_complete(
2432 from_locked, share_states, share_state, page_pool,
2433 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002434 } else {
2435 ret = (struct ffa_value){
2436 .func = FFA_MEM_FRAG_RX_32,
2437 .arg1 = (uint32_t)handle,
2438 .arg2 = (uint32_t)(handle >> 32),
2439 .arg3 = share_state_next_fragment_offset(share_states,
2440 share_state)};
2441 }
2442 goto out;
2443
2444out_free_fragment:
2445 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002446
2447out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002448 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002449 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002450}
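/*
 * Worked example (illustrative): continuation fragments carry only
 * constituents, so assuming the usual 16-byte
 * `struct ffa_memory_region_constituent` and a 4 KiB mailbox, a single
 * FFA_MEM_FRAG_TX can add up to 4096 / 16 = 256 constituents, which is the
 * count recorded above as:
 *
 *	fragment_length / sizeof(struct ffa_memory_region_constituent)
 */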
2451
Andrew Walbranca808b12020-05-15 17:22:28 +01002452/** Clean up after the receiver has finished retrieving a memory region. */
2453static void ffa_memory_retrieve_complete(
2454 struct share_states_locked share_states,
2455 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2456{
J-Alves95fbb312024-03-20 15:19:16 +00002457 if (share_state->share_func == FFA_MEM_DONATE_32 ||
2458 share_state->share_func == FFA_MEM_DONATE_64) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002459 /*
2460 * Memory that has been donated can't be relinquished,
2461 * so no need to keep the share state around.
2462 */
2463 share_state_free(share_states, share_state, page_pool);
2464 dlog_verbose("Freed share state for donate.\n");
2465 }
2466}
2467
J-Alves2d8457f2022-10-05 11:06:41 +01002468/**
2469 * Initialises the given memory region descriptor to be used for an
2470 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2471 * fragment.
2472 * The memory region descriptor is initialized according to retriever's
2473 * FF-A version.
2474 *
2475 * Returns true on success, or false if the given constituents won't all fit in
2476 * the first fragment.
2477 */
2478static bool ffa_retrieved_memory_region_init(
Karl Meakin0e617d92024-04-05 12:55:22 +01002479 void *response, enum ffa_version ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002480 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002481 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002482 ffa_memory_access_permissions_t permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002483 struct ffa_memory_access *receivers, size_t receiver_count,
2484 uint32_t memory_access_desc_size, uint32_t page_count,
2485 uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002486 const struct ffa_memory_region_constituent constituents[],
2487 uint32_t fragment_constituent_count, uint32_t *total_length,
2488 uint32_t *fragment_length)
2489{
2490 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002491 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002492 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002493 uint32_t constituents_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002494
2495 assert(response != NULL);
2496
Karl Meakin0e617d92024-04-05 12:55:22 +01002497 if (ffa_version == FFA_VERSION_1_0) {
J-Alves2d8457f2022-10-05 11:06:41 +01002498 struct ffa_memory_region_v1_0 *retrieve_response =
2499 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002500 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002501
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002502 ffa_memory_region_init_header_v1_0(retrieve_response, sender,
2503 attributes, flags, handle, 0,
2504 receiver_count);
J-Alves2d8457f2022-10-05 11:06:41 +01002505
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002506 receiver = (struct ffa_memory_access_v1_0 *)
2507 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002508 receiver_count = retrieve_response->receiver_count;
2509
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002510 for (uint32_t i = 0; i < receiver_count; i++) {
2511 ffa_id_t receiver_id =
2512 receivers[i].receiver_permissions.receiver;
2513 ffa_memory_receiver_flags_t recv_flags =
2514 receivers[i].receiver_permissions.flags;
2515
2516 /*
2517			 * Initialized here because memory retrieve responses
2518			 * currently expect a single borrower to be specified.
2519 */
2520 ffa_memory_access_init_v1_0(
Karl Meakin84710f32023-10-12 15:14:49 +01002521				&receiver[i], receiver_id, permissions.data_access,
2522 permissions.instruction_access, recv_flags);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002523 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002524
2525 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002526 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002527 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2528 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002529
2530 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2531 retrieve_response, 0);
2532 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002533 struct ffa_memory_region *retrieve_response =
2534 (struct ffa_memory_region *)response;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002535 struct ffa_memory_access *retrieve_response_receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002536
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002537 ffa_memory_region_init_header(
2538 retrieve_response, sender, attributes, flags, handle, 0,
2539 receiver_count, memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002540
2541 /*
2542		 * Note that `sizeof(struct ffa_memory_region)` and
2543		 * `sizeof(struct ffa_memory_access)` must both be multiples of
2544		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2545 * guaranteed that the offset we calculate here is aligned to a
2546 * 64-bit boundary and so 64-bit values can be copied without
2547 * alignment faults.
2548 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002549 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002550 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002551 (uint32_t)(receiver_count *
2552 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002553
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002554 retrieve_response_receivers =
2555 ffa_memory_region_get_receiver(retrieve_response, 0);
2556 assert(retrieve_response_receivers != NULL);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002557
2558 /*
2559		 * Initialized here because memory retrieve responses currently
2560		 * expect a single borrower to be specified.
2561 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002562 memcpy_s(retrieve_response_receivers,
2563 sizeof(struct ffa_memory_access) * receiver_count,
2564 receivers,
2565 sizeof(struct ffa_memory_access) * receiver_count);
2566
2567 retrieve_response_receivers->composite_memory_region_offset =
2568 composite_offset;
2569
J-Alves2d8457f2022-10-05 11:06:41 +01002570 composite_memory_region =
2571 ffa_memory_region_get_composite(retrieve_response, 0);
2572 }
2573
J-Alves2d8457f2022-10-05 11:06:41 +01002574 assert(composite_memory_region != NULL);
2575
J-Alves2d8457f2022-10-05 11:06:41 +01002576 composite_memory_region->page_count = page_count;
2577 composite_memory_region->constituent_count = total_constituent_count;
2578 composite_memory_region->reserved_0 = 0;
2579
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002580 constituents_offset =
2581 composite_offset + sizeof(struct ffa_composite_memory_region);
J-Alves2d8457f2022-10-05 11:06:41 +01002582 if (constituents_offset +
2583 fragment_constituent_count *
2584 sizeof(struct ffa_memory_region_constituent) >
2585 response_max_size) {
2586 return false;
2587 }
2588
2589 for (i = 0; i < fragment_constituent_count; ++i) {
2590 composite_memory_region->constituents[i] = constituents[i];
2591 }
2592
2593 if (total_length != NULL) {
2594 *total_length =
2595 constituents_offset +
2596 composite_memory_region->constituent_count *
2597 sizeof(struct ffa_memory_region_constituent);
2598 }
2599 if (fragment_length != NULL) {
2600 *fragment_length =
2601 constituents_offset +
2602 fragment_constituent_count *
2603 sizeof(struct ffa_memory_region_constituent);
2604 }
2605
2606 return true;
2607}
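/*
 * Layout sketch of the v1.1+ response built above (offsets as computed in
 * this helper):
 *
 *	[struct ffa_memory_region header]
 *	[receiver_count * memory_access_desc_size]    <- receivers_offset
 *	[struct ffa_composite_memory_region]          <- composite_offset
 *	[constituents[0..fragment_constituent_count)] <- constituents_offset
 *
 * so, for instance:
 *
 *	*fragment_length = constituents_offset +
 *			   fragment_constituent_count *
 *				   sizeof(struct ffa_memory_region_constituent);
 */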
2608
J-Alves96de29f2022-04-26 16:05:24 +01002609/**
2610 * Validates the permissions requested by the retriever against those
2611 * specified by the lender of the memory share operation. Optionally, through
2612 * the `permissions` argument, outputs the permissions to be used for the S2 mapping.
J-Alvesdcad8992023-09-15 14:10:35 +01002613 * Returns FFA_SUCCESS if all the fields are valid, otherwise FFA_ERROR with error code:
2614 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2615 * specification for each ABI.
2616 * - FFA_DENIED -> if the permissions specified by the retriever are not
2617 * less permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002618 */
J-Alvesdcad8992023-09-15 14:10:35 +01002619static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2620 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002621 enum ffa_data_access requested_data_access,
2622 enum ffa_instruction_access sent_instruction_access,
2623 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002624 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002625{
2626 switch (sent_data_access) {
2627 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2628 case FFA_DATA_ACCESS_RW:
2629 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2630 requested_data_access == FFA_DATA_ACCESS_RW) {
2631 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002632 permissions->data_access = FFA_DATA_ACCESS_RW;
J-Alves96de29f2022-04-26 16:05:24 +01002633 }
2634 break;
2635 }
2636 /* Intentional fall-through. */
2637 case FFA_DATA_ACCESS_RO:
2638 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2639 requested_data_access == FFA_DATA_ACCESS_RO) {
2640 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002641 permissions->data_access = FFA_DATA_ACCESS_RO;
J-Alves96de29f2022-04-26 16:05:24 +01002642 }
2643 break;
2644 }
2645 dlog_verbose(
2646 "Invalid data access requested; sender specified "
2647 "permissions %#x but receiver requested %#x.\n",
2648 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002649 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002650 case FFA_DATA_ACCESS_RESERVED:
2651 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2652 "checked before this point.");
2653 }
2654
J-Alvesdcad8992023-09-15 14:10:35 +01002655 /*
2656	 * For operations with a single borrower, if it is an FFA_MEM_LEND
2657	 * or FFA_MEM_DONATE the retriever should have specified the
2658 * instruction permissions it wishes to receive.
2659 */
2660 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00002661 case FFA_MEM_SHARE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002662 case FFA_MEM_SHARE_32:
2663 if (requested_instruction_access !=
2664 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2665 dlog_verbose(
2666 "%s: for share instruction permissions must "
2667 "NOT be specified.\n",
2668 __func__);
2669 return ffa_error(FFA_INVALID_PARAMETERS);
2670 }
2671 break;
J-Alves95fbb312024-03-20 15:19:16 +00002672 case FFA_MEM_LEND_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002673 case FFA_MEM_LEND_32:
2674 /*
2675 * For operations with multiple borrowers only permit XN
2676 * permissions, and both Sender and borrower should have used
2677 * FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2678 */
2679 if (multiple_borrowers) {
2680 if (requested_instruction_access !=
2681 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2682 dlog_verbose(
2683 "%s: lend/share/donate with multiple "
2684 "borrowers "
2685 "instruction permissions must NOT be "
2686 "specified.\n",
2687 __func__);
2688 return ffa_error(FFA_INVALID_PARAMETERS);
2689 }
2690 break;
2691 }
2692 /* Fall through if the operation targets a single borrower. */
J-Alves95fbb312024-03-20 15:19:16 +00002693 case FFA_MEM_DONATE_64:
J-Alvesdcad8992023-09-15 14:10:35 +01002694 case FFA_MEM_DONATE_32:
2695 if (!multiple_borrowers &&
2696 requested_instruction_access ==
2697 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2698 dlog_verbose(
2699 "%s: for lend/donate with single borrower "
2700 "instruction permissions must be speficified "
2701 "by borrower\n",
2702 __func__);
2703 return ffa_error(FFA_INVALID_PARAMETERS);
2704 }
2705 break;
2706 default:
2707 panic("%s: Wrong func id provided.\n", __func__);
2708 }
2709
J-Alves96de29f2022-04-26 16:05:24 +01002710 switch (sent_instruction_access) {
2711 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2712 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002713 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002714 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002715 permissions->instruction_access =
2716 FFA_INSTRUCTION_ACCESS_X;
J-Alves96de29f2022-04-26 16:05:24 +01002717 }
2718 break;
2719 }
J-Alvesdcad8992023-09-15 14:10:35 +01002720 /*
2721 * Fall through if requested permissions are less
2722 * permissive than those provided by the sender.
2723 */
J-Alves96de29f2022-04-26 16:05:24 +01002724 case FFA_INSTRUCTION_ACCESS_NX:
2725 if (requested_instruction_access ==
2726 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2727 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2728 if (permissions != NULL) {
Karl Meakin84710f32023-10-12 15:14:49 +01002729 permissions->instruction_access =
2730 FFA_INSTRUCTION_ACCESS_NX;
J-Alves96de29f2022-04-26 16:05:24 +01002731 }
2732 break;
2733 }
2734 dlog_verbose(
2735 "Invalid instruction access requested; sender "
2736 "specified permissions %#x but receiver requested "
2737 "%#x.\n",
2738 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002739 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002740 case FFA_INSTRUCTION_ACCESS_RESERVED:
2741 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2742 "be checked before this point.");
2743 }
2744
J-Alvesdcad8992023-09-15 14:10:35 +01002745 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002746}
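/*
 * Summary of the data access check above, as a table mapping (sent,
 * requested) pairs to the granted permission:
 *
 *	sent \ requested   NOT_SPECIFIED   RO    RW
 *	NOT_SPECIFIED/RW   RW              RO    RW
 *	RO                 RO              RO    FFA_DENIED
 *
 * e.g. a borrower requesting RW access to memory lent with RO data access
 * is denied.
 */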
2747
2748/**
2749 * Validate the receivers' permissions in the retrieve request against those
2750 * specified by the lender.
2751 * In the `permissions` argument, returns the permissions to set at S2 for
2752 * the caller of the FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002753 * The function looks into the flag to bypass multiple borrower checks:
2754 * - If not set returns FFA_SUCCESS if all specified permissions are valid.
2755 * - If set returns FFA_SUCCESS if the descriptor contains the permissions
2756 * to the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2757 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002758 */
2759static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2760 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002761 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002762 ffa_memory_access_permissions_t *permissions,
2763 struct ffa_memory_access **receiver_ret, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002764{
2765 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002766 bool bypass_multi_receiver_check =
2767 (retrieve_request->flags &
2768 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002769 const uint32_t region_receiver_count = memory_region->receiver_count;
2770 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002771
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002772 assert(receiver_ret != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002773 assert(permissions != NULL);
2774
Karl Meakin84710f32023-10-12 15:14:49 +01002775 *permissions = (ffa_memory_access_permissions_t){0};
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002776
J-Alves3456e032023-07-20 12:20:05 +01002777 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002778 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002779 dlog_verbose(
2780 "Retrieve request should contain same list of "
2781 "borrowers, as specified by the lender.\n");
2782 return ffa_error(FFA_INVALID_PARAMETERS);
2783 }
2784 } else {
2785 if (retrieve_request->receiver_count != 1) {
2786 dlog_verbose(
2787 "Set bypass multiple borrower check, receiver "
2788 "list must be sized 1 (%x)\n",
2789 memory_region->receiver_count);
2790 return ffa_error(FFA_INVALID_PARAMETERS);
2791 }
J-Alves96de29f2022-04-26 16:05:24 +01002792 }
2793
2794 retrieve_receiver_index = retrieve_request->receiver_count;
2795
J-Alves96de29f2022-04-26 16:05:24 +01002796 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2797 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002798 struct ffa_memory_access *retrieve_request_receiver =
2799 ffa_memory_region_get_receiver(retrieve_request, i);
2800 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002801 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002802 retrieve_request_receiver->receiver_permissions
2803 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002804 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002805 retrieve_request_receiver->receiver_permissions
2806 .receiver;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002807 struct ffa_memory_access *receiver;
2808 uint32_t mem_region_receiver_index;
2809 bool permissions_RO;
2810 bool clear_memory_flags;
J-Alvesf220d572024-04-24 22:15:14 +01002811 /*
2812 * If the call is at the virtual FF-A instance the caller's
2813 * ID must match an entry in the memory access list.
2814 * In the SPMC, one of the specified receivers could be from
2815 * the NWd.
2816 */
2817 bool found_to_id = vm_id_is_current_world(to_vm_id)
2818 ? (current_receiver_id == to_vm_id)
2819 : (!vm_id_is_current_world(
2820 current_receiver_id));
J-Alves96de29f2022-04-26 16:05:24 +01002821
J-Alves3456e032023-07-20 12:20:05 +01002822 if (bypass_multi_receiver_check && !found_to_id) {
2823 dlog_verbose(
2824 "Bypass multiple borrower check for id %x.\n",
2825 current_receiver_id);
2826 continue;
2827 }
2828
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002829 if (retrieve_request_receiver->composite_memory_region_offset !=
2830 0U) {
2831 dlog_verbose(
2832 "Retriever specified address ranges not "
2833 "supported (got offset %d).\n",
2834 retrieve_request_receiver
2835 ->composite_memory_region_offset);
2836 return ffa_error(FFA_INVALID_PARAMETERS);
2837 }
2838
J-Alves96de29f2022-04-26 16:05:24 +01002839 /*
2840 * Find the current receiver in the transaction descriptor from
2841 * sender.
2842 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002843 mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002844 ffa_memory_region_get_receiver_index(
2845 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002846
2847 if (mem_region_receiver_index ==
2848 memory_region->receiver_count) {
2849 dlog_verbose("%s: receiver %x not found\n", __func__,
2850 current_receiver_id);
2851 return ffa_error(FFA_DENIED);
2852 }
2853
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002854 receiver = ffa_memory_region_get_receiver(
2855 memory_region, mem_region_receiver_index);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002856 assert(receiver != NULL);
2857
2858 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002859
2860 if (found_to_id) {
2861 retrieve_receiver_index = i;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002862
2863 *receiver_ret = receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002864 }
2865
2866 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002867 * Check if retrieve request memory access list is valid:
2868 * - The retrieve request complies with the specification.
2869 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002870 */
J-Alvesdcad8992023-09-15 14:10:35 +01002871 ret = ffa_memory_retrieve_is_memory_access_valid(
Karl Meakin84710f32023-10-12 15:14:49 +01002872 func_id, sent_permissions.data_access,
2873 requested_permissions.data_access,
2874 sent_permissions.instruction_access,
2875 requested_permissions.instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002876 found_to_id ? permissions : NULL,
2877 region_receiver_count > 1);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002878
J-Alvesdcad8992023-09-15 14:10:35 +01002879 if (ret.func != FFA_SUCCESS_32) {
2880 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002881 }
2882
Karl Meakin84710f32023-10-12 15:14:49 +01002883 permissions_RO =
2884 (permissions->data_access == FFA_DATA_ACCESS_RO);
J-Alvese5262372024-03-27 11:02:03 +00002885 clear_memory_flags =
2886 (retrieve_request->flags &
2887 (FFA_MEMORY_REGION_FLAG_CLEAR |
2888 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002889
J-Alves96de29f2022-04-26 16:05:24 +01002890 /*
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002891		 * Can't request the partition manager to clear memory if
2892		 * only provided with RO permissions.
J-Alves96de29f2022-04-26 16:05:24 +01002893 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002894 if (found_to_id && permissions_RO && clear_memory_flags) {
J-Alves96de29f2022-04-26 16:05:24 +01002895 dlog_verbose(
2896 "Receiver has RO permissions can not request "
2897 "clear.\n");
2898 return ffa_error(FFA_DENIED);
2899 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002900
2901 /*
2902 * Check the impdef in the retrieve_request matches the value in
2903 * the original memory send.
2904 */
2905 if (ffa_version_from_memory_access_desc_size(
2906 memory_region->memory_access_desc_size) >=
Karl Meakin0e617d92024-04-05 12:55:22 +01002907 FFA_VERSION_1_2 &&
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002908 ffa_version_from_memory_access_desc_size(
2909 retrieve_request->memory_access_desc_size) >=
Karl Meakin0e617d92024-04-05 12:55:22 +01002910 FFA_VERSION_1_2) {
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002911 if (receiver->impdef.val[0] !=
2912 retrieve_request_receiver->impdef.val[0] ||
2913 receiver->impdef.val[1] !=
2914 retrieve_request_receiver->impdef.val[1]) {
2915 dlog_verbose(
2916 "Impdef value in memory send does not "
2917 "match retrieve request value "
Karl Meakine8937d92024-03-19 16:04:25 +00002918 "send value %#lx %#lx retrieve request "
2919 "value %#lx %#lx\n",
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002920 receiver->impdef.val[0],
2921 receiver->impdef.val[1],
2922 retrieve_request_receiver->impdef
2923 .val[0],
2924 retrieve_request_receiver->impdef
2925 .val[1]);
2926 return ffa_error(FFA_INVALID_PARAMETERS);
2927 }
2928 }
J-Alves96de29f2022-04-26 16:05:24 +01002929 }
2930
2931 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2932 dlog_verbose(
2933 "Retrieve request does not contain caller's (%x) "
2934 "permissions\n",
2935 to_vm_id);
2936 return ffa_error(FFA_INVALID_PARAMETERS);
2937 }
2938
2939 return (struct ffa_value){.func = FFA_SUCCESS_32};
2940}
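/*
 * Usage sketch (illustrative): a borrower that only knows its own entry in
 * the transaction can set the bypass flag and describe itself alone; the
 * checks above then ignore the remaining borrowers in the lender's
 * descriptor:
 *
 *	retrieve_request->flags |=
 *		FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK;
 *	retrieve_request->receiver_count = 1;
 *	// The single entry must name the caller of FFA_MEM_RETRIEVE_REQ.
 */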
2941
J-Alvesa9cd7e32022-07-01 13:49:33 +01002942/*
2943 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2944 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2945 * of a pending memory sharing operation whose allocator is the SPM, for
2946 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2947 * the memory region descriptor of the retrieve request must be zeroed with the
2948 * exception of the sender ID and handle.
2949 */
J-Alves4f0d9c12024-01-17 17:23:11 +00002950bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request,
2951 struct vm_locked to_locked)
J-Alvesa9cd7e32022-07-01 13:49:33 +01002952{
2953 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
Karl Meakin84710f32023-10-12 15:14:49 +01002954 request->attributes.shareability == 0U &&
2955 request->attributes.cacheability == 0U &&
2956 request->attributes.type == 0U &&
2957 request->attributes.security == 0U && request->flags == 0U &&
J-Alvesa9cd7e32022-07-01 13:49:33 +01002958 request->tag == 0U && request->receiver_count == 0U &&
2959 plat_ffa_memory_handle_allocated_by_current_world(
2960 request->handle);
2961}
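/*
 * Sketch of the minimal descriptor this predicate accepts (illustrative;
 * `lender_id` and `handle` are placeholders):
 *
 *	struct ffa_memory_region req = {0};
 *	req.sender = lender_id;
 *	req.handle = handle;	// must be allocated by this world
 *	// attributes, flags, tag and receiver_count are all left as zero.
 */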
2962
2963/*
2964 * Helper to reset the count of fragments retrieved by the hypervisor.
2965 */
2966static void ffa_memory_retrieve_complete_from_hyp(
2967 struct ffa_memory_share_state *share_state)
2968{
2969 if (share_state->hypervisor_fragment_count ==
2970 share_state->fragment_count) {
2971 share_state->hypervisor_fragment_count = 0;
2972 }
2973}
2974
J-Alves089004f2022-07-13 14:25:44 +01002975/**
J-Alves4f0d9c12024-01-17 17:23:11 +00002976 * Prepares the ffa_value to return for the memory retrieve response.
2977 */
2978static struct ffa_value ffa_memory_retrieve_resp(uint32_t total_length,
2979 uint32_t fragment_length)
2980{
2981 return (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
2982 .arg1 = total_length,
2983 .arg2 = fragment_length};
2984}
2985
2986/**
J-Alves089004f2022-07-13 14:25:44 +01002987 * Validate the memory region descriptor provided by the borrower on
2988 * FFA_MEM_RETRIEVE_REQ against the saved memory region provided by the
2989 * lender at the memory sharing call.
2990 */
2991static struct ffa_value ffa_memory_retrieve_validate(
J-Alves4f0d9c12024-01-17 17:23:11 +00002992 ffa_id_t to_id, struct ffa_memory_region *retrieve_request,
2993 uint32_t retrieve_request_length,
J-Alves089004f2022-07-13 14:25:44 +01002994 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2995 uint32_t share_func)
2996{
2997 ffa_memory_region_flags_t transaction_type =
2998 retrieve_request->flags &
2999 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003000 enum ffa_memory_security security_state;
J-Alves4f0d9c12024-01-17 17:23:11 +00003001 const uint64_t memory_access_desc_size =
3002 retrieve_request->memory_access_desc_size;
3003 const uint32_t expected_retrieve_request_length =
3004 retrieve_request->receivers_offset +
3005 (uint32_t)(retrieve_request->receiver_count *
3006 memory_access_desc_size);
J-Alves089004f2022-07-13 14:25:44 +01003007
3008 assert(retrieve_request != NULL);
3009 assert(memory_region != NULL);
3010 assert(receiver_index != NULL);
J-Alves089004f2022-07-13 14:25:44 +01003011
J-Alves4f0d9c12024-01-17 17:23:11 +00003012 if (retrieve_request_length != expected_retrieve_request_length) {
3013 dlog_verbose(
3014 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
3015 "but was %d.\n",
3016 expected_retrieve_request_length,
3017 retrieve_request_length);
3018 return ffa_error(FFA_INVALID_PARAMETERS);
3019 }
3020
3021 if (retrieve_request->sender != memory_region->sender) {
3022 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003023			"Sender of retrieve request doesn't match the "
J-Alves4f0d9c12024-01-17 17:23:11 +00003024			"lender of memory with handle %#lx.\n",
3025			memory_region->handle);
3026 return ffa_error(FFA_DENIED);
3027 }
3028
3029 /*
3030 * The SPMC can only process retrieve requests to memory share
3031 * operations with one borrower from the other world. It can't
3032 * determine the ID of the NWd VM that invoked the retrieve
3033 * request interface call. It relies on the hypervisor to
3034 * validate the caller's ID against that provided in the
3035 * `receivers` list of the retrieve response.
3036 * In case there is only one borrower from the NWd in the
3037	 * transaction descriptor, record its ID in `to_id` for
3038	 * later use, and validate it in the retrieve request message.
3039 * This limitation is due to the fact SPMC can't determine the
3040 * index in the memory share structures state to update.
3041 */
3042 if (to_id == HF_HYPERVISOR_VM_ID) {
3043 uint32_t other_world_count = 0;
3044
3045 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3046 struct ffa_memory_access *receiver =
3047 ffa_memory_region_get_receiver(retrieve_request,
J-Alvesf220d572024-04-24 22:15:14 +01003048 i);
J-Alves4f0d9c12024-01-17 17:23:11 +00003049 assert(receiver != NULL);
3050
J-Alvesf220d572024-04-24 22:15:14 +01003051 if (!vm_id_is_current_world(
3052 receiver->receiver_permissions.receiver)) {
J-Alves4f0d9c12024-01-17 17:23:11 +00003053 other_world_count++;
J-Alvesf220d572024-04-24 22:15:14 +01003054 /* Set it to be used later. */
3055 to_id = receiver->receiver_permissions.receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003056 }
3057 }
3058
3059 if (other_world_count > 1) {
3060 dlog_verbose(
3061 "Support one receiver from the other "
3062 "world.\n");
3063 return ffa_error(FFA_NOT_SUPPORTED);
3064 }
3065 }
J-Alves089004f2022-07-13 14:25:44 +01003066 /*
3067 * Check that the transaction type expected by the receiver is
3068 * correct, if it has been specified.
3069 */
3070 if (transaction_type !=
3071 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
3072 transaction_type != (memory_region->flags &
3073 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
3074 dlog_verbose(
3075 "Incorrect transaction type %#x for "
Karl Meakine8937d92024-03-19 16:04:25 +00003076 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003077 transaction_type,
3078 memory_region->flags &
3079 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
3080 retrieve_request->handle);
3081 return ffa_error(FFA_INVALID_PARAMETERS);
3082 }
3083
3084 if (retrieve_request->tag != memory_region->tag) {
3085 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003086 "Incorrect tag %lu for FFA_MEM_RETRIEVE_REQ, expected "
3087 "%lu for handle %#lx.\n",
J-Alves089004f2022-07-13 14:25:44 +01003088 retrieve_request->tag, memory_region->tag,
3089 retrieve_request->handle);
3090 return ffa_error(FFA_INVALID_PARAMETERS);
3091 }
3092
J-Alves4f0d9c12024-01-17 17:23:11 +00003093 *receiver_index =
3094 ffa_memory_region_get_receiver_index(memory_region, to_id);
J-Alves089004f2022-07-13 14:25:44 +01003095
3096 if (*receiver_index == memory_region->receiver_count) {
3097 dlog_verbose(
3098 "Incorrect receiver VM ID %d for "
Karl Meakine8937d92024-03-19 16:04:25 +00003099			"FFA_MEM_RETRIEVE_REQ, handle %#lx.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003100 to_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01003101 return ffa_error(FFA_INVALID_PARAMETERS);
3102 }
3103
3104 if ((retrieve_request->flags &
3105 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
3106 dlog_verbose(
3107 "Retriever specified 'address range alignment 'hint' "
3108 "not supported.\n");
3109 return ffa_error(FFA_INVALID_PARAMETERS);
3110 }
3111 if ((retrieve_request->flags &
3112 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
3113 dlog_verbose(
3114 "Bits 8-5 must be zero in memory region's flags "
3115 "(address range alignment hint not supported).\n");
3116 return ffa_error(FFA_INVALID_PARAMETERS);
3117 }
3118
3119 if ((retrieve_request->flags & ~0x7FF) != 0U) {
3120 dlog_verbose(
3121 "Bits 31-10 must be zero in memory region's flags.\n");
3122 return ffa_error(FFA_INVALID_PARAMETERS);
3123 }
3124
J-Alves95fbb312024-03-20 15:19:16 +00003125 if ((share_func == FFA_MEM_SHARE_32 ||
3126 share_func == FFA_MEM_SHARE_64) &&
J-Alves089004f2022-07-13 14:25:44 +01003127 (retrieve_request->flags &
3128 (FFA_MEMORY_REGION_FLAG_CLEAR |
3129 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
3130 dlog_verbose(
3131 "Memory Share operation can't clean after relinquish "
3132 "memory region.\n");
3133 return ffa_error(FFA_INVALID_PARAMETERS);
3134 }
3135
3136 /*
3137 * If the borrower needs the memory to be cleared before mapping
3138 * to its address space, the sender should have set the flag
3139 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
3140 * FFA_DENIED.
3141 */
3142 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
3143 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
3144 dlog_verbose(
3145 "Borrower needs memory cleared. Sender needs to set "
3146 "flag for clearing memory.\n");
3147 return ffa_error(FFA_DENIED);
3148 }
3149
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003150 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
Karl Meakin84710f32023-10-12 15:14:49 +01003151 security_state = retrieve_request->attributes.security;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01003152 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
3153 dlog_verbose(
3154 "Invalid security state for memory retrieve request "
3155 "operation.\n");
3156 return ffa_error(FFA_INVALID_PARAMETERS);
3157 }
3158
J-Alves089004f2022-07-13 14:25:44 +01003159 /*
3160 * If memory type is not specified, bypass validation of memory
3161 * attributes in the retrieve request. The retriever is expecting to
3162 * obtain this information from the SPMC.
3163 */
Karl Meakin84710f32023-10-12 15:14:49 +01003164 if (retrieve_request->attributes.type == FFA_MEMORY_NOT_SPECIFIED_MEM) {
J-Alves089004f2022-07-13 14:25:44 +01003165 return (struct ffa_value){.func = FFA_SUCCESS_32};
3166 }
3167
3168 /*
3169 * Ensure receiver's attributes are compatible with how
3170 * Hafnium maps memory: Normal Memory, Inner shareable,
3171 * Write-Back Read-Allocate Write-Allocate Cacheable.
3172 */
3173 return ffa_memory_attributes_validate(retrieve_request->attributes);
3174}
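/*
 * Retrieve request flags checked above, sketched from the masks used in
 * this function and the FF-A v1.1 flags layout (bit positions for the
 * named flags are per the spec, not defined in this file):
 *
 *	bit 0      FFA_MEMORY_REGION_FLAG_CLEAR
 *	bit 2      FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH
 *	bits 4:3   transaction type
 *	bits 8:5   address range alignment hint (rejected here)
 *	bit 9      FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID (rejected here)
 *	bit 10     FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK
 *	bits 31:11 reserved, must be zero (flags & ~0x7FF)
 */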
3175
J-Alves4f0d9c12024-01-17 17:23:11 +00003176static struct ffa_value ffa_partition_retrieve_request(
3177 struct share_states_locked share_states,
3178 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3179 struct ffa_memory_region *retrieve_request,
3180 uint32_t retrieve_request_length, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003181{
Karl Meakin84710f32023-10-12 15:14:49 +01003182 ffa_memory_access_permissions_t permissions = {0};
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003183 uint32_t memory_to_mode;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003184 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01003185 struct ffa_composite_memory_region *composite;
3186 uint32_t total_length;
3187 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01003188 ffa_id_t receiver_id = to_locked.vm->id;
J-Alves4f0d9c12024-01-17 17:23:11 +00003189 bool is_retrieve_complete = false;
J-Alves4f0d9c12024-01-17 17:23:11 +00003190 const uint64_t memory_access_desc_size =
Daniel Boulbyde974ca2023-12-12 13:53:31 +00003191 retrieve_request->memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003192 uint32_t receiver_index;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003193 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003194 ffa_memory_handle_t handle = retrieve_request->handle;
Karl Meakin84710f32023-10-12 15:14:49 +01003195 ffa_memory_attributes_t attributes = {0};
J-Alves460d36c2023-10-12 17:02:15 +01003196 uint32_t retrieve_mode = 0;
J-Alves4f0d9c12024-01-17 17:23:11 +00003197 struct ffa_memory_region *memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003198
J-Alves96de29f2022-04-26 16:05:24 +01003199 if (!share_state->sending_complete) {
3200 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003201 "Memory with handle %#lx not fully sent, can't "
J-Alves96de29f2022-04-26 16:05:24 +01003202 "retrieve.\n",
3203 handle);
J-Alves4f0d9c12024-01-17 17:23:11 +00003204 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alves96de29f2022-04-26 16:05:24 +01003205 }
3206
J-Alves4f0d9c12024-01-17 17:23:11 +00003207 /*
3208	 * Validate the retrieve request against what was sent by the
3209	 * sender. The function outputs in `receiver_index` the caller's
3210	 * index in the provided memory region's receiver list.
3211 */
3212 ret = ffa_memory_retrieve_validate(
3213 receiver_id, retrieve_request, retrieve_request_length,
3214 memory_region, &receiver_index, share_state->share_func);
J-Alves089004f2022-07-13 14:25:44 +01003215
J-Alves4f0d9c12024-01-17 17:23:11 +00003216 if (ret.func != FFA_SUCCESS_32) {
3217 return ret;
J-Alves089004f2022-07-13 14:25:44 +01003218 }
J-Alves96de29f2022-04-26 16:05:24 +01003219
J-Alves4f0d9c12024-01-17 17:23:11 +00003220 /*
3221 * Validate the requested permissions against the sent
3222 * permissions.
3223	 * Outputs the permissions to give to the retriever in its
3224	 * stage-2 page tables.
3225 */
3226 ret = ffa_memory_retrieve_validate_memory_access_list(
3227 memory_region, retrieve_request, receiver_id, &permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003228 &receiver, share_state->share_func);
J-Alves4f0d9c12024-01-17 17:23:11 +00003229 if (ret.func != FFA_SUCCESS_32) {
3230 return ret;
3231 }
3232
3233 memory_to_mode = ffa_memory_permissions_to_mode(
3234 permissions, share_state->sender_orig_mode);
3235
3236 ret = ffa_retrieve_check_update(
3237 to_locked, share_state->fragments,
3238 share_state->fragment_constituent_counts,
3239 share_state->fragment_count, memory_to_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003240 share_state->share_func, false, page_pool, &retrieve_mode,
3241 share_state->memory_protected);
J-Alves4f0d9c12024-01-17 17:23:11 +00003242
3243 if (ret.func != FFA_SUCCESS_32) {
3244 return ret;
3245 }
3246
3247 share_state->retrieved_fragment_count[receiver_index] = 1;
3248
3249 is_retrieve_complete =
3250 share_state->retrieved_fragment_count[receiver_index] ==
3251 share_state->fragment_count;
3252
J-Alvesb5084cf2022-07-06 14:20:12 +01003253 /* VMs acquire the RX buffer from SPMC. */
3254 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3255
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003256 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003257 * Copy response to RX buffer of caller and deliver the message.
3258 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003259 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003260 composite = ffa_memory_region_get_composite(memory_region, 0);
J-Alves4f0d9c12024-01-17 17:23:11 +00003261
Andrew Walbranca808b12020-05-15 17:22:28 +01003262 /*
J-Alves460d36c2023-10-12 17:02:15 +01003263 * Set the security state in the memory retrieve response attributes
3264 * if specified by the target mode.
3265 */
3266 attributes = plat_ffa_memory_security_mode(memory_region->attributes,
3267 retrieve_mode);
3268
3269 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003270 * Constituents which we received in the first fragment should
3271 * always fit in the first fragment we are sending, because the
3272 * header is the same size in both cases and we have a fixed
3273 * message buffer size. So `ffa_retrieved_memory_region_init`
3274 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01003275 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003276
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003277	/* Return to the retriever the permissions granted above. */
3278 receiver->receiver_permissions.permissions = permissions;
3279
3280 /*
3281 * Prepare the memory region descriptor for the retrieve response.
3282 * Provide the pointer to the receiver tracked in the share state
J-Alves7b9cc432024-04-04 10:57:17 +01003283 * structures.
3284 * At this point the retrieve request descriptor from the partition
3285 * has been processed. The `retrieve_request` is expected to be in
3286 * a region that is handled by the SPMC/Hyp. Reuse the same buffer to
3287 * prepare the retrieve response before copying it to the RX buffer of
3288 * the caller.
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003289 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003290 CHECK(ffa_retrieved_memory_region_init(
J-Alves7b9cc432024-04-04 10:57:17 +01003291 retrieve_request, to_locked.vm->ffa_version, HF_MAILBOX_SIZE,
3292 memory_region->sender, attributes, memory_region->flags, handle,
3293 permissions, receiver, 1, memory_access_desc_size,
3294 composite->page_count, composite->constituent_count,
3295 share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01003296 share_state->fragment_constituent_counts[0], &total_length,
3297 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01003298
J-Alves7b9cc432024-04-04 10:57:17 +01003299 /*
3300 * Copy the message from the buffer into the partition's mailbox.
3301	 * The operation might fail unexpectedly due to a change in the physical
3302	 * address space (PAS), or improper values for the sizes of the structures.
3303 */
3304 if (!memcpy_trapped(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3305 retrieve_request, fragment_length)) {
3306 dlog_error(
3307 "%s: aborted the copy of response to RX buffer of "
3308 "%x.\n",
3309 __func__, to_locked.vm->id);
3310 return ffa_error(FFA_ABORTED);
3311 }
3312
J-Alves4f0d9c12024-01-17 17:23:11 +00003313 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003314 ffa_memory_retrieve_complete(share_states, share_state,
3315 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003316 }
J-Alves4f0d9c12024-01-17 17:23:11 +00003317
3318 return ffa_memory_retrieve_resp(total_length, fragment_length);
3319}
3320
3321static struct ffa_value ffa_hypervisor_retrieve_request(
3322 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3323 struct ffa_memory_region *retrieve_request)
3324{
3325 struct ffa_value ret;
3326 struct ffa_composite_memory_region *composite;
3327 uint32_t total_length;
3328 uint32_t fragment_length;
J-Alves4f0d9c12024-01-17 17:23:11 +00003329 ffa_memory_attributes_t attributes;
J-Alves7b6ab612024-01-24 09:54:54 +00003330 uint64_t memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003331 struct ffa_memory_region *memory_region;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003332 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003333 ffa_memory_handle_t handle = retrieve_request->handle;
3334
J-Alves4f0d9c12024-01-17 17:23:11 +00003335 memory_region = share_state->memory_region;
3336
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003337 assert(to_locked.vm->id == HF_HYPERVISOR_VM_ID);
3338
J-Alves7b6ab612024-01-24 09:54:54 +00003339 switch (to_locked.vm->ffa_version) {
Karl Meakin0e617d92024-04-05 12:55:22 +01003340 case FFA_VERSION_1_2:
J-Alves7b6ab612024-01-24 09:54:54 +00003341 memory_access_desc_size = sizeof(struct ffa_memory_access);
3342 break;
Karl Meakin0e617d92024-04-05 12:55:22 +01003343 case FFA_VERSION_1_0:
3344 case FFA_VERSION_1_1:
J-Alves7b6ab612024-01-24 09:54:54 +00003345 memory_access_desc_size = sizeof(struct ffa_memory_access_v1_0);
3346 break;
3347 default:
3348 panic("version not supported: %x\n", to_locked.vm->ffa_version);
3349 }
3350
J-Alves4f0d9c12024-01-17 17:23:11 +00003351 if (share_state->hypervisor_fragment_count != 0U) {
3352 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003353 "Memory with handle %#lx already retrieved by "
J-Alves4f0d9c12024-01-17 17:23:11 +00003354 "the hypervisor.\n",
3355 handle);
3356 return ffa_error(FFA_DENIED);
3357 }
3358
3359 share_state->hypervisor_fragment_count = 1;
3360
3361 ffa_memory_retrieve_complete_from_hyp(share_state);
3362
3363 /* VMs acquire the RX buffer from SPMC. */
3364 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3365
3366 /*
3367 * Copy response to RX buffer of caller and deliver the message.
3368 * This must be done before the share_state is (possibly) freed.
3369 */
3370 composite = ffa_memory_region_get_composite(memory_region, 0);
3371
3372 /*
3373 * Constituents which we received in the first fragment should
3374 * always fit in the first fragment we are sending, because the
3375 * header is the same size in both cases and we have a fixed
3376 * message buffer size. So `ffa_retrieved_memory_region_init`
3377 * should never fail.
3378 */
3379
3380 /*
3381 * Set the security state in the memory retrieve response attributes
3382 * if specified by the target mode.
3383 */
3384 attributes = plat_ffa_memory_security_mode(
3385 memory_region->attributes, share_state->sender_orig_mode);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003386
3387 receiver = ffa_memory_region_get_receiver(memory_region, 0);
3388
J-Alves7b9cc432024-04-04 10:57:17 +01003389 /*
3390 * At this point the `retrieve_request` is expected to be in a section
3391 * managed by the hypervisor.
3392 */
J-Alves4f0d9c12024-01-17 17:23:11 +00003393 CHECK(ffa_retrieved_memory_region_init(
J-Alves7b9cc432024-04-04 10:57:17 +01003394 retrieve_request, to_locked.vm->ffa_version, HF_MAILBOX_SIZE,
3395 memory_region->sender, attributes, memory_region->flags, handle,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003396 receiver->receiver_permissions.permissions, receiver,
3397 memory_region->receiver_count, memory_access_desc_size,
J-Alves4f0d9c12024-01-17 17:23:11 +00003398 composite->page_count, composite->constituent_count,
3399 share_state->fragments[0],
3400 share_state->fragment_constituent_counts[0], &total_length,
3401 &fragment_length));
3402
J-Alves7b9cc432024-04-04 10:57:17 +01003403 /*
3404 * Copy the message from the buffer into the hypervisor's mailbox.
3405	 * The operation might fail unexpectedly due to a change in the PAS, or
3406 * improper values for the sizes of the structures.
3407 */
3408 if (!memcpy_trapped(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3409 retrieve_request, fragment_length)) {
3410 dlog_error(
3411 "%s: aborted the copy of response to RX buffer of "
3412 "%x.\n",
3413 __func__, to_locked.vm->id);
3414 return ffa_error(FFA_ABORTED);
3415 }
3416
J-Alves4f0d9c12024-01-17 17:23:11 +00003417 return ffa_memory_retrieve_resp(total_length, fragment_length);
3418}
3419
3420struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
3421 struct ffa_memory_region *retrieve_request,
3422 uint32_t retrieve_request_length,
3423 struct mpool *page_pool)
3424{
3425 ffa_memory_handle_t handle = retrieve_request->handle;
3426 struct share_states_locked share_states;
3427 struct ffa_memory_share_state *share_state;
3428 struct ffa_value ret;
3429
3430 dump_share_states();
3431
3432 share_states = share_states_lock();
3433 share_state = get_share_state(share_states, handle);
3434 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003435 dlog_verbose("Invalid handle %#lx for FFA_MEM_RETRIEVE_REQ.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00003436 handle);
3437 ret = ffa_error(FFA_INVALID_PARAMETERS);
3438 goto out;
3439 }
3440
3441 if (is_ffa_hypervisor_retrieve_request(retrieve_request, to_locked)) {
3442 ret = ffa_hypervisor_retrieve_request(share_state, to_locked,
3443 retrieve_request);
3444 } else {
3445 ret = ffa_partition_retrieve_request(
3446 share_states, share_state, to_locked, retrieve_request,
3447 retrieve_request_length, page_pool);
3448 }
3449
3450 /* Track use of the RX buffer if the handling has succeeded. */
3451 if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
3452 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
3453 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
3454 }
3455
Andrew Walbranca808b12020-05-15 17:22:28 +01003456out:
3457 share_states_unlock(&share_states);
3458 dump_share_states();
3459 return ret;
3460}
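/*
 * Call flow sketch (illustrative): a borrower retrieval whose descriptor
 * fits in a single fragment.
 *
 *	1. The borrower writes the retrieve request to its TX buffer and
 *	   invokes FFA_MEM_RETRIEVE_REQ.
 *	2. ffa_memory_retrieve() validates the request, maps the region in
 *	   the borrower's stage-2 tables and writes the
 *	   FFA_MEM_RETRIEVE_RESP descriptor to the borrower's RX buffer.
 *	3. If arg2 (fragment_length) < arg1 (total_length) in the returned
 *	   ffa_value, the borrower fetches the remaining fragments with
 *	   FFA_MEM_FRAG_RX, handled by ffa_memory_retrieve_continue() below.
 */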
3461
J-Alves5da37d92022-10-24 16:33:48 +01003462/**
3463 * Determine the expected fragment offset according to the FF-A version of
3464 * the caller.
3465 */
3466static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
3467 struct ffa_memory_region *memory_region,
Karl Meakin0e617d92024-04-05 12:55:22 +01003468 uint32_t retrieved_constituents_count, enum ffa_version ffa_version)
J-Alves5da37d92022-10-24 16:33:48 +01003469{
3470 uint32_t expected_fragment_offset;
3471 uint32_t composite_constituents_offset;
3472
Karl Meakin0e617d92024-04-05 12:55:22 +01003473 if (ffa_version >= FFA_VERSION_1_1) {
J-Alves5da37d92022-10-24 16:33:48 +01003474 /*
3475		 * Hafnium stores memory regions in FF-A v1.1 format, so we
3476		 * can retrieve the constituents offset from the descriptor.
3477 */
3478 composite_constituents_offset =
3479 ffa_composite_constituent_offset(memory_region, 0);
Karl Meakin0e617d92024-04-05 12:55:22 +01003480 } else if (ffa_version == FFA_VERSION_1_0) {
J-Alves5da37d92022-10-24 16:33:48 +01003481 /*
3482 * If retriever is FF-A v1.0, determine the composite offset
3483 * as it is expected to have been configured in the
3484 * retrieve response.
3485 */
3486 composite_constituents_offset =
3487 sizeof(struct ffa_memory_region_v1_0) +
3488 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003489 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01003490 sizeof(struct ffa_composite_memory_region);
3491 } else {
3492 panic("%s received an invalid FF-A version.\n", __func__);
3493 }
3494
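	/*
	 * The offsets seen by the retriever are relative to a retrieve
	 * response that exposed a single memory access descriptor
	 * (RECEIVERS_COUNT_IN_RETRIEVE_RESP), so the access descriptors of
	 * the remaining receiver_count - 1 borrowers present in the internal
	 * representation are discounted below.
	 */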
3495 expected_fragment_offset =
3496 composite_constituents_offset +
3497 retrieved_constituents_count *
3498 sizeof(struct ffa_memory_region_constituent) -
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003499 (uint32_t)(memory_region->memory_access_desc_size *
3500 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003501
3502 return expected_fragment_offset;
3503}
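/*
 * Worked example (illustrative numbers only): for an FF-A v1.0 retriever
 * and a single borrower, constituents start after the v1.0 region header,
 * one access descriptor and the composite header. With the layouts used
 * here that is 32 + 16 + 16 = 64 bytes, so after a first fragment carrying
 * 10 constituents of 16 bytes each, the retriever is expected to request
 * the next fragment at offset 64 + 10 * 16 = 224.
 */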
3504
Andrew Walbranca808b12020-05-15 17:22:28 +01003505struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3506 ffa_memory_handle_t handle,
3507 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003508 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003509 struct mpool *page_pool)
3510{
3511 struct ffa_memory_region *memory_region;
3512 struct share_states_locked share_states;
3513 struct ffa_memory_share_state *share_state;
3514 struct ffa_value ret;
3515 uint32_t fragment_index;
3516 uint32_t retrieved_constituents_count;
3517 uint32_t i;
3518 uint32_t expected_fragment_offset;
3519 uint32_t remaining_constituent_count;
3520 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003521 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003522 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003523
3524 dump_share_states();
3525
3526 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003527 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003528 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003529 dlog_verbose("Invalid handle %#lx for FFA_MEM_FRAG_RX.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01003530 handle);
3531 ret = ffa_error(FFA_INVALID_PARAMETERS);
3532 goto out;
3533 }
3534
3535 memory_region = share_state->memory_region;
3536 CHECK(memory_region != NULL);
3537
Andrew Walbranca808b12020-05-15 17:22:28 +01003538 if (!share_state->sending_complete) {
3539 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003540 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003541 "retrieve.\n",
3542 handle);
3543 ret = ffa_error(FFA_INVALID_PARAMETERS);
3544 goto out;
3545 }
3546
J-Alves59ed0042022-07-28 18:26:41 +01003547 /*
3548	 * If a retrieve request from the hypervisor has been initiated for
3549	 * the given share_state, continue it; otherwise assume this is the
3550	 * continuation of a retrieve request from an NWd VM.
3551 */
3552 continue_ffa_hyp_mem_retrieve_req =
3553 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3554 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003555 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003556
J-Alves59ed0042022-07-28 18:26:41 +01003557 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003558 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003559 memory_region, to_locked.vm->id);
3560
3561 if (receiver_index == memory_region->receiver_count) {
3562 dlog_verbose(
3563 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
Karl Meakine8937d92024-03-19 16:04:25 +00003564				"borrower in the memory sharing transaction "
 3565				"(%#lx).\n",
J-Alves59ed0042022-07-28 18:26:41 +01003566 to_locked.vm->id, handle);
3567 ret = ffa_error(FFA_INVALID_PARAMETERS);
3568 goto out;
3569 }
3570
3571 if (share_state->retrieved_fragment_count[receiver_index] ==
3572 0 ||
3573 share_state->retrieved_fragment_count[receiver_index] >=
3574 share_state->fragment_count) {
3575 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003576 "Retrieval of memory with handle %#lx not yet "
J-Alves59ed0042022-07-28 18:26:41 +01003577 "started or already completed (%d/%d fragments "
3578 "retrieved).\n",
3579 handle,
3580 share_state->retrieved_fragment_count
3581 [receiver_index],
3582 share_state->fragment_count);
3583 ret = ffa_error(FFA_INVALID_PARAMETERS);
3584 goto out;
3585 }
3586
3587 fragment_index =
3588 share_state->retrieved_fragment_count[receiver_index];
3589 } else {
3590 if (share_state->hypervisor_fragment_count == 0 ||
3591 share_state->hypervisor_fragment_count >=
3592 share_state->fragment_count) {
3593 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003594				"Retrieval of memory with handle %#lx not "
J-Alves59ed0042022-07-28 18:26:41 +01003595				"started by the hypervisor.\n",
3596 handle);
3597 ret = ffa_error(FFA_INVALID_PARAMETERS);
3598 goto out;
3599 }
3600
3601 if (memory_region->sender != sender_vm_id) {
3602 dlog_verbose(
3603 "Sender ID (%x) is not as expected for memory "
Karl Meakine8937d92024-03-19 16:04:25 +00003604				"handle %#lx.\n",
J-Alves59ed0042022-07-28 18:26:41 +01003605 sender_vm_id, handle);
3606 ret = ffa_error(FFA_INVALID_PARAMETERS);
3607 goto out;
3608 }
3609
3610 fragment_index = share_state->hypervisor_fragment_count;
3611
3612 receiver_index = 0;
3613 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003614
3615 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003616 * Check that the given fragment offset is correct by counting
3617 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003618 */
3619 retrieved_constituents_count = 0;
3620 for (i = 0; i < fragment_index; ++i) {
3621 retrieved_constituents_count +=
3622 share_state->fragment_constituent_counts[i];
3623 }
J-Alvesc7484f12022-05-13 12:41:14 +01003624
3625 CHECK(memory_region->receiver_count > 0);
3626
Andrew Walbranca808b12020-05-15 17:22:28 +01003627 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003628 ffa_memory_retrieve_expected_offset_per_ffa_version(
3629 memory_region, retrieved_constituents_count,
3630 to_locked.vm->ffa_version);
3631
Andrew Walbranca808b12020-05-15 17:22:28 +01003632 if (fragment_offset != expected_fragment_offset) {
3633 dlog_verbose("Fragment offset was %d but expected %d.\n",
3634 fragment_offset, expected_fragment_offset);
3635 ret = ffa_error(FFA_INVALID_PARAMETERS);
3636 goto out;
3637 }
3638
J-Alves4f0d9c12024-01-17 17:23:11 +00003639 /*
3640	 * When Hafnium is the hypervisor, acquire the RX buffer of the VM,
3641	 * which is currently owned by the SPMC.
3642 */
3643	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
J-Alves59ed0042022-07-28 18:26:41 +01003644
Andrew Walbranca808b12020-05-15 17:22:28 +01003645 remaining_constituent_count = ffa_memory_fragment_init(
3646 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3647 share_state->fragments[fragment_index],
3648 share_state->fragment_constituent_counts[fragment_index],
3649 &fragment_length);
3650 CHECK(remaining_constituent_count == 0);
J-Alves674e4de2024-01-17 16:20:32 +00003651
Andrew Walbranca808b12020-05-15 17:22:28 +01003652 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003653 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003654
J-Alves59ed0042022-07-28 18:26:41 +01003655 if (!continue_ffa_hyp_mem_retrieve_req) {
3656 share_state->retrieved_fragment_count[receiver_index]++;
3657 if (share_state->retrieved_fragment_count[receiver_index] ==
3658 share_state->fragment_count) {
3659 ffa_memory_retrieve_complete(share_states, share_state,
3660 page_pool);
3661 }
3662 } else {
3663 share_state->hypervisor_fragment_count++;
3664
3665 ffa_memory_retrieve_complete_from_hyp(share_state);
3666 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003667 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3668 .arg1 = (uint32_t)handle,
3669 .arg2 = (uint32_t)(handle >> 32),
3670 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003671
3672out:
3673 share_states_unlock(&share_states);
3674 dump_share_states();
3675 return ret;
3676}
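/*
 * Sketch of the continuation loop a retriever drives against the handler
 * above (the `ffa_mem_frag_rx` wrapper name is hypothetical; on success
 * the handler returns FFA_MEM_FRAG_TX with the fragment length in w3):
 *
 *	uint32_t offset = fragment_length; // length of the first fragment
 *
 *	while (offset < total_length) {
 *		struct ffa_value ret = ffa_mem_frag_rx(handle, offset);
 *
 *		if (ret.func != FFA_MEM_FRAG_TX_32) {
 *			break; // e.g. FFA_ERROR / FFA_INVALID_PARAMETERS
 *		}
 *		// Consume the fragment from the RX buffer, release the
 *		// buffer, then advance by the length reported in arg3.
 *		offset += (uint32_t)ret.arg3;
 *	}
 */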
3677
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003678struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003679 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003680 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003681{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003682 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003683 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003684 struct ffa_memory_share_state *share_state;
3685 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003686 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003687 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003688 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003689 bool receivers_relinquished_memory;
Karl Meakin84710f32023-10-12 15:14:49 +01003690 ffa_memory_access_permissions_t receiver_permissions = {0};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003691
Andrew Walbrana65a1322020-04-06 19:32:32 +01003692 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003693 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003694 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01003695 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003696 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003697 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003698 }
3699
Andrew Walbrana65a1322020-04-06 19:32:32 +01003700 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003701 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003702 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01003703 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003704 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003705 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003706 }
3707
3708 dump_share_states();
3709
3710 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003711 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003712 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003713 dlog_verbose("Invalid handle %#lx for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003714 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003715 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003716 goto out;
3717 }
3718
Andrew Walbranca808b12020-05-15 17:22:28 +01003719 if (!share_state->sending_complete) {
3720 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003721 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003722 "relinquish.\n",
3723 handle);
3724 ret = ffa_error(FFA_INVALID_PARAMETERS);
3725 goto out;
3726 }
3727
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003728 memory_region = share_state->memory_region;
3729 CHECK(memory_region != NULL);
3730
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003731 receiver_index = ffa_memory_region_get_receiver_index(
3732 memory_region, from_locked.vm->id);
J-Alves8eb19162022-04-28 10:56:48 +01003733
3734 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003735 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003736 "VM ID %d tried to relinquish memory region "
Karl Meakine8937d92024-03-19 16:04:25 +00003737			"with handle %#lx but it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003738 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003739 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003740 goto out;
3741 }
3742
J-Alves8eb19162022-04-28 10:56:48 +01003743 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003744 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003745 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003746			"Memory with handle %#lx not yet fully retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003747			"receiver %x can't relinquish.\n",
3749 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003750 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003751 goto out;
3752 }
3753
J-Alves3c5b2072022-11-21 12:45:40 +00003754 /*
3755	 * Clear here only if requested in the relinquish call and all other
3756	 * borrowers have already relinquished; otherwise clearing is left to
3757	 * a later relinquish or retrieve request from one of the borrowers.
3757 */
3758 receivers_relinquished_memory = true;
3759
3760 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3761 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003762 ffa_memory_region_get_receiver(memory_region, i);
3763 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003764 if (receiver->receiver_permissions.receiver ==
3765 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003766 receiver_permissions =
3767 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003768 continue;
3769 }
3770
3771 if (share_state->retrieved_fragment_count[i] != 0U) {
3772 receivers_relinquished_memory = false;
3773 break;
3774 }
3775 }
3776
3777 clear = receivers_relinquished_memory &&
Daniel Boulby2e14ebe2024-01-15 16:21:44 +00003778 ((relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3779 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003780
3781 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003782 * Clear is not allowed for memory that was shared, as the
3783 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003784 */
J-Alves95fbb312024-03-20 15:19:16 +00003785 if (clear && (share_state->share_func == FFA_MEM_SHARE_32 ||
3786 share_state->share_func == FFA_MEM_SHARE_64)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003787 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003788 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003789 goto out;
3790 }
3791
J-Alvesb886d492024-04-15 10:55:29 +01003792 if (clear && receiver_permissions.data_access == FFA_DATA_ACCESS_RO) {
J-Alves639ddfc2023-11-21 14:17:26 +00003793 dlog_verbose("%s: RO memory can't use clear memory flag.\n",
3794 __func__);
3795 ret = ffa_error(FFA_DENIED);
3796 goto out;
3797 }
3798
Andrew Walbranca808b12020-05-15 17:22:28 +01003799 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003800 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003801 share_state->fragment_constituent_counts,
3802 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003803
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003804 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003805 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003806 * Mark memory handle as not retrieved, so it can be
3807 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003808 */
J-Alves8eb19162022-04-28 10:56:48 +01003809 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003810 }
3811
3812out:
3813 share_states_unlock(&share_states);
3814 dump_share_states();
3815 return ret;
3816}
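/*
 * Sketch of a relinquish request that passes the checks above (the
 * `tx_buffer` and `own_id` names are hypothetical; the request is written
 * to the caller's TX buffer before invoking FFA_MEM_RELINQUISH):
 *
 *	struct ffa_mem_relinquish *req =
 *		(struct ffa_mem_relinquish *)tx_buffer;
 *
 *	req->handle = handle;
 *	req->flags = FFA_MEMORY_REGION_FLAG_CLEAR; // optional
 *	req->endpoint_count = 1;                   // exactly one endpoint
 *	req->endpoints[0] = own_id;                // must be the caller
 */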
3817
3818/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003819 * Validates that the reclaim transition is allowed for the given
3820 * handle, updates the page table of the reclaiming VM, and frees the
3821 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003822 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003823struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003824 ffa_memory_handle_t handle,
3825 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003826 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003827{
3828 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003829 struct ffa_memory_share_state *share_state;
3830 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003831 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003832
3833 dump_share_states();
3834
3835 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003836
Karl Meakin4a2854a2023-06-30 16:26:52 +01003837 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003838 if (share_state == NULL) {
Karl Meakine8937d92024-03-19 16:04:25 +00003839 dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003840 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003841 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003842 goto out;
3843 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003844 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003845
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003846 CHECK(memory_region != NULL);
3847
J-Alvesa9cd7e32022-07-01 13:49:33 +01003848 if (vm_id_is_current_world(to_locked.vm->id) &&
3849 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003850 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003851 "VM %#x attempted to reclaim memory handle %#lx "
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003852 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003853 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003854 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003855 goto out;
3856 }
3857
Andrew Walbranca808b12020-05-15 17:22:28 +01003858 if (!share_state->sending_complete) {
3859 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003860 "Memory with handle %#lx not fully sent, can't "
Andrew Walbranca808b12020-05-15 17:22:28 +01003861 "reclaim.\n",
3862 handle);
3863 ret = ffa_error(FFA_INVALID_PARAMETERS);
3864 goto out;
3865 }
3866
J-Alves752236c2022-04-28 11:07:47 +01003867 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3868 if (share_state->retrieved_fragment_count[i] != 0) {
3869 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00003870 "Tried to reclaim memory handle %#lx "
J-Alves3c5b2072022-11-21 12:45:40 +00003871 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003872				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003873 handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003874 ffa_memory_region_get_receiver(memory_region, i)
3875 ->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01003876 ret = ffa_error(FFA_DENIED);
3877 goto out;
3878 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003879 }
3880
Andrew Walbranca808b12020-05-15 17:22:28 +01003881 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01003882 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003883 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003884 share_state->fragment_count, share_state->sender_orig_mode,
J-Alves460d36c2023-10-12 17:02:15 +01003885 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool,
J-Alvesfd206052023-05-22 16:45:00 +01003886 NULL, share_state->memory_protected);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003887
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003888 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003889 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003890 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003891 }
3892
3893out:
3894 share_states_unlock(&share_states);
3895 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003896}
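/*
 * Sketch of the owner-side reclaim matching the checks above (the
 * `ffa_mem_reclaim` wrapper name is hypothetical):
 *
 *	struct ffa_value ret =
 *		ffa_mem_reclaim(handle, FFA_MEM_RECLAIM_CLEAR);
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		// All borrowers had relinquished: the share state is freed
 *		// and the handle is no longer valid.
 *	} else {
 *		// FFA_DENIED if a borrower still holds the memory retrieved,
 *		// FFA_INVALID_PARAMETERS for an unknown handle or wrong
 *		// owner.
 *	}
 */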