/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/addr.h"
#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/ffa_partition_manifest.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/panic.h"
#include "hf/plat/memory_protect.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "hf/vm_ids.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Return the offset to the first constituent within the
 * `ffa_composite_memory_region` for the given receiver from an
 * `ffa_memory_region`. The caller must check that the receiver_index is within
 * bounds, and that it has a composite memory region offset.
 */
static uint32_t ffa_composite_constituent_offset(
	struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
	struct ffa_memory_access *receiver;
	uint32_t composite_offset;

	CHECK(receiver_index < memory_region->receiver_count);

	receiver =
		ffa_memory_region_get_receiver(memory_region, receiver_index);
	CHECK(receiver != NULL);

	composite_offset = receiver->composite_memory_region_offset;

	CHECK(composite_offset != 0);

	return composite_offset + sizeof(struct ffa_composite_memory_region);
}

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}
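
/*
 * Note: get_share_state() below relies on this being the inverse of
 * plat_ffa_memory_handle_make(), i.e. assuming the allocator bits are
 * confined to FFA_MEMORY_HANDLE_ALLOCATOR_MASK, a handle made for share-state
 * index i yields ffa_memory_handle_get_index(handle) == i.
 */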

/**
 * Initialises the next available `struct ffa_memory_share_state`. If `handle`
 * is `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle,
 * otherwise uses the provided handle which is assumed to be globally unique.
 *
 * Returns a pointer to the allocated `ffa_memory_share_state` on success or
 * `NULL` if none are available.
 */
struct ffa_memory_share_state *allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle)
{
	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (uint64_t i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (uint32_t j = 0; j < MAX_MEM_SHARE_RECIPIENTS;
			     ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			return allocated_state;
		}
	}

	return NULL;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state then
 * returns a pointer to the share state. Otherwise returns NULL.
 */
struct ffa_memory_share_state *get_share_state(
	struct share_states_locked share_states, ffa_memory_handle_t handle)
{
	struct ffa_memory_share_state *share_state;

	assert(share_states.share_states != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		uint64_t index = ffa_memory_handle_get_index(handle);

		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				return share_state;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (uint64_t index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			return share_state;
		}
	}

	return NULL;
}
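
/*
 * Illustrative usage of the share-state helpers above (a sketch for
 * documentation only; the surrounding error handling is an assumption, not
 * code taken from this file):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	struct ffa_memory_share_state *share_state =
 *		get_share_state(share_states, handle);
 *
 *	if (share_state != NULL) {
 *		... inspect or update the share state ...
 *	}
 *	share_states_unlock(&share_states);
 */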

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so it
	 * doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x "
	     "tag %u, memory access descriptor size %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->memory_access_desc_size,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		struct ffa_memory_access *receiver =
			ffa_memory_region_get_receiver(memory_region, i);
		if (i != 0) {
			dlog(", ");
		}
		dlog("Receiver %#x: %#x (offset %u)",
		     receiver->receiver_permissions.receiver,
		     receiver->receiver_permissions.permissions,
		     receiver->composite_memory_region_offset);
		/* The impdef field is only present from v1.2 onwards. */
		if (ffa_version_from_memory_access_desc_size(
			    memory_region->memory_access_desc_size) >=
		    MAKE_FFA_VERSION(1, 2)) {
			dlog(", impdef: %#x %#x", receiver->impdef.val[0],
			     receiver->impdef.val[1]);
		}
	}
	dlog("] at offset %u", memory_region->receivers_offset);
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	/* Set the security state bit if necessary. */
	if ((default_mode & plat_ffa_other_world_mode()) != 0) {
		mode |= plat_ffa_other_world_mode();
	}

	return mode;
}
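
/*
 * Worked example for ffa_memory_permissions_to_mode() (documentation only):
 * FFA_DATA_ACCESS_RO with FFA_INSTRUCTION_ACCESS_NX maps to MM_MODE_R, while
 * FFA_DATA_ACCESS_RW with FFA_INSTRUCTION_ACCESS_X maps to
 * MM_MODE_R | MM_MODE_W | MM_MODE_X; in both cases the other-world security
 * bit is carried over from `default_mode` when it is set there.
 */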

/**
 * Gets the current mode in the stage-2 page table of the given VM for all the
 * pages in the given constituents, if they all have the same mode, or returns
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		dlog_verbose("%s: no constituents\n", __func__);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				dlog_verbose("%s: addresses not page-aligned\n",
					     __func__);
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				dlog_verbose(
					"%s: constituent memory range "
					"%#x..%#x not mapped with the same "
					"mode\n",
					__func__, ipa_addr(begin),
					ipa_addr(end));
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"%s: expected mode %#x but was %#x for "
					"%d pages at %#x.\n",
					__func__, *orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

uint32_t ffa_version_from_memory_access_desc_size(
	uint32_t memory_access_desc_size)
{
	switch (memory_access_desc_size) {
	/*
	 * v1.0 and v1.1 memory access descriptors are the same size; however,
	 * v1.1 is the first version to include the memory access descriptor
	 * size field, so return v1.1.
	 */
	case sizeof(struct ffa_memory_access_v1_0):
		return MAKE_FFA_VERSION(1, 1);
	case sizeof(struct ffa_memory_access):
		return MAKE_FFA_VERSION(1, 2);
	}
	return 0;
}

/**
 * Checks whether the receiver descriptor size and offset given are valid for
 * the sender's FF-A version.
 */
static bool receiver_size_and_offset_valid_for_version(
	uint32_t receivers_size, uint32_t receivers_offset,
	uint32_t ffa_version)
{
	/*
	 * Check that the version that the memory access descriptor size
	 * belongs to is compatible with the FF-A version we believe the
	 * sender to be.
	 */
	uint32_t expected_ffa_version =
		ffa_version_from_memory_access_desc_size(receivers_size);
	if (!FFA_VERSIONS_ARE_COMPATIBLE(expected_ffa_version, ffa_version)) {
		return false;
	}

	/*
	 * Check the receivers_offset matches the version we found from the
	 * memory access descriptor size.
	 */
	switch (expected_ffa_version) {
	case MAKE_FFA_VERSION(1, 1):
	case MAKE_FFA_VERSION(1, 2):
		return receivers_offset == sizeof(struct ffa_memory_region);
	default:
		return false;
	}
}

/**
 * Checks that the values set for fields in the memory region are valid and
 * safe: offset values are within safe bounds, the receiver count will not
 * cause overflows, and reserved fields are 0.
 */
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
				    uint32_t ffa_version,
				    uint32_t fragment_length,
				    bool send_transaction)
{
	uint32_t receiver_count;
	struct ffa_memory_access *receiver;
	uint32_t composite_offset_0;

	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct ffa_memory_region_v1_0 *memory_region_v1_0 =
			(struct ffa_memory_region_v1_0 *)memory_region;
		/* Check the reserved fields are 0. */
		if (memory_region_v1_0->reserved_0 != 0 ||
		    memory_region_v1_0->reserved_1 != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		receiver_count = memory_region_v1_0->receiver_count;
	} else {
		uint32_t receivers_size =
			memory_region->memory_access_desc_size;
		uint32_t receivers_offset = memory_region->receivers_offset;

		/* Check the reserved fields are 0. */
		if (memory_region->reserved[0] != 0 ||
		    memory_region->reserved[1] != 0 ||
		    memory_region->reserved[2] != 0) {
			dlog_verbose("Reserved fields must be 0.\n");
			return false;
		}

		/*
		 * Check memory_access_desc_size matches the size of the struct
		 * for the sender's FF-A version.
		 */
		if (!receiver_size_and_offset_valid_for_version(
			    receivers_size, receivers_offset, ffa_version)) {
			dlog_verbose(
				"Invalid memory access descriptor size %d "
				"or receiver offset %d "
				"for FF-A version %#x\n",
				receivers_size, receivers_offset, ffa_version);
			return false;
		}

		receiver_count = memory_region->receiver_count;
	}

	/* Check the receiver count is not too large. */
	if (receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
		dlog_verbose(
			"Max number of recipients supported is %u, "
			"specified %u\n",
			MAX_MEM_SHARE_RECIPIENTS, receiver_count);
		return false;
	}

	/* Check values in the memory access descriptors. */
	/*
	 * The composite offset values must be the same for all receivers, so
	 * check that the first one is valid and then that they are all the
	 * same.
	 */
	receiver = ffa_version == MAKE_FFA_VERSION(1, 0)
			   ? (struct ffa_memory_access *)&(
				     (struct ffa_memory_region_v1_0 *)
					     memory_region)
					      ->receivers[0]
			   : ffa_memory_region_get_receiver(memory_region, 0);
	assert(receiver != NULL);
	composite_offset_0 = receiver->composite_memory_region_offset;

	if (!send_transaction) {
		if (composite_offset_0 != 0) {
			dlog_verbose(
				"Composite memory region descriptor offset "
				"must be 0 for retrieve requests. "
				"Currently %d\n",
				composite_offset_0);
			return false;
		}
	} else {
		bool comp_offset_is_zero = composite_offset_0 == 0U;
		bool comp_offset_lt_transaction_descriptor_size =
			composite_offset_0 <
			(sizeof(struct ffa_memory_region) +
			 (uint32_t)(memory_region->memory_access_desc_size *
				    memory_region->receiver_count));
		bool comp_offset_with_comp_gt_fragment_length =
			composite_offset_0 +
				sizeof(struct ffa_composite_memory_region) >
			fragment_length;
		if (comp_offset_is_zero ||
		    comp_offset_lt_transaction_descriptor_size ||
		    comp_offset_with_comp_gt_fragment_length) {
			dlog_verbose(
				"Invalid composite memory region descriptor "
				"offset for send transaction %u\n",
				composite_offset_0);
			return false;
		}
	}

	for (int i = 0; i < memory_region->receiver_count; i++) {
		uint32_t composite_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			struct ffa_memory_region_v1_0 *memory_region_v1_0 =
				(struct ffa_memory_region_v1_0 *)memory_region;

			struct ffa_memory_access_v1_0 *receiver_v1_0 =
				&memory_region_v1_0->receivers[i];
			/* Check reserved fields are 0. */
			if (receiver_v1_0->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver_v1_0->reserved_0);
				return false;
			}
			/*
			 * We can cast to the current version receiver as the
			 * remaining fields we are checking have the same
			 * offsets for all versions since memory access
			 * descriptors are forwards compatible.
			 */
			receiver = (struct ffa_memory_access *)receiver_v1_0;
		} else {
			receiver = ffa_memory_region_get_receiver(memory_region,
								  i);
			assert(receiver != NULL);

			if (receiver->reserved_0 != 0) {
				dlog_verbose(
					"Reserved field in the memory access "
					"descriptor must be zero. "
					"Currently receiver %d has a reserved "
					"field with a value of %d\n",
					i, receiver->reserved_0);
				return false;
			}
		}

		/* Check composite offset values are equal for all receivers. */
		composite_offset = receiver->composite_memory_region_offset;
		if (composite_offset != composite_offset_0) {
			dlog_verbose(
				"Composite offset %x differs from %x in index "
				"%u\n",
				composite_offset, composite_offset_0, i);
			return false;
		}
	}
	return true;
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if ((*orig_from_mode & MM_MODE_D) != 0U) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		/*
		 * The assumption is that at this point, an operation from an
		 * SP to a receiver VM should have returned an FFA_ERROR
		 * already.
		 */
		if (!ffa_is_vm_id(from.vm->id)) {
			assert(!ffa_is_vm_id(
				receivers[i].receiver_permissions.receiver));
		}

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call has been processed
		 * successfully, it is expected that orig_to_mode would overlap
		 * with `state_mask`, as a result of the function
		 * `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from the virtual FF-A instance:
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/*
 * Performs the operations related to the `action` MAP_ACTION_CHECK*.
 * Returns:
 *  - FFA_SUCCESS_32: if all goes well.
 *  - FFA_ERROR_32: with FFA_NO_MEMORY, if there is no memory to manage
 *    the page table update, or the error code provided by the function
 *    `arch_memory_protect`.
 */
static struct ffa_value ffa_region_group_check_actions(
	struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end,
	struct mpool *ppool, uint32_t mode, enum ffa_map_action action,
	bool *memory_protected)
{
	struct ffa_value ret;
	bool is_memory_protected;

	if (!vm_identity_prepare(vm_locked, pa_begin, pa_end, mode, ppool)) {
		dlog_verbose(
			"%s: memory can't be mapped to %x due to lack of "
			"memory. Base: %lx end: %lx\n",
			__func__, vm_locked.vm->id, pa_addr(pa_begin),
			pa_addr(pa_end));
		return ffa_error(FFA_NO_MEMORY);
	}

	switch (action) {
	case MAP_ACTION_CHECK:
		/* No protect requested. */
		is_memory_protected = false;
		ret = (struct ffa_value){.func = FFA_SUCCESS_32};
		break;
	case MAP_ACTION_CHECK_PROTECT: {
		paddr_t last_protected_pa = pa_init(0);

		ret = arch_memory_protect(pa_begin, pa_end, &last_protected_pa);

		is_memory_protected = (ret.func == FFA_SUCCESS_32);

		/*
		 * - If protecting the memory has failed with FFA_DENIED, some
		 * range of memory was in the wrong state. In that case, the
		 * SPM reverts the state of the pages that were successfully
		 * updated.
		 * - If protecting the memory has failed with
		 * FFA_NOT_SUPPORTED, the platform doesn't support the
		 * protection mechanism. That said, it still permits the page
		 * table update to go through. The variable
		 * `is_memory_protected` will be equal to false.
		 * - If protecting the memory has failed with
		 * FFA_INVALID_PARAMETERS, break from the switch and return
		 * the error.
		 */
		if (ret.func == FFA_ERROR_32) {
			assert(!is_memory_protected);
			if (ffa_error_code(ret) == FFA_DENIED &&
			    pa_addr(last_protected_pa) != (uintptr_t)0) {
				CHECK(arch_memory_unprotect(
					pa_begin,
					pa_add(last_protected_pa, PAGE_SIZE)));
			} else if (ffa_error_code(ret) == FFA_NOT_SUPPORTED) {
				ret = (struct ffa_value){
					.func = FFA_SUCCESS_32,
				};
			}
		}
	} break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}

	if (memory_protected != NULL) {
		*memory_protected = is_memory_protected;
	}

	return ret;
}

static void ffa_region_group_commit_actions(struct vm_locked vm_locked,
					    paddr_t pa_begin, paddr_t pa_end,
					    struct mpool *ppool, uint32_t mode,
					    enum ffa_map_action action)
{
	switch (action) {
	case MAP_ACTION_COMMIT_UNPROTECT:
		/*
		 * Check that this succeeds, because the SPM should only be
		 * unprotecting memory that it had protected before.
		 */
		CHECK(arch_memory_unprotect(pa_begin, pa_end));
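		/*
		 * Intentional fall-through: after undoing the protection,
		 * apply the same page table commit as MAP_ACTION_COMMIT.
		 */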
	case MAP_ACTION_COMMIT:
		vm_identity_commit(vm_locked, pa_begin, pa_end, mode, ppool,
				   NULL);
		break;
	default:
		panic("%s: invalid action to process %x\n", __func__, action);
	}
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * The enum ffa_map_action determines the action taken by a call to the
 * function below:
 * - If action is MAP_ACTION_CHECK, the page tables will be allocated from the
 *   mpool but no mappings will actually be updated. This function must always
 *   be called first with action set to MAP_ACTION_CHECK to check that it will
 *   succeed before calling ffa_region_group_identity_map with whichever one of
 *   the remaining actions, to avoid leaving the page table in a half-updated
 *   state.
 * - The action MAP_ACTION_COMMIT allocates the page tables from the mpool, and
 *   changes the memory mappings.
 * - The action MAP_ACTION_CHECK_PROTECT extends MAP_ACTION_CHECK with an
 *   invocation to the monitor to update the security state of the memory,
 *   to that of the SPMC.
 * - The action MAP_ACTION_COMMIT_UNPROTECT extends MAP_ACTION_COMMIT
 *   with a call into the monitor, to reset the security state of memory
 *   that has previously been mapped with the MAP_ACTION_CHECK_PROTECT action.
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * If all goes well, returns FFA_SUCCESS_32; otherwise FFA_ERROR, with one of
 * the following error codes:
 * - FFA_INVALID_PARAMETERS: invalid range of memory.
 * - FFA_DENIED: the request to protect the memory failed (some range was in
 *   the wrong state); the protection already applied is reverted and no
 *   changes are made to memory mappings.
 */
struct ffa_value ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, enum ffa_map_action action,
	bool *memory_protected)
{
	uint32_t i;
	uint32_t j;
	struct ffa_value ret;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls into the system's
			 * PA range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			if (action <= MAP_ACTION_CHECK_PROTECT) {
				ret = ffa_region_group_check_actions(
					vm_locked, pa_begin, pa_end, ppool,
					mode, action, memory_protected);
			} else if (action >= MAP_ACTION_COMMIT &&
				   action < MAP_ACTION_MAX) {
				ffa_region_group_commit_actions(
					vm_locked, pa_begin, pa_end, ppool,
					mode, action);
				ret = (struct ffa_value){
					.func = FFA_SUCCESS_32};
			} else {
				panic("%s: Unknown ffa_map_action.\n",
				      __func__);
			}
		}
	}

	return ret;
}
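
/*
 * Illustrative check-then-commit sequence for ffa_region_group_identity_map()
 * (a sketch for documentation only; the caller context and error handling are
 * assumptions, not code taken from this file):
 *
 *	ret = ffa_region_group_identity_map(vm_locked, fragments,
 *					    constituent_counts, fragment_count,
 *					    mode, ppool, MAP_ACTION_CHECK,
 *					    NULL);
 *	if (ret.func == FFA_SUCCESS_32) {
 *		ffa_region_group_identity_map(vm_locked, fragments,
 *					      constituent_counts,
 *					      fragment_count, mode, ppool,
 *					      MAP_ACTION_COMMIT, NULL);
 *	}
 *	vm_ptable_defrag(vm_locked, ppool);
 */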

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 * global mapping of the whole range. Such an approach will limit
	 * the changes to stage-1 tables and will allow only local
	 * invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

static bool is_memory_range_within(ipaddr_t begin, ipaddr_t end,
				   ipaddr_t in_begin, ipaddr_t in_end)
{
	return (ipa_addr(begin) >= ipa_addr(in_begin) &&
		ipa_addr(begin) < ipa_addr(in_end)) ||
	       (ipa_addr(end) <= ipa_addr(in_end) &&
		ipa_addr(end) > ipa_addr(in_begin));
}

/**
 * Receives a memory range and looks for overlaps with the remaining
 * constituents of the memory share/lend/donate operation. Assumes they are
 * passed in order to avoid having to loop over all the elements at each call.
 * The function only compares the received memory range with those that follow
 * within the same fragment, and with subsequent fragments from the same
 * operation.
 */
static bool ffa_memory_check_overlap(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts,
	const uint32_t fragment_count, const uint32_t current_fragment,
	const uint32_t current_constituent)
{
	uint32_t i = current_fragment;
	uint32_t j = current_constituent;
	ipaddr_t current_begin = ipa_init(fragments[i][j].address);
	const uint32_t current_page_count = fragments[i][j].page_count;
	size_t current_size = current_page_count * PAGE_SIZE;
	ipaddr_t current_end = ipa_add(current_begin, current_size - 1);

	if (current_size == 0 ||
	    current_size > UINT64_MAX - ipa_addr(current_begin)) {
		dlog_verbose("Invalid page count. Addr: %x page_count: %x\n",
			     ipa_addr(current_begin), current_page_count);
		return false;
	}

	for (; i < fragment_count; i++) {
		j = (i == current_fragment) ? j + 1 : 0;

		for (; j < fragment_constituent_counts[i]; j++) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			const uint32_t page_count = fragments[i][j].page_count;
			size_t size = page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size - 1);

			if (size == 0 || size > UINT64_MAX - ipa_addr(begin)) {
				dlog_verbose(
					"Invalid page count. Addr: %x "
					"page_count: %x\n",
					ipa_addr(begin), page_count);
				return false;
			}

			/*
			 * Check if the current range is within begin and end,
			 * as well as the reverse. This should help optimize
			 * the loop, and reduce the number of iterations.
			 */
			if (is_memory_range_within(begin, end, current_begin,
						   current_end) ||
			    is_memory_range_within(current_begin, current_end,
						   begin, end)) {
				dlog_verbose(
					"Overlapping memory ranges: %#x - %#x "
					"with %#x - %#x\n",
					ipa_addr(begin), ipa_addr(end),
					ipa_addr(current_begin),
					ipa_addr(current_end));
				return true;
			}
		}
	}

	return false;
}
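
/*
 * Illustrative example for ffa_memory_check_overlap() (documentation only,
 * assuming a 4KiB PAGE_SIZE): a constituent at address 0x8000 with a page
 * count of 2 covers [0x8000, 0x9fff]; if a later constituent in the same
 * transaction starts at 0x9000, the ranges intersect, the function returns
 * true, and ffa_send_check_update() below rejects the transaction with
 * FFA_INVALID_PARAMETERS.
 */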
1250
Jose Marinho09b1db82019-08-08 09:16:59 +01001251/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001252 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +01001253 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001254 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +01001255 *
1256 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001257 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001258 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Jose Marinho09b1db82019-08-08 09:16:59 +01001259 * erroneous;
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001260 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
1261 * request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001262 * 3) FFA_DENIED - The sender doesn't have sufficient access to send the
Andrew Walbrana65a1322020-04-06 19:32:32 +01001263 * memory with the given permissions.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001264 * Success is indicated by FFA_SUCCESS.
Jose Marinho09b1db82019-08-08 09:16:59 +01001265 */
J-Alves66652252022-07-06 09:49:51 +01001266struct ffa_value ffa_send_check_update(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001267 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001268 struct ffa_memory_region_constituent **fragments,
1269 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves8f11cde2022-12-21 16:18:22 +00001270 uint32_t composite_total_page_count, uint32_t share_func,
1271 struct ffa_memory_access *receivers, uint32_t receivers_count,
1272 struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
Jose Marinho09b1db82019-08-08 09:16:59 +01001273{
Andrew Walbranca808b12020-05-15 17:22:28 +01001274 uint32_t i;
J-Alves8f11cde2022-12-21 16:18:22 +00001275 uint32_t j;
Jose Marinho09b1db82019-08-08 09:16:59 +01001276 uint32_t orig_from_mode;
1277 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +01001278 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001279 struct ffa_value ret;
J-Alves8f11cde2022-12-21 16:18:22 +00001280 uint32_t constituents_total_page_count = 0;
Jose Marinho09b1db82019-08-08 09:16:59 +01001281
1282 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001283 * Make sure constituents are properly aligned to a 64-bit boundary. If
1284 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +01001285 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001286 for (i = 0; i < fragment_count; ++i) {
1287 if (!is_aligned(fragments[i], 8)) {
1288 dlog_verbose("Constituents not aligned.\n");
1289 return ffa_error(FFA_INVALID_PARAMETERS);
1290 }
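		/*
		 * Accumulate the page count of every constituent and check
		 * that it does not overlap any constituent that follows it in
		 * this or a later fragment.
		 */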
J-Alves8f11cde2022-12-21 16:18:22 +00001291 for (j = 0; j < fragment_constituent_counts[i]; ++j) {
1292 constituents_total_page_count +=
1293 fragments[i][j].page_count;
J-Alves5952d942022-12-22 16:03:00 +00001294 if (ffa_memory_check_overlap(
1295 fragments, fragment_constituent_counts,
1296 fragment_count, i, j)) {
1297 return ffa_error(FFA_INVALID_PARAMETERS);
1298 }
J-Alves8f11cde2022-12-21 16:18:22 +00001299 }
1300 }
1301
1302 if (constituents_total_page_count != composite_total_page_count) {
1303 dlog_verbose(
1304 "Composite page count differs from calculated page "
1305 "count from constituents.\n");
1306 return ffa_error(FFA_INVALID_PARAMETERS);
Jose Marinho09b1db82019-08-08 09:16:59 +01001307 }
1308
1309 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001310	 * Check if the state transition is lawful for the sender, and ensure
1311	 * that all constituents of the memory region being shared are at the
1312	 * same state.
Jose Marinho09b1db82019-08-08 09:16:59 +01001313 */
J-Alves363f5722022-04-25 17:37:37 +01001314 ret = ffa_send_check_transition(from_locked, share_func, receivers,
1315 receivers_count, &orig_from_mode,
1316 fragments, fragment_constituent_counts,
Andrew Walbranca808b12020-05-15 17:22:28 +01001317 fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001318 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001319 dlog_verbose("Invalid transition for send.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001320 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001321 }
1322
Andrew Walbran37c574e2020-06-03 11:45:46 +01001323 if (orig_from_mode_ret != NULL) {
1324 *orig_from_mode_ret = orig_from_mode;
1325 }
1326
Jose Marinho09b1db82019-08-08 09:16:59 +01001327 /*
1328 * Create a local pool so any freed memory can't be used by another
1329 * thread. This is to ensure the original mapping can be restored if the
1330 * clear fails.
1331 */
Andrew Walbran475c1452020-02-07 13:22:22 +00001332 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001333
1334 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001335 * First reserve all required memory for the new page table entries
1336 * without committing, to make sure the entire operation will succeed
1337 * without exhausting the page pool.
Jose Marinho09b1db82019-08-08 09:16:59 +01001338 */
J-Alvescf6253e2024-01-03 13:48:48 +00001339 ret = ffa_region_group_identity_map(
1340 from_locked, fragments, fragment_constituent_counts,
1341 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK, NULL);
1342 if (ret.func == FFA_ERROR_32) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001343 goto out;
1344 }
1345
1346 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001347 * Update the mapping for the sender. This won't allocate because the
1348 * transaction was already prepared above, but may free pages in the
1349 * case that a whole block is being unmapped that was previously
1350 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +01001351 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001352 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001353 from_locked, fragments, fragment_constituent_counts,
1354 fragment_count, from_mode, &local_page_pool,
1355 MAP_ACTION_COMMIT, NULL)
1356 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001357
1358 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001359 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001360 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1361 fragment_constituent_counts,
1362 fragment_count, page_pool)) {
Jose Marinho09b1db82019-08-08 09:16:59 +01001363 /*
1364 * On failure, roll back by returning memory to the sender. This
1365 * may allocate pages which were previously freed into
1366 * `local_page_pool` by the call above, but will never allocate
1367 * more pages than that so can never fail.
1368 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001369 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001370 from_locked, fragments,
1371 fragment_constituent_counts, fragment_count,
1372 orig_from_mode, &local_page_pool,
1373 MAP_ACTION_COMMIT, NULL)
1374 .func == FFA_SUCCESS_32);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001375 ret = ffa_error(FFA_NO_MEMORY);
Jose Marinho09b1db82019-08-08 09:16:59 +01001376 goto out;
1377 }
1378
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001379 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001380
1381out:
1382 mpool_fini(&local_page_pool);
1383
1384 /*
1385 * Tidy up the page table by reclaiming failed mappings (if there was an
1386 * error) or merging entries into blocks where possible (on success).
1387 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001388 vm_ptable_defrag(from_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001389
1390 return ret;
1391}
1392
1393/**
1394 * Validates and maps memory shared from one VM to another.
1395 *
1396 * This function requires the calling context to hold the <to> lock.
1397 *
1398 * Returns:
1399 * In case of error, one of the following values is returned:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001400 * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001401 * erroneous;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001402 * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001403 * the request.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001404 * Success is indicated by FFA_SUCCESS.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001405 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001406struct ffa_value ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01001407 struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001408 struct ffa_memory_region_constituent **fragments,
1409 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
J-Alves26483382023-04-20 12:01:49 +01001410 uint32_t sender_orig_mode, uint32_t share_func, bool clear,
Andrew Walbranca808b12020-05-15 17:22:28 +01001411 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001412{
Andrew Walbranca808b12020-05-15 17:22:28 +01001413 uint32_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001414 uint32_t to_mode;
1415 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001416 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001417
1418 /*
Andrew Walbranca808b12020-05-15 17:22:28 +01001419 * Make sure constituents are properly aligned to a 64-bit boundary. If
1420 * not we would get alignment faults trying to read (64-bit) values.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001421 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001422 for (i = 0; i < fragment_count; ++i) {
1423 if (!is_aligned(fragments[i], 8)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001424 dlog_verbose("Fragment not properly aligned.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01001425 return ffa_error(FFA_INVALID_PARAMETERS);
1426 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001427 }
1428
1429 /*
1430 * Check if the state transition is lawful for the recipient, and ensure
1431 * that all constituents of the memory region being retrieved are at the
1432 * same state.
1433 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001434 ret = ffa_retrieve_check_transition(
1435 to_locked, share_func, fragments, fragment_constituent_counts,
J-Alves26483382023-04-20 12:01:49 +01001436 fragment_count, sender_orig_mode, &to_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001437 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001438 dlog_verbose("Invalid transition for retrieve.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001439 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001440 }
1441
1442 /*
1443 * Create a local pool so any freed memory can't be used by another
1444 * thread. This is to ensure the original mapping can be restored if the
1445 * clear fails.
1446 */
1447 mpool_init_with_fallback(&local_page_pool, page_pool);
1448
1449 /*
1450 * First reserve all required memory for the new page table entries in
1451 * the recipient page tables without committing, to make sure the entire
1452 * operation will succeed without exhausting the page pool.
1453 */
J-Alvescf6253e2024-01-03 13:48:48 +00001454 ret = ffa_region_group_identity_map(
1455 to_locked, fragments, fragment_constituent_counts,
1456 fragment_count, to_mode, page_pool, MAP_ACTION_CHECK, NULL);
1457 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001458 /* TODO: partial defrag of failed range. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001459 goto out;
1460 }
1461
1462 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001463 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001464 !ffa_clear_memory_constituents(sender_orig_mode, fragments,
1465 fragment_constituent_counts,
1466 fragment_count, page_pool)) {
J-Alvesb5084cf2022-07-06 14:20:12 +01001467 dlog_verbose("Couldn't clear constituents.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001468 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001469 goto out;
1470 }
1471
Jose Marinho09b1db82019-08-08 09:16:59 +01001472 /*
1473 * Complete the transfer by mapping the memory into the recipient. This
1474 * won't allocate because the transaction was already prepared above, so
1475 * it doesn't need to use the `local_page_pool`.
1476 */
J-Alvescf6253e2024-01-03 13:48:48 +00001477 CHECK(ffa_region_group_identity_map(to_locked, fragments,
1478 fragment_constituent_counts,
1479 fragment_count, to_mode, page_pool,
1480 MAP_ACTION_COMMIT, NULL)
1481 .func == FFA_SUCCESS_32);
Jose Marinho09b1db82019-08-08 09:16:59 +01001482
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001483 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinho09b1db82019-08-08 09:16:59 +01001484
1485out:
1486 mpool_fini(&local_page_pool);
1487
1488 /*
Andrew Walbranf07f04d2020-05-01 18:09:00 +01001489 * Tidy up the page table by reclaiming failed mappings (if there was an
1490 * error) or merging entries into blocks where possible (on success).
Jose Marinho09b1db82019-08-08 09:16:59 +01001491 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001492 vm_ptable_defrag(to_locked, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001493
1494 return ret;
1495}
1496
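/**
 * Validates and updates the page tables of the endpoint relinquishing access
 * to a memory region, optionally clearing the memory first.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns FFA_SUCCESS on success, or the relevant FFA_ERROR otherwise.
 */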
Andrew Walbran996d1d12020-05-27 14:08:43 +01001497static struct ffa_value ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01001498 struct vm_locked from_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01001499 struct ffa_memory_region_constituent **fragments,
1500 uint32_t *fragment_constituent_counts, uint32_t fragment_count,
1501 struct mpool *page_pool, bool clear)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001502{
1503 uint32_t orig_from_mode;
1504 uint32_t from_mode;
1505 struct mpool local_page_pool;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001506 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001507
Andrew Walbranca808b12020-05-15 17:22:28 +01001508 ret = ffa_relinquish_check_transition(
1509 from_locked, &orig_from_mode, fragments,
1510 fragment_constituent_counts, fragment_count, &from_mode);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001511 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001512 dlog_verbose("Invalid transition for relinquish.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001513 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001514 }
1515
1516 /*
1517 * Create a local pool so any freed memory can't be used by another
1518 * thread. This is to ensure the original mapping can be restored if the
1519 * clear fails.
1520 */
1521 mpool_init_with_fallback(&local_page_pool, page_pool);
1522
1523 /*
1524 * First reserve all required memory for the new page table entries
1525 * without committing, to make sure the entire operation will succeed
1526 * without exhausting the page pool.
1527 */
J-Alvescf6253e2024-01-03 13:48:48 +00001528 ret = ffa_region_group_identity_map(
1529 from_locked, fragments, fragment_constituent_counts,
1530 fragment_count, from_mode, page_pool, MAP_ACTION_CHECK, NULL);
1531 if (ret.func == FFA_ERROR_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001532 goto out;
1533 }
1534
1535 /*
1536 * Update the mapping for the sender. This won't allocate because the
1537 * transaction was already prepared above, but may free pages in the
1538 * case that a whole block is being unmapped that was previously
1539 * partially mapped.
1540 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001541 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001542 from_locked, fragments, fragment_constituent_counts,
1543 fragment_count, from_mode, &local_page_pool,
1544 MAP_ACTION_COMMIT, NULL)
1545 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001546
1547 /* Clear the memory so no VM or device can see the previous contents. */
J-Alves7db32002021-12-14 14:44:50 +00001548 if (clear &&
J-Alves26483382023-04-20 12:01:49 +01001549 !ffa_clear_memory_constituents(orig_from_mode, fragments,
1550 fragment_constituent_counts,
1551 fragment_count, page_pool)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001552 /*
1553 * On failure, roll back by returning memory to the sender. This
1554 * may allocate pages which were previously freed into
1555 * `local_page_pool` by the call above, but will never allocate
1556 * more pages than that so can never fail.
1557 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001558 CHECK(ffa_region_group_identity_map(
J-Alvescf6253e2024-01-03 13:48:48 +00001559 from_locked, fragments,
1560 fragment_constituent_counts, fragment_count,
1561 orig_from_mode, &local_page_pool,
1562 MAP_ACTION_COMMIT, NULL)
1563 .func == FFA_SUCCESS_32);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001564
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001565 ret = ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001566 goto out;
1567 }
1568
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001569 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001570
1571out:
1572 mpool_fini(&local_page_pool);
1573
1574 /*
1575 * Tidy up the page table by reclaiming failed mappings (if there was an
1576 * error) or merging entries into blocks where possible (on success).
1577 */
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -07001578 vm_ptable_defrag(from_locked, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001579
1580 return ret;
1581}
1582
1583/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001584 * Complete a memory sending operation by checking that it is valid, updating
1585 * the sender page table, and then either marking the share state as having
1586 * completed sending (on success) or freeing it (on failure).
1587 *
1588 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1589 */
J-Alvesfdd29272022-07-19 13:16:31 +01001590struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001591 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001592 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1593 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001594{
1595 struct ffa_memory_region *memory_region = share_state->memory_region;
J-Alves8f11cde2022-12-21 16:18:22 +00001596 struct ffa_composite_memory_region *composite;
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001597 struct ffa_memory_access *receiver;
Andrew Walbranca808b12020-05-15 17:22:28 +01001598 struct ffa_value ret;
1599
1600 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001601 assert(share_states.share_states != NULL);
J-Alves8f11cde2022-12-21 16:18:22 +00001602 assert(memory_region != NULL);
1603 composite = ffa_memory_region_get_composite(memory_region, 0);
1604 assert(composite != NULL);
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001605 receiver = ffa_memory_region_get_receiver(memory_region, 0);
1606 assert(receiver != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001607
1608 /* Check that state is valid in sender page table and update. */
1609 ret = ffa_send_check_update(
1610 from_locked, share_state->fragments,
1611 share_state->fragment_constituent_counts,
J-Alves8f11cde2022-12-21 16:18:22 +00001612 share_state->fragment_count, composite->page_count,
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001613 share_state->share_func, receiver,
J-Alves8f11cde2022-12-21 16:18:22 +00001614 memory_region->receiver_count, page_pool,
1615 memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001616 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001617 if (ret.func != FFA_SUCCESS_32) {
1618 /*
1619 * Free share state, it failed to send so it can't be retrieved.
1620 */
Karl Meakin4cec5e82023-06-30 16:30:22 +01001621 dlog_verbose("%s: failed to send check update: %s(%s)\n",
1622 __func__, ffa_func_name(ret.func),
1623 ffa_error_name(ffa_error_code(ret)));
Andrew Walbranca808b12020-05-15 17:22:28 +01001624 share_state_free(share_states, share_state, page_pool);
1625 return ret;
1626 }
1627
1628 share_state->sending_complete = true;
Karl Meakin4cec5e82023-06-30 16:30:22 +01001629 dlog_verbose("%s: marked sending complete.\n", __func__);
Andrew Walbranca808b12020-05-15 17:22:28 +01001630
J-Alvesee68c542020-10-29 17:48:20 +00001631 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001632}
1633
1634/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001635 * Check that the memory attributes match Hafnium expectations:
1636 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1637 * Write-Allocate Cacheable.
1638 */
1639static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001640 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001641{
1642 enum ffa_memory_type memory_type;
1643 enum ffa_memory_cacheability cacheability;
1644 enum ffa_memory_shareability shareability;
1645
1646 memory_type = ffa_get_memory_type_attr(attributes);
1647 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1648 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1649 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001650 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001651 }
1652
1653 cacheability = ffa_get_memory_cacheability_attr(attributes);
1654 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1655 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1656 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001657 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001658 }
1659
1660 shareability = ffa_get_memory_shareability_attr(attributes);
1661 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1662 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1663 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001664 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001665 }
1666
1667 return (struct ffa_value){.func = FFA_SUCCESS_32};
1668}
1669
1670/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001671 * Check that the given `memory_region` represents a valid memory send request
1672 * of the given `share_func` type: validate its length, offsets, receivers
1673 * and permissions, and update the stored permissions if necessary.
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001674 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001675 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001676 * not.
1677 */
J-Alves66652252022-07-06 09:49:51 +01001678struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001679 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1680 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001681 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001682{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001683 struct ffa_composite_memory_region *composite;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001684 struct ffa_memory_access *receiver =
1685 ffa_memory_region_get_receiver(memory_region, 0);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001686 uint64_t receivers_end;
1687 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001688 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001689 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001690 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001691 enum ffa_data_access data_access;
1692 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001693 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001694 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001695 const size_t minimum_first_fragment_length =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001696 memory_region->receivers_offset +
1697 memory_region->memory_access_desc_size +
1698 sizeof(struct ffa_composite_memory_region);
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001699
1700 if (fragment_length < minimum_first_fragment_length) {
1701 dlog_verbose("Fragment length %u too short (min %u).\n",
1702 (size_t)fragment_length,
1703 minimum_first_fragment_length);
1704 return ffa_error(FFA_INVALID_PARAMETERS);
1705 }
1706
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001707 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1708 "struct ffa_memory_region_constituent must be 16 bytes");
1709 if (!is_aligned(fragment_length,
1710 sizeof(struct ffa_memory_region_constituent)) ||
1711 !is_aligned(memory_share_length,
1712 sizeof(struct ffa_memory_region_constituent))) {
1713 dlog_verbose(
1714 "Fragment length %u or total length %u"
1715 " is not 16-byte aligned.\n",
1716 fragment_length, memory_share_length);
1717 return ffa_error(FFA_INVALID_PARAMETERS);
1718 }
1719
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001720 if (fragment_length > memory_share_length) {
1721 dlog_verbose(
1722 "Fragment length %u greater than total length %u.\n",
1723 (size_t)fragment_length, (size_t)memory_share_length);
1724 return ffa_error(FFA_INVALID_PARAMETERS);
1725 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001726
J-Alves95df0ef2022-12-07 10:09:48 +00001727 /* The sender must match the caller. */
1728 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1729 vm_id_is_current_world(memory_region->sender)) ||
1730 (vm_id_is_current_world(from_locked.vm->id) &&
1731 memory_region->sender != from_locked.vm->id)) {
1732 dlog_verbose("Invalid memory sender ID.\n");
1733 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001734 }
1735
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001736	if (memory_region->receiver_count == 0) {
1737 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001738 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001739 }
1740
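	/*
	 * Expected layout of the transaction descriptor, enforced by the
	 * checks below: the ffa_memory_region header, then receiver_count
	 * memory access descriptors of memory_access_desc_size bytes each
	 * starting at receivers_offset, then a single
	 * ffa_composite_memory_region (at the offset shared by all access
	 * descriptors) followed by its constituents.
	 */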
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001741 /*
1742 * Ensure that the composite header is within the memory bounds and
1743 * doesn't overlap the first part of the message. Cast to uint64_t
1744 * to prevent overflow.
1745 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001746 receivers_end = ((uint64_t)memory_region->memory_access_desc_size *
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001747 (uint64_t)memory_region->receiver_count) +
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01001748 memory_region->receivers_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001749 min_length = receivers_end +
1750 sizeof(struct ffa_composite_memory_region) +
1751 sizeof(struct ffa_memory_region_constituent);
1752 if (min_length > memory_share_length) {
1753 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1754 (size_t)memory_share_length, (size_t)min_length);
1755 return ffa_error(FFA_INVALID_PARAMETERS);
1756 }
1757
1758 composite_memory_region_offset =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001759 receiver->composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001760
1761 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001762 * Check that the composite memory region descriptor is after the access
1763 * descriptors, is at least 16-byte aligned, and fits in the first
1764 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001765 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001766 if ((composite_memory_region_offset < receivers_end) ||
1767 (composite_memory_region_offset % 16 != 0) ||
1768 (composite_memory_region_offset >
1769 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1770 dlog_verbose(
1771 "Invalid composite memory region descriptor offset "
1772 "%u.\n",
1773 (size_t)composite_memory_region_offset);
1774 return ffa_error(FFA_INVALID_PARAMETERS);
1775 }
1776
1777 /*
1778 * Compute the start of the constituent regions. Already checked
1779 * to be not more than fragment_length and thus not more than
1780 * memory_share_length.
1781 */
1782 constituents_start = composite_memory_region_offset +
1783 sizeof(struct ffa_composite_memory_region);
1784 constituents_length = memory_share_length - constituents_start;
1785
1786 /*
1787 * Check that the number of constituents is consistent with the length
1788 * of the constituent region.
1789 */
1790 composite = ffa_memory_region_get_composite(memory_region, 0);
1791 if ((constituents_length %
1792 sizeof(struct ffa_memory_region_constituent) !=
1793 0) ||
1794 ((constituents_length /
1795 sizeof(struct ffa_memory_region_constituent)) !=
1796 composite->constituent_count)) {
1797 dlog_verbose("Invalid length %u or composite offset %u.\n",
1798 (size_t)memory_share_length,
1799 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001800 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001801 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001802 if (fragment_length < memory_share_length &&
1803 fragment_length < HF_MAILBOX_SIZE) {
1804 dlog_warning(
1805 "Initial fragment length %d smaller than mailbox "
1806 "size.\n",
1807 fragment_length);
1808 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001809
Andrew Walbrana65a1322020-04-06 19:32:32 +01001810 /*
1811 * Clear is not allowed for memory sharing, as the sender still has
1812 * access to the memory.
1813 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001814 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1815 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001816 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001817 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001818 }
1819
1820 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001821 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001822 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001823 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001824 }
1825
J-Alves363f5722022-04-25 17:37:37 +01001826 /* Check that the permissions are valid, for each specified receiver. */
1827 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001828 struct ffa_memory_region_attributes receiver_permissions;
1829
1830 receiver = ffa_memory_region_get_receiver(memory_region, i);
1831 assert(receiver != NULL);
1832 receiver_permissions = receiver->receiver_permissions;
J-Alves363f5722022-04-25 17:37:37 +01001833 ffa_memory_access_permissions_t permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001834 receiver_permissions.permissions;
1835 ffa_id_t receiver_id = receiver_permissions.receiver;
J-Alves363f5722022-04-25 17:37:37 +01001836
1837 if (memory_region->sender == receiver_id) {
1838 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001839 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001840 }
Federico Recanati85090c42021-12-15 13:17:54 +01001841
J-Alves363f5722022-04-25 17:37:37 +01001842 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1843 j++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001844 struct ffa_memory_access *other_receiver =
1845 ffa_memory_region_get_receiver(memory_region,
1846 j);
1847 assert(other_receiver != NULL);
1848
J-Alves363f5722022-04-25 17:37:37 +01001849 if (receiver_id ==
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001850 other_receiver->receiver_permissions.receiver) {
J-Alves363f5722022-04-25 17:37:37 +01001851 dlog_verbose(
1852 "Repeated receiver(%x) in memory send "
1853 "operation.\n",
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001854 other_receiver->receiver_permissions
1855 .receiver);
J-Alves363f5722022-04-25 17:37:37 +01001856 return ffa_error(FFA_INVALID_PARAMETERS);
1857 }
1858 }
1859
1860 if (composite_memory_region_offset !=
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001861 receiver->composite_memory_region_offset) {
J-Alves363f5722022-04-25 17:37:37 +01001862 dlog_verbose(
1863 "All ffa_memory_access should point to the "
1864 "same composite memory region offset.\n");
1865 return ffa_error(FFA_INVALID_PARAMETERS);
1866 }
1867
1868 data_access = ffa_get_data_access_attr(permissions);
1869 instruction_access =
1870 ffa_get_instruction_access_attr(permissions);
1871 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1872 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1873 dlog_verbose(
1874 "Reserved value for receiver permissions "
1875 "%#x.\n",
1876 permissions);
1877 return ffa_error(FFA_INVALID_PARAMETERS);
1878 }
1879 if (instruction_access !=
1880 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1881 dlog_verbose(
1882 "Invalid instruction access permissions %#x "
1883 "for sending memory.\n",
1884 permissions);
1885 return ffa_error(FFA_INVALID_PARAMETERS);
1886 }
1887 if (share_func == FFA_MEM_SHARE_32) {
1888 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1889 dlog_verbose(
1890 "Invalid data access permissions %#x "
1891 "for sharing memory.\n",
1892 permissions);
1893 return ffa_error(FFA_INVALID_PARAMETERS);
1894 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00001895 /*
1896 * According to section 10.10.3 of the FF-A v1.1 EAC0
1897 * spec, NX is required for share operations (but must
1898 * not be specified by the sender) so set it in the
1899 * copy that we store, ready to be returned to the
1900 * retriever.
1901 */
1902 if (vm_id_is_current_world(receiver_id)) {
1903 ffa_set_instruction_access_attr(
1904 &permissions,
1905 FFA_INSTRUCTION_ACCESS_NX);
1906				receiver->receiver_permissions.permissions =
					permissions;
1907 }
J-Alves363f5722022-04-25 17:37:37 +01001908 }
1909 if (share_func == FFA_MEM_LEND_32 &&
1910 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1911 dlog_verbose(
1912 "Invalid data access permissions %#x for "
1913 "lending memory.\n",
1914 permissions);
1915 return ffa_error(FFA_INVALID_PARAMETERS);
1916 }
1917
1918 if (share_func == FFA_MEM_DONATE_32 &&
1919 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1920 dlog_verbose(
1921 "Invalid data access permissions %#x for "
1922 "donating memory.\n",
1923 permissions);
1924 return ffa_error(FFA_INVALID_PARAMETERS);
1925 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001926 }
1927
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001928 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1929 security_state =
1930 ffa_get_memory_security_attr(memory_region->attributes);
1931 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1932 dlog_verbose(
1933 "Invalid security state for memory share operation.\n");
1934 return ffa_error(FFA_INVALID_PARAMETERS);
1935 }
1936
Federico Recanatid937f5e2021-12-20 17:38:23 +01001937 /*
J-Alves807794e2022-06-16 13:42:47 +01001938	 * For a memory donate, or a lend with a single borrower, the memory
1939	 * type shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001940 */
J-Alves807794e2022-06-16 13:42:47 +01001941 if (share_func == FFA_MEM_DONATE_32 ||
1942 (share_func == FFA_MEM_LEND_32 &&
1943 memory_region->receiver_count == 1)) {
1944 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1945 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1946 dlog_verbose(
1947 "Memory type shall not be specified by "
1948 "sender.\n");
1949 return ffa_error(FFA_INVALID_PARAMETERS);
1950 }
1951 } else {
1952 /*
1953 * Check that sender's memory attributes match Hafnium
1954 * expectations: Normal Memory, Inner shareable, Write-Back
1955 * Read-Allocate Write-Allocate Cacheable.
1956 */
1957 ret = ffa_memory_attributes_validate(memory_region->attributes);
1958 if (ret.func != FFA_SUCCESS_32) {
1959 return ret;
1960 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001961 }
1962
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001963 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001964}
1965
1966/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001967 * Gets the share state for continuing an operation to donate, lend or share
1968 * memory, and checks that it is a valid request.
1969 *
1970 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1971 * not.
1972 */
J-Alvesfdd29272022-07-19 13:16:31 +01001973struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001974 struct share_states_locked share_states, ffa_memory_handle_t handle,
J-Alves19e20cf2023-08-02 12:48:55 +01001975 struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01001976 struct mpool *page_pool)
1977{
1978 struct ffa_memory_share_state *share_state;
1979 struct ffa_memory_region *memory_region;
1980
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001981 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001982
1983 /*
1984 * Look up the share state by handle and make sure that the VM ID
1985 * matches.
1986 */
Karl Meakin4a2854a2023-06-30 16:26:52 +01001987 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00001988 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001989 dlog_verbose(
1990 "Invalid handle %#x for memory send continuation.\n",
1991 handle);
1992 return ffa_error(FFA_INVALID_PARAMETERS);
1993 }
1994 memory_region = share_state->memory_region;
1995
J-Alvesfdd29272022-07-19 13:16:31 +01001996 if (vm_id_is_current_world(from_vm_id) &&
1997 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001998 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1999 return ffa_error(FFA_INVALID_PARAMETERS);
2000 }
2001
2002 if (share_state->sending_complete) {
2003 dlog_verbose(
2004 "Sending of memory handle %#x is already complete.\n",
2005 handle);
2006 return ffa_error(FFA_INVALID_PARAMETERS);
2007 }
2008
2009 if (share_state->fragment_count == MAX_FRAGMENTS) {
2010 /*
2011 * Log a warning as this is a sign that MAX_FRAGMENTS should
2012 * probably be increased.
2013 */
2014 dlog_warning(
2015 "Too many fragments for memory share with handle %#x; "
2016 "only %d supported.\n",
2017 handle, MAX_FRAGMENTS);
2018 /* Free share state, as it's not possible to complete it. */
2019 share_state_free(share_states, share_state, page_pool);
2020 return ffa_error(FFA_NO_MEMORY);
2021 }
2022
2023 *share_state_ret = share_state;
2024
2025 return (struct ffa_value){.func = FFA_SUCCESS_32};
2026}
2027
2028/**
J-Alves95df0ef2022-12-07 10:09:48 +00002029 * Checks if there is at least one receiver from the other world.
2030 */
J-Alvesfdd29272022-07-19 13:16:31 +01002031bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00002032 struct ffa_memory_region *memory_region)
2033{
2034 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002035 struct ffa_memory_access *receiver =
2036 ffa_memory_region_get_receiver(memory_region, i);
2037 assert(receiver != NULL);
2038 ffa_id_t receiver_id = receiver->receiver_permissions.receiver;
2039
2040 if (!vm_id_is_current_world(receiver_id)) {
J-Alves95df0ef2022-12-07 10:09:48 +00002041 return true;
2042 }
2043 }
2044 return false;
2045}
2046
2047/**
J-Alves9da280b2022-12-21 14:55:39 +00002048 * Validates a call to donate, lend or share memory in which Hafnium is the
2049 * designated allocator of the memory handle. In practice, this also means
2050 * Hafnium is responsible for managing the state structures for the transaction.
2051 * If Hafnium is the SPMC, it should allocate the memory handle when either the
2052 * sender is an SP or there is at least one borrower that is an SP.
2053 * If Hafnium is the hypervisor, it should allocate the memory handle when
2054 * the operation involves only NWd VMs.
2055 *
2056 * If validation goes well, Hafnium updates the stage-2 page tables of the
2057 * sender. Validation consists of checking if the message length and number of
2058 * memory region constituents match, and if the transition is valid for the
2059 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00002060 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002061 * Assumes that the caller has already found and locked the sender VM and copied
2062 * the memory region descriptor from the sender's TX buffer to a freshly
2063 * allocated page from Hafnium's internal pool. The caller must have also
2064 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002065 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002066 * This function takes ownership of the `memory_region` passed in and will free
2067 * it when necessary; it must not be freed by the caller.
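 *
 * If the first fragment does not contain the whole descriptor, FFA_MEM_FRAG_RX
 * is returned and the remaining fragments are expected to be provided later
 * via ffa_memory_send_continue.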
Jose Marinho09b1db82019-08-08 09:16:59 +01002068 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002069struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002070 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002071 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002072 uint32_t fragment_length, uint32_t share_func,
2073 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01002074{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002075 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002076 struct share_states_locked share_states;
2077 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01002078
2079 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01002080 * If there is an error validating the `memory_region` then we need to
2081 * free it because we own it but we won't be storing it in a share state
2082 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01002083 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002084 ret = ffa_memory_send_validate(from_locked, memory_region,
2085 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01002086 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002087 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002088 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01002089 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002090 }
2091
Andrew Walbrana65a1322020-04-06 19:32:32 +01002092 /* Set flag for share function, ready to be retrieved later. */
2093 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002094 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002095 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002096 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002097 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002098 case FFA_MEM_LEND_32:
2099 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002100 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002101 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01002102 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002103 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01002104 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01002105 }
2106
Andrew Walbranca808b12020-05-15 17:22:28 +01002107 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002108 /*
2109 * Allocate a share state before updating the page table. Otherwise if
2110 * updating the page table succeeded but allocating the share state
2111 * failed then it would leave the memory in a state where nobody could
2112 * get it back.
2113 */
Karl Meakin52cdfe72023-06-30 14:49:10 +01002114 share_state = allocate_share_state(share_states, share_func,
2115 memory_region, fragment_length,
2116 FFA_MEMORY_HANDLE_INVALID);
J-Alvesb56aac82023-11-10 09:44:43 +00002117 if (share_state == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002118 dlog_verbose("Failed to allocate share state.\n");
2119 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01002120 ret = ffa_error(FFA_NO_MEMORY);
2121 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002122 }
2123
Andrew Walbranca808b12020-05-15 17:22:28 +01002124 if (fragment_length == memory_share_length) {
2125 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00002126 ret = ffa_memory_send_complete(
2127 from_locked, share_states, share_state, page_pool,
2128 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002129 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01002130 /*
2131 * Use sender ID from 'memory_region' assuming
2132 * that at this point it has been validated:
2133 * - MBZ at virtual FF-A instance.
2134 */
J-Alves19e20cf2023-08-02 12:48:55 +01002135 ffa_id_t sender_to_ret =
J-Alvesfdd29272022-07-19 13:16:31 +01002136 (from_locked.vm->id == HF_OTHER_WORLD_ID)
2137 ? memory_region->sender
2138 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01002139 ret = (struct ffa_value){
2140 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00002141 .arg1 = (uint32_t)memory_region->handle,
2142 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01002143 .arg3 = fragment_length,
2144 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01002145 }
2146
2147out:
2148 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002149 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01002150 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002151}
2152
2153/**
J-Alves8505a8a2022-06-15 18:10:18 +01002154 * Continues an operation to donate, lend or share memory to a VM from current
2155 * world. If this is the last fragment then checks that the transition is valid
2156 * for the type of memory sending operation and updates the stage-2 page tables
2157 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01002158 *
2159 * Assumes that the caller has already found and locked the sender VM and copied
2160 * the memory region descriptor from the sender's TX buffer to a freshly
2161 * allocated page from Hafnium's internal pool.
2162 *
2163 * This function takes ownership of the `fragment` passed in; it must not be
2164 * freed by the caller.
2165 */
2166struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
2167 void *fragment,
2168 uint32_t fragment_length,
2169 ffa_memory_handle_t handle,
2170 struct mpool *page_pool)
2171{
2172 struct share_states_locked share_states = share_states_lock();
2173 struct ffa_memory_share_state *share_state;
2174 struct ffa_value ret;
2175 struct ffa_memory_region *memory_region;
2176
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05002177 CHECK(is_aligned(fragment,
2178 alignof(struct ffa_memory_region_constituent)));
2179 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2180 0) {
2181 dlog_verbose("Fragment length %u misaligned.\n",
2182 fragment_length);
2183 ret = ffa_error(FFA_INVALID_PARAMETERS);
2184 goto out_free_fragment;
2185 }
2186
Andrew Walbranca808b12020-05-15 17:22:28 +01002187 ret = ffa_memory_send_continue_validate(share_states, handle,
2188 &share_state,
2189 from_locked.vm->id, page_pool);
2190 if (ret.func != FFA_SUCCESS_32) {
2191 goto out_free_fragment;
2192 }
2193 memory_region = share_state->memory_region;
2194
J-Alves95df0ef2022-12-07 10:09:48 +00002195 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002196 dlog_error(
2197 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01002198 "other world. This should never happen, and indicates "
2199			"a bug in EL3 code.\n");
2201 ret = ffa_error(FFA_INVALID_PARAMETERS);
2202 goto out_free_fragment;
2203 }
2204
2205 /* Add this fragment. */
2206 share_state->fragments[share_state->fragment_count] = fragment;
2207 share_state->fragment_constituent_counts[share_state->fragment_count] =
2208 fragment_length / sizeof(struct ffa_memory_region_constituent);
2209 share_state->fragment_count++;
2210
2211 /* Check whether the memory send operation is now ready to complete. */
2212 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00002213 ret = ffa_memory_send_complete(
2214 from_locked, share_states, share_state, page_pool,
2215 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01002216 } else {
2217 ret = (struct ffa_value){
2218 .func = FFA_MEM_FRAG_RX_32,
2219 .arg1 = (uint32_t)handle,
2220 .arg2 = (uint32_t)(handle >> 32),
2221 .arg3 = share_state_next_fragment_offset(share_states,
2222 share_state)};
2223 }
2224 goto out;
2225
2226out_free_fragment:
2227 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002228
2229out:
Andrew Walbranca808b12020-05-15 17:22:28 +01002230 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002231 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002232}
2233
Andrew Walbranca808b12020-05-15 17:22:28 +01002234/** Clean up after the receiver has finished retrieving a memory region. */
2235static void ffa_memory_retrieve_complete(
2236 struct share_states_locked share_states,
2237 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
2238{
2239 if (share_state->share_func == FFA_MEM_DONATE_32) {
2240 /*
2241 * Memory that has been donated can't be relinquished,
2242 * so no need to keep the share state around.
2243 */
2244 share_state_free(share_states, share_state, page_pool);
2245 dlog_verbose("Freed share state for donate.\n");
2246 }
2247}
2248
J-Alves2d8457f2022-10-05 11:06:41 +01002249/**
2250 * Initialises the given memory region descriptor to be used for an
2251 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
2252 * fragment.
2253 * The memory region descriptor is initialized according to the retriever's
2254 * FF-A version.
2255 *
2256 * Returns true on success, or false if the given constituents won't all fit in
2257 * the first fragment.
2258 */
2259static bool ffa_retrieved_memory_region_init(
2260 void *response, uint32_t ffa_version, size_t response_max_size,
J-Alves19e20cf2023-08-02 12:48:55 +01002261 ffa_id_t sender, ffa_memory_attributes_t attributes,
J-Alves2d8457f2022-10-05 11:06:41 +01002262 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002263 ffa_memory_access_permissions_t permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002264 struct ffa_memory_access *receivers, size_t receiver_count,
2265 uint32_t memory_access_desc_size, uint32_t page_count,
2266 uint32_t total_constituent_count,
J-Alves2d8457f2022-10-05 11:06:41 +01002267 const struct ffa_memory_region_constituent constituents[],
2268 uint32_t fragment_constituent_count, uint32_t *total_length,
2269 uint32_t *fragment_length)
2270{
2271 struct ffa_composite_memory_region *composite_memory_region;
J-Alves2d8457f2022-10-05 11:06:41 +01002272 uint32_t i;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002273 uint32_t composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002274 uint32_t constituents_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002275
2276 assert(response != NULL);
2277
2278 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2279 struct ffa_memory_region_v1_0 *retrieve_response =
2280 (struct ffa_memory_region_v1_0 *)response;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002281 struct ffa_memory_access_v1_0 *receiver;
J-Alves2d8457f2022-10-05 11:06:41 +01002282
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002283 ffa_memory_region_init_header_v1_0(retrieve_response, sender,
2284 attributes, flags, handle, 0,
2285 receiver_count);
J-Alves2d8457f2022-10-05 11:06:41 +01002286
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002287 receiver = (struct ffa_memory_access_v1_0 *)
2288 retrieve_response->receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002289 receiver_count = retrieve_response->receiver_count;
2290
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002291 for (uint32_t i = 0; i < receiver_count; i++) {
2292 ffa_id_t receiver_id =
2293 receivers[i].receiver_permissions.receiver;
2294 ffa_memory_receiver_flags_t recv_flags =
2295 receivers[i].receiver_permissions.flags;
2296
2297			 * Initialized here since, in memory retrieve responses,
2298			 * we currently expect one borrower to be specified.
2299 * currently expect one borrower to be specified.
2300 */
2301 ffa_memory_access_init_v1_0(
2302 receiver, receiver_id,
2303 ffa_get_data_access_attr(permissions),
2304 ffa_get_instruction_access_attr(permissions),
2305 recv_flags);
2306 }
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002307
2308 composite_offset =
J-Alves2d8457f2022-10-05 11:06:41 +01002309 sizeof(struct ffa_memory_region_v1_0) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002310 receiver_count * sizeof(struct ffa_memory_access_v1_0);
2311 receiver->composite_memory_region_offset = composite_offset;
J-Alves2d8457f2022-10-05 11:06:41 +01002312
2313 composite_memory_region = ffa_memory_region_get_composite_v1_0(
2314 retrieve_response, 0);
2315 } else {
J-Alves2d8457f2022-10-05 11:06:41 +01002316 struct ffa_memory_region *retrieve_response =
2317 (struct ffa_memory_region *)response;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002318 struct ffa_memory_access *retrieve_response_receivers;
J-Alves2d8457f2022-10-05 11:06:41 +01002319
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002320 ffa_memory_region_init_header(
2321 retrieve_response, sender, attributes, flags, handle, 0,
2322 receiver_count, memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002323
2324 /*
2325		 * Note that `sizeof(struct ffa_memory_region)` and
2326		 * `sizeof(struct ffa_memory_access)` must both be multiples of
2327		 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
2328 * guaranteed that the offset we calculate here is aligned to a
2329 * 64-bit boundary and so 64-bit values can be copied without
2330 * alignment faults.
2331 */
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002332 composite_offset =
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01002333 retrieve_response->receivers_offset +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002334 (uint32_t)(receiver_count *
2335 retrieve_response->memory_access_desc_size);
J-Alves2d8457f2022-10-05 11:06:41 +01002336
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002337 retrieve_response_receivers =
2338 ffa_memory_region_get_receiver(retrieve_response, 0);
2339 assert(retrieve_response_receivers != NULL);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002340
2341 /*
2342		 * Initialized here since, in memory retrieve responses, we
2343		 * currently expect one borrower to be specified.
2344 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002345 memcpy_s(retrieve_response_receivers,
2346 sizeof(struct ffa_memory_access) * receiver_count,
2347 receivers,
2348 sizeof(struct ffa_memory_access) * receiver_count);
2349
2350 retrieve_response_receivers->composite_memory_region_offset =
2351 composite_offset;
2352
J-Alves2d8457f2022-10-05 11:06:41 +01002353 composite_memory_region =
2354 ffa_memory_region_get_composite(retrieve_response, 0);
2355 }
2356
J-Alves2d8457f2022-10-05 11:06:41 +01002357 assert(composite_memory_region != NULL);
2358
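	/*
	 * Fill in the composite descriptor and copy the constituents of the
	 * first fragment, failing if they do not fit in the response buffer;
	 * the total and fragment lengths are reported back through the output
	 * parameters.
	 */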
J-Alves2d8457f2022-10-05 11:06:41 +01002359 composite_memory_region->page_count = page_count;
2360 composite_memory_region->constituent_count = total_constituent_count;
2361 composite_memory_region->reserved_0 = 0;
2362
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002363 constituents_offset =
2364 composite_offset + sizeof(struct ffa_composite_memory_region);
J-Alves2d8457f2022-10-05 11:06:41 +01002365 if (constituents_offset +
2366 fragment_constituent_count *
2367 sizeof(struct ffa_memory_region_constituent) >
2368 response_max_size) {
2369 return false;
2370 }
2371
2372 for (i = 0; i < fragment_constituent_count; ++i) {
2373 composite_memory_region->constituents[i] = constituents[i];
2374 }
2375
2376 if (total_length != NULL) {
2377 *total_length =
2378 constituents_offset +
2379 composite_memory_region->constituent_count *
2380 sizeof(struct ffa_memory_region_constituent);
2381 }
2382 if (fragment_length != NULL) {
2383 *fragment_length =
2384 constituents_offset +
2385 fragment_constituent_count *
2386 sizeof(struct ffa_memory_region_constituent);
2387 }
2388
2389 return true;
2390}
2391
J-Alves96de29f2022-04-26 16:05:24 +01002392/**
2393 * Validates the retrieved permissions against those specified by the lender
2394 * of the memory share operation. Optionally sets the permissions to be used
2395 * for the S2 mapping, through the `permissions` argument.
J-Alvesdcad8992023-09-15 14:10:35 +01002396 * Returns FFA_SUCCESS if all the fields are valid; otherwise FFA_ERROR with error code:
2397 * - FFA_INVALID_PARAMETERS -> if the fields have invalid values as per the
2398 * specification for each ABI.
2399 * - FFA_DENIED -> if the permissions specified by the retriever are more
2400 * permissive than those provided by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002401 */
J-Alvesdcad8992023-09-15 14:10:35 +01002402static struct ffa_value ffa_memory_retrieve_is_memory_access_valid(
2403 uint32_t share_func, enum ffa_data_access sent_data_access,
J-Alves96de29f2022-04-26 16:05:24 +01002404 enum ffa_data_access requested_data_access,
2405 enum ffa_instruction_access sent_instruction_access,
2406 enum ffa_instruction_access requested_instruction_access,
J-Alvesdcad8992023-09-15 14:10:35 +01002407 ffa_memory_access_permissions_t *permissions, bool multiple_borrowers)
J-Alves96de29f2022-04-26 16:05:24 +01002408{
2409 switch (sent_data_access) {
2410 case FFA_DATA_ACCESS_NOT_SPECIFIED:
2411 case FFA_DATA_ACCESS_RW:
2412 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2413 requested_data_access == FFA_DATA_ACCESS_RW) {
2414 if (permissions != NULL) {
2415 ffa_set_data_access_attr(permissions,
2416 FFA_DATA_ACCESS_RW);
2417 }
2418 break;
2419 }
2420 /* Intentional fall-through. */
2421 case FFA_DATA_ACCESS_RO:
2422 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
2423 requested_data_access == FFA_DATA_ACCESS_RO) {
2424 if (permissions != NULL) {
2425 ffa_set_data_access_attr(permissions,
2426 FFA_DATA_ACCESS_RO);
2427 }
2428 break;
2429 }
2430 dlog_verbose(
2431 "Invalid data access requested; sender specified "
2432 "permissions %#x but receiver requested %#x.\n",
2433 sent_data_access, requested_data_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002434 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002435 case FFA_DATA_ACCESS_RESERVED:
2436 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
2437 "checked before this point.");
2438 }
2439
J-Alvesdcad8992023-09-15 14:10:35 +01002440 /*
2441	 * For operations with a single borrower, if it is an FFA_MEM_LEND
2442	 * or FFA_MEM_DONATE the retriever should have specified the
2443 * instruction permissions it wishes to receive.
2444 */
2445 switch (share_func) {
2446 case FFA_MEM_SHARE_32:
2447 if (requested_instruction_access !=
2448 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2449 dlog_verbose(
2450 "%s: for share instruction permissions must "
2451 "NOT be specified.\n",
2452 __func__);
2453 return ffa_error(FFA_INVALID_PARAMETERS);
2454 }
2455 break;
2456 case FFA_MEM_LEND_32:
2457 /*
2458		 * For operations with multiple borrowers, only NX permissions
2459		 * are allowed; both the sender and the borrower should have
2460		 * used FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED.
2461 */
2462 if (multiple_borrowers) {
2463 if (requested_instruction_access !=
2464 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2465 dlog_verbose(
2466 "%s: lend/share/donate with multiple "
2467 "borrowers "
2468 "instruction permissions must NOT be "
2469 "specified.\n",
2470 __func__);
2471 return ffa_error(FFA_INVALID_PARAMETERS);
2472 }
2473 break;
2474 }
2475 /* Fall through if the operation targets a single borrower. */
2476 case FFA_MEM_DONATE_32:
2477 if (!multiple_borrowers &&
2478 requested_instruction_access ==
2479 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
2480 dlog_verbose(
2481 "%s: for lend/donate with single borrower "
2482 "instruction permissions must be speficified "
2483 "by borrower\n",
2484 __func__);
2485 return ffa_error(FFA_INVALID_PARAMETERS);
2486 }
2487 break;
2488 default:
2489 panic("%s: Wrong func id provided.\n", __func__);
2490 }
2491
J-Alves96de29f2022-04-26 16:05:24 +01002492 switch (sent_instruction_access) {
2493 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
2494 case FFA_INSTRUCTION_ACCESS_X:
J-Alvesdcad8992023-09-15 14:10:35 +01002495 if (requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
J-Alves96de29f2022-04-26 16:05:24 +01002496 if (permissions != NULL) {
2497 ffa_set_instruction_access_attr(
2498 permissions, FFA_INSTRUCTION_ACCESS_X);
2499 }
2500 break;
2501 }
J-Alvesdcad8992023-09-15 14:10:35 +01002502 /*
2503 * Fall through if requested permissions are less
2504 * permissive than those provided by the sender.
2505 */
J-Alves96de29f2022-04-26 16:05:24 +01002506 case FFA_INSTRUCTION_ACCESS_NX:
2507 if (requested_instruction_access ==
2508 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
2509 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
2510 if (permissions != NULL) {
2511 ffa_set_instruction_access_attr(
2512 permissions, FFA_INSTRUCTION_ACCESS_NX);
2513 }
2514 break;
2515 }
2516 dlog_verbose(
2517 "Invalid instruction access requested; sender "
2518 "specified permissions %#x but receiver requested "
2519 "%#x.\n",
2520 sent_instruction_access, requested_instruction_access);
J-Alvesdcad8992023-09-15 14:10:35 +01002521 return ffa_error(FFA_DENIED);
J-Alves96de29f2022-04-26 16:05:24 +01002522 case FFA_INSTRUCTION_ACCESS_RESERVED:
2523 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
2524 "be checked before this point.");
2525 }
2526
J-Alvesdcad8992023-09-15 14:10:35 +01002527 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves96de29f2022-04-26 16:05:24 +01002528}
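
/*
 * Illustrative sketch (guarded out of the build) of how the validation above
 * resolves permissions, using an assumed example: a lender that sent RW data
 * access and NX instruction access on an FFA_MEM_LEND with a single borrower
 * allows that borrower to request RO/NX, and the resolved S2 permissions come
 * back as RO/NX.
 */
#if 0
static void ffa_memory_access_validation_example(void)
{
	ffa_memory_access_permissions_t resolved = 0;
	struct ffa_value ret = ffa_memory_retrieve_is_memory_access_valid(
		FFA_MEM_LEND_32, FFA_DATA_ACCESS_RW, FFA_DATA_ACCESS_RO,
		FFA_INSTRUCTION_ACCESS_NX, FFA_INSTRUCTION_ACCESS_NX,
		&resolved, false);

	/* Expect success, with RO data access and NX resolved for S2. */
	assert(ret.func == FFA_SUCCESS_32);
	assert(ffa_get_data_access_attr(resolved) == FFA_DATA_ACCESS_RO);
	assert(ffa_get_instruction_access_attr(resolved) ==
	       FFA_INSTRUCTION_ACCESS_NX);
}
#endif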
2529
2530/**
2531 * Validate the receivers' permissions in the retrieve request against those
2532 * specified by the lender.
2533 * Returns, in the `permissions` argument, the permissions to set at S2 for
2534 * the caller of FFA_MEM_RETRIEVE_REQ.
J-Alves3456e032023-07-20 12:20:05 +01002535 * The function checks the flag to bypass multiple borrower checks:
2536 * - If not set, returns FFA_SUCCESS if all specified permissions are valid.
2537 * - If set, returns FFA_SUCCESS if the descriptor contains the permissions
2538 * of the caller of FFA_MEM_RETRIEVE_REQ and they are valid. Other permissions
2539 * are ignored, if provided.
J-Alves96de29f2022-04-26 16:05:24 +01002540 */
2541static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
2542 struct ffa_memory_region *memory_region,
J-Alves19e20cf2023-08-02 12:48:55 +01002543 struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002544 ffa_memory_access_permissions_t *permissions,
2545 struct ffa_memory_access **receiver_ret, uint32_t func_id)
J-Alves96de29f2022-04-26 16:05:24 +01002546{
2547 uint32_t retrieve_receiver_index;
J-Alves3456e032023-07-20 12:20:05 +01002548 bool bypass_multi_receiver_check =
2549 (retrieve_request->flags &
2550 FFA_MEMORY_REGION_FLAG_BYPASS_BORROWERS_CHECK) != 0U;
J-Alvesdcad8992023-09-15 14:10:35 +01002551 const uint32_t region_receiver_count = memory_region->receiver_count;
2552 struct ffa_value ret;
J-Alves96de29f2022-04-26 16:05:24 +01002553
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002554 assert(receiver_ret != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002555 assert(permissions != NULL);
2556
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002557 *permissions = 0;
2558
J-Alves3456e032023-07-20 12:20:05 +01002559 if (!bypass_multi_receiver_check) {
J-Alvesdcad8992023-09-15 14:10:35 +01002560 if (retrieve_request->receiver_count != region_receiver_count) {
J-Alves3456e032023-07-20 12:20:05 +01002561 dlog_verbose(
2562 "Retrieve request should contain same list of "
2563 "borrowers, as specified by the lender.\n");
2564 return ffa_error(FFA_INVALID_PARAMETERS);
2565 }
2566 } else {
2567 if (retrieve_request->receiver_count != 1) {
2568 dlog_verbose(
2569 "Set bypass multiple borrower check, receiver "
2570 "list must be sized 1 (%x)\n",
2571 memory_region->receiver_count);
2572 return ffa_error(FFA_INVALID_PARAMETERS);
2573 }
J-Alves96de29f2022-04-26 16:05:24 +01002574 }
2575
2576 retrieve_receiver_index = retrieve_request->receiver_count;
2577
J-Alves96de29f2022-04-26 16:05:24 +01002578 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2579 ffa_memory_access_permissions_t sent_permissions;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002580 struct ffa_memory_access *retrieve_request_receiver =
2581 ffa_memory_region_get_receiver(retrieve_request, i);
2582 assert(retrieve_request_receiver != NULL);
J-Alves96de29f2022-04-26 16:05:24 +01002583 ffa_memory_access_permissions_t requested_permissions =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002584 retrieve_request_receiver->receiver_permissions
2585 .permissions;
J-Alves19e20cf2023-08-02 12:48:55 +01002586 ffa_id_t current_receiver_id =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002587 retrieve_request_receiver->receiver_permissions
2588 .receiver;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002589 struct ffa_memory_access *receiver;
2590 uint32_t mem_region_receiver_index;
2591 bool permissions_RO;
2592 bool clear_memory_flags;
J-Alves96de29f2022-04-26 16:05:24 +01002593 bool found_to_id = current_receiver_id == to_vm_id;
2594
J-Alves3456e032023-07-20 12:20:05 +01002595 if (bypass_multi_receiver_check && !found_to_id) {
2596 dlog_verbose(
2597 "Bypass multiple borrower check for id %x.\n",
2598 current_receiver_id);
2599 continue;
2600 }
2601
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002602 if (retrieve_request_receiver->composite_memory_region_offset !=
2603 0U) {
2604 dlog_verbose(
2605 "Retriever specified address ranges not "
2606 "supported (got offset %d).\n",
2607 retrieve_request_receiver
2608 ->composite_memory_region_offset);
2609 return ffa_error(FFA_INVALID_PARAMETERS);
2610 }
2611
J-Alves96de29f2022-04-26 16:05:24 +01002612 /*
2613 * Find the current receiver in the transaction descriptor from
2614 * sender.
2615 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002616 mem_region_receiver_index =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002617 ffa_memory_region_get_receiver_index(
2618 memory_region, current_receiver_id);
J-Alves96de29f2022-04-26 16:05:24 +01002619
2620 if (mem_region_receiver_index ==
2621 memory_region->receiver_count) {
2622 dlog_verbose("%s: receiver %x not found\n", __func__,
2623 current_receiver_id);
2624 return ffa_error(FFA_DENIED);
2625 }
2626
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002627 receiver = ffa_memory_region_get_receiver(
2628 memory_region, mem_region_receiver_index);
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00002629 assert(receiver != NULL);
2630
2631 sent_permissions = receiver->receiver_permissions.permissions;
J-Alves96de29f2022-04-26 16:05:24 +01002632
2633 if (found_to_id) {
2634 retrieve_receiver_index = i;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002635
2636 *receiver_ret = receiver;
J-Alves96de29f2022-04-26 16:05:24 +01002637 }
2638
2639 /*
J-Alvesdcad8992023-09-15 14:10:35 +01002640 * Check if retrieve request memory access list is valid:
2641 * - The retrieve request complies with the specification.
2642 * - Permissions are within those specified by the sender.
J-Alves96de29f2022-04-26 16:05:24 +01002643 */
J-Alvesdcad8992023-09-15 14:10:35 +01002644 ret = ffa_memory_retrieve_is_memory_access_valid(
2645 func_id, ffa_get_data_access_attr(sent_permissions),
2646 ffa_get_data_access_attr(requested_permissions),
2647 ffa_get_instruction_access_attr(sent_permissions),
2648 ffa_get_instruction_access_attr(requested_permissions),
2649 found_to_id ? permissions : NULL,
2650 region_receiver_count > 1);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002651
J-Alvesdcad8992023-09-15 14:10:35 +01002652 if (ret.func != FFA_SUCCESS_32) {
2653 return ret;
J-Alves96de29f2022-04-26 16:05:24 +01002654 }
2655
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002656 permissions_RO = (ffa_get_data_access_attr(*permissions) ==
2657 FFA_DATA_ACCESS_RO);
2658 clear_memory_flags = (retrieve_request->flags &
2659 FFA_MEMORY_REGION_FLAG_CLEAR) != 0U;
2660
J-Alves96de29f2022-04-26 16:05:24 +01002661 /*
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002662		 * Can't request the partition manager to clear memory if only
2663		 * provided with RO permissions.
J-Alves96de29f2022-04-26 16:05:24 +01002664 */
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002665 if (found_to_id && permissions_RO && clear_memory_flags) {
J-Alves96de29f2022-04-26 16:05:24 +01002666 dlog_verbose(
2667 "Receiver has RO permissions can not request "
2668 "clear.\n");
2669 return ffa_error(FFA_DENIED);
2670 }
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002671
2672 /*
2673 * Check the impdef in the retrieve_request matches the value in
2674 * the original memory send.
2675 */
2676 if (ffa_version_from_memory_access_desc_size(
2677 memory_region->memory_access_desc_size) >=
2678 MAKE_FFA_VERSION(1, 2) &&
2679 ffa_version_from_memory_access_desc_size(
2680 retrieve_request->memory_access_desc_size) >=
2681 MAKE_FFA_VERSION(1, 2)) {
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002682 if (receiver->impdef.val[0] !=
2683 retrieve_request_receiver->impdef.val[0] ||
2684 receiver->impdef.val[1] !=
2685 retrieve_request_receiver->impdef.val[1]) {
2686 dlog_verbose(
2687 "Impdef value in memory send does not "
2688 "match retrieve request value "
2689 "send value %#x %#x retrieve request "
2690 "value %#x %#x\n",
2691 receiver->impdef.val[0],
2692 receiver->impdef.val[1],
2693 retrieve_request_receiver->impdef
2694 .val[0],
2695 retrieve_request_receiver->impdef
2696 .val[1]);
2697 return ffa_error(FFA_INVALID_PARAMETERS);
2698 }
2699 }
J-Alves96de29f2022-04-26 16:05:24 +01002700 }
2701
2702 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2703 dlog_verbose(
2704 "Retrieve request does not contain caller's (%x) "
2705 "permissions\n",
2706 to_vm_id);
2707 return ffa_error(FFA_INVALID_PARAMETERS);
2708 }
2709
2710 return (struct ffa_value){.func = FFA_SUCCESS_32};
2711}
2712
J-Alvesa9cd7e32022-07-01 13:49:33 +01002713/*
2714 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2715 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2716 * of a pending memory sharing operation whose allocator is the SPM, for
2717 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2718 * the memory region descriptor of the retrieve request must be zeroed with the
2719 * exception of the sender ID and handle.
2720 */
J-Alves4f0d9c12024-01-17 17:23:11 +00002721bool is_ffa_hypervisor_retrieve_request(struct ffa_memory_region *request,
2722 struct vm_locked to_locked)
J-Alvesa9cd7e32022-07-01 13:49:33 +01002723{
2724 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2725 request->attributes == 0U && request->flags == 0U &&
2726 request->tag == 0U && request->receiver_count == 0U &&
2727 plat_ffa_memory_handle_allocated_by_current_world(
2728 request->handle);
2729}
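
/*
 * Sketch (guarded out of the build) of the descriptor shape the check above
 * accepts. The helper name is hypothetical; it only illustrates that a
 * hypervisor retrieve request carries the sender ID and handle with every
 * other field left zeroed.
 */
#if 0
static void example_fill_hypervisor_retrieve_request(
	struct ffa_memory_region *request, ffa_id_t sender,
	ffa_memory_handle_t handle)
{
	memset_s(request, sizeof(*request), 0, sizeof(*request));
	request->sender = sender;
	request->handle = handle;
	/* attributes, flags, tag and receiver_count remain zero. */
}
#endif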
2730
2731/*
2732 * Helper to reset count of fragments retrieved by the hypervisor.
2733 */
2734static void ffa_memory_retrieve_complete_from_hyp(
2735 struct ffa_memory_share_state *share_state)
2736{
2737 if (share_state->hypervisor_fragment_count ==
2738 share_state->fragment_count) {
2739 share_state->hypervisor_fragment_count = 0;
2740 }
2741}
2742
J-Alves089004f2022-07-13 14:25:44 +01002743/**
J-Alves4f0d9c12024-01-17 17:23:11 +00002744 * Prepares the return of the ffa_value for the memory retrieve response.
2745 */
2746static struct ffa_value ffa_memory_retrieve_resp(uint32_t total_length,
2747 uint32_t fragment_length)
2748{
2749 return (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
2750 .arg1 = total_length,
2751 .arg2 = fragment_length};
2752}
2753
2754/**
J-Alves089004f2022-07-13 14:25:44 +01002755 * Validate the memory region descriptor provided by the borrower on
2756 * FFA_MEM_RETRIEVE_REQ against the saved memory region provided by the lender
2757 * at the memory sharing call.
2758 */
2759static struct ffa_value ffa_memory_retrieve_validate(
J-Alves4f0d9c12024-01-17 17:23:11 +00002760 ffa_id_t to_id, struct ffa_memory_region *retrieve_request,
2761 uint32_t retrieve_request_length,
J-Alves089004f2022-07-13 14:25:44 +01002762 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2763 uint32_t share_func)
2764{
2765 ffa_memory_region_flags_t transaction_type =
2766 retrieve_request->flags &
2767 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002768 enum ffa_memory_security security_state;
J-Alves4f0d9c12024-01-17 17:23:11 +00002769 const uint64_t memory_access_desc_size =
2770 retrieve_request->memory_access_desc_size;
2771 const uint32_t expected_retrieve_request_length =
2772 retrieve_request->receivers_offset +
2773 (uint32_t)(retrieve_request->receiver_count *
2774 memory_access_desc_size);
J-Alves089004f2022-07-13 14:25:44 +01002775
2776 assert(retrieve_request != NULL);
2777 assert(memory_region != NULL);
2778 assert(receiver_index != NULL);
2779 assert(retrieve_request->sender == memory_region->sender);
2780
J-Alves4f0d9c12024-01-17 17:23:11 +00002781 if (retrieve_request_length != expected_retrieve_request_length) {
2782 dlog_verbose(
2783 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
2784 "but was %d.\n",
2785 expected_retrieve_request_length,
2786 retrieve_request_length);
2787 return ffa_error(FFA_INVALID_PARAMETERS);
2788 }
2789
2790 if (retrieve_request->sender != memory_region->sender) {
2791 dlog_verbose(
2792 "Memory with handle %#x not fully sent, can't "
2793 "retrieve.\n",
2794 memory_region->handle);
2795 return ffa_error(FFA_DENIED);
2796 }
2797
2798 /*
2799	 * The SPMC can only process retrieve requests for memory share
2800	 * operations with one borrower from the other world. It can't
2801	 * determine the ID of the NWd VM that invoked the retrieve
2802	 * request interface call. It relies on the hypervisor to
2803	 * validate the caller's ID against that provided in the
2804	 * `receivers` list of the retrieve response.
2805	 * In case there is only one borrower from the NWd in the
2806	 * transaction descriptor, record it in `to_id` for later use,
2807	 * and validate it in the retrieve request message.
2808	 * This limitation is due to the fact that the SPMC can't
2809	 * determine which index in the memory share state structures
2810	 * to update.
2810 */
2811 if (to_id == HF_HYPERVISOR_VM_ID) {
2812 uint32_t other_world_count = 0;
2813
2814 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2815			struct ffa_memory_access *receiver =
2816				ffa_memory_region_get_receiver(memory_region,
2817							       i);
2818			assert(receiver != NULL);
2819
2820			/* Record the single NWd borrower for later use. */
2821			if (!vm_id_is_current_world(
2822				    receiver->receiver_permissions.receiver)) {
2823				to_id = receiver->receiver_permissions.receiver;
2824				other_world_count++;
2825			}
2825 }
2826
2827 if (other_world_count > 1) {
2828 dlog_verbose(
2829 "Support one receiver from the other "
2830 "world.\n");
2831 return ffa_error(FFA_NOT_SUPPORTED);
2832 }
2833 }
J-Alves089004f2022-07-13 14:25:44 +01002834 /*
2835 * Check that the transaction type expected by the receiver is
2836 * correct, if it has been specified.
2837 */
2838 if (transaction_type !=
2839 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2840 transaction_type != (memory_region->flags &
2841 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2842 dlog_verbose(
2843 "Incorrect transaction type %#x for "
2844 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2845 transaction_type,
2846 memory_region->flags &
2847 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2848 retrieve_request->handle);
2849 return ffa_error(FFA_INVALID_PARAMETERS);
2850 }
2851
2852 if (retrieve_request->tag != memory_region->tag) {
2853 dlog_verbose(
2854 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2855 "%d for handle %#x.\n",
2856 retrieve_request->tag, memory_region->tag,
2857 retrieve_request->handle);
2858 return ffa_error(FFA_INVALID_PARAMETERS);
2859 }
2860
J-Alves4f0d9c12024-01-17 17:23:11 +00002861 *receiver_index =
2862 ffa_memory_region_get_receiver_index(memory_region, to_id);
J-Alves089004f2022-07-13 14:25:44 +01002863
2864 if (*receiver_index == memory_region->receiver_count) {
2865 dlog_verbose(
2866 "Incorrect receiver VM ID %d for "
2867 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves4f0d9c12024-01-17 17:23:11 +00002868 to_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002869 return ffa_error(FFA_INVALID_PARAMETERS);
2870 }
2871
2872 if ((retrieve_request->flags &
2873 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2874 dlog_verbose(
2875 "Retriever specified 'address range alignment 'hint' "
2876 "not supported.\n");
2877 return ffa_error(FFA_INVALID_PARAMETERS);
2878 }
2879 if ((retrieve_request->flags &
2880 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2881 dlog_verbose(
2882 "Bits 8-5 must be zero in memory region's flags "
2883 "(address range alignment hint not supported).\n");
2884 return ffa_error(FFA_INVALID_PARAMETERS);
2885 }
2886
2887 if ((retrieve_request->flags & ~0x7FF) != 0U) {
2888 dlog_verbose(
2889 "Bits 31-10 must be zero in memory region's flags.\n");
2890 return ffa_error(FFA_INVALID_PARAMETERS);
2891 }
2892
2893 if (share_func == FFA_MEM_SHARE_32 &&
2894 (retrieve_request->flags &
2895 (FFA_MEMORY_REGION_FLAG_CLEAR |
2896 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2897 dlog_verbose(
2898 "Memory Share operation can't clean after relinquish "
2899 "memory region.\n");
2900 return ffa_error(FFA_INVALID_PARAMETERS);
2901 }
2902
2903 /*
2904 * If the borrower needs the memory to be cleared before mapping
2905 * to its address space, the sender should have set the flag
2906 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2907 * FFA_DENIED.
2908 */
2909 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2910 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2911 dlog_verbose(
2912 "Borrower needs memory cleared. Sender needs to set "
2913 "flag for clearing memory.\n");
2914 return ffa_error(FFA_DENIED);
2915 }
2916
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002917 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2918 security_state =
2919 ffa_get_memory_security_attr(retrieve_request->attributes);
2920 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2921 dlog_verbose(
2922 "Invalid security state for memory retrieve request "
2923 "operation.\n");
2924 return ffa_error(FFA_INVALID_PARAMETERS);
2925 }
2926
J-Alves089004f2022-07-13 14:25:44 +01002927 /*
2928 * If memory type is not specified, bypass validation of memory
2929 * attributes in the retrieve request. The retriever is expecting to
2930 * obtain this information from the SPMC.
2931 */
2932 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2933 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2934 return (struct ffa_value){.func = FFA_SUCCESS_32};
2935 }
2936
2937 /*
2938 * Ensure receiver's attributes are compatible with how
2939 * Hafnium maps memory: Normal Memory, Inner shareable,
2940 * Write-Back Read-Allocate Write-Allocate Cacheable.
2941 */
2942 return ffa_memory_attributes_validate(retrieve_request->attributes);
2943}
2944
J-Alves4f0d9c12024-01-17 17:23:11 +00002945static struct ffa_value ffa_partition_retrieve_request(
2946 struct share_states_locked share_states,
2947 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
2948 struct ffa_memory_region *retrieve_request,
2949 uint32_t retrieve_request_length, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002950{
J-Alvesa9cd7e32022-07-01 13:49:33 +01002951 ffa_memory_access_permissions_t permissions = 0;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002952 uint32_t memory_to_mode;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002953 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002954 struct ffa_composite_memory_region *composite;
2955 uint32_t total_length;
2956 uint32_t fragment_length;
J-Alves19e20cf2023-08-02 12:48:55 +01002957 ffa_id_t receiver_id = to_locked.vm->id;
J-Alves4f0d9c12024-01-17 17:23:11 +00002958 bool is_retrieve_complete = false;
Olivier Deprez878bd5b2021-04-15 19:05:10 +02002959 ffa_memory_attributes_t attributes;
J-Alves4f0d9c12024-01-17 17:23:11 +00002960 const uint64_t memory_access_desc_size =
Daniel Boulbyde974ca2023-12-12 13:53:31 +00002961 retrieve_request->memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00002962 uint32_t receiver_index;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00002963 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00002964 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002965
J-Alves4f0d9c12024-01-17 17:23:11 +00002966 struct ffa_memory_region *memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002967
J-Alves96de29f2022-04-26 16:05:24 +01002968 if (!share_state->sending_complete) {
2969 dlog_verbose(
2970 "Memory with handle %#x not fully sent, can't "
2971 "retrieve.\n",
2972 handle);
J-Alves4f0d9c12024-01-17 17:23:11 +00002973 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alves96de29f2022-04-26 16:05:24 +01002974 }
2975
J-Alves4f0d9c12024-01-17 17:23:11 +00002976 /*
2977 * Validate retrieve request, according to what was sent by the
2978 * sender. Function will output the `receiver_index` from the
2979 * provided memory region.
2980 */
2981 ret = ffa_memory_retrieve_validate(
2982 receiver_id, retrieve_request, retrieve_request_length,
2983 memory_region, &receiver_index, share_state->share_func);
J-Alves089004f2022-07-13 14:25:44 +01002984
J-Alves4f0d9c12024-01-17 17:23:11 +00002985 if (ret.func != FFA_SUCCESS_32) {
2986 return ret;
J-Alves089004f2022-07-13 14:25:44 +01002987 }
J-Alves96de29f2022-04-26 16:05:24 +01002988
J-Alves4f0d9c12024-01-17 17:23:11 +00002989 if (share_state->retrieved_fragment_count[receiver_index] != 0U) {
2990 dlog_verbose("Memory with handle %#x already retrieved.\n",
2991 handle);
2992 return ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002993 }
2994
J-Alves4f0d9c12024-01-17 17:23:11 +00002995 /*
2996 * Validate the requested permissions against the sent
2997 * permissions.
2998 * Outputs the permissions to give to retriever at S2
2999 * PTs.
3000 */
3001 ret = ffa_memory_retrieve_validate_memory_access_list(
3002 memory_region, retrieve_request, receiver_id, &permissions,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003003 &receiver, share_state->share_func);
J-Alves4f0d9c12024-01-17 17:23:11 +00003004 if (ret.func != FFA_SUCCESS_32) {
3005 return ret;
3006 }
3007
3008 memory_to_mode = ffa_memory_permissions_to_mode(
3009 permissions, share_state->sender_orig_mode);
3010
3011 ret = ffa_retrieve_check_update(
3012 to_locked, share_state->fragments,
3013 share_state->fragment_constituent_counts,
3014 share_state->fragment_count, memory_to_mode,
3015 share_state->share_func, false, page_pool);
3016
3017 if (ret.func != FFA_SUCCESS_32) {
3018 return ret;
3019 }
3020
3021 share_state->retrieved_fragment_count[receiver_index] = 1;
3022
3023 is_retrieve_complete =
3024 share_state->retrieved_fragment_count[receiver_index] ==
3025 share_state->fragment_count;
3026
J-Alvesb5084cf2022-07-06 14:20:12 +01003027 /* VMs acquire the RX buffer from SPMC. */
3028 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3029
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003030 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003031 * Copy response to RX buffer of caller and deliver the message.
3032 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003033 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003034 composite = ffa_memory_region_get_composite(memory_region, 0);
J-Alves4f0d9c12024-01-17 17:23:11 +00003035
Andrew Walbranca808b12020-05-15 17:22:28 +01003036 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003037 * Constituents which we received in the first fragment should
3038 * always fit in the first fragment we are sending, because the
3039 * header is the same size in both cases and we have a fixed
3040 * message buffer size. So `ffa_retrieved_memory_region_init`
3041 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01003042 */
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003043
3044 /*
3045 * Set the security state in the memory retrieve response attributes
3046 * if specified by the target mode.
3047 */
3048 attributes = plat_ffa_memory_security_mode(
3049 memory_region->attributes, share_state->sender_orig_mode);
3050
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003051	/* Provide the permissions resolved above for this retriever. */
3052 receiver->receiver_permissions.permissions = permissions;
3053
3054 /*
3055 * Prepare the memory region descriptor for the retrieve response.
3056 * Provide the pointer to the receiver tracked in the share state
3057	 * structures.
3058 */
Andrew Walbranca808b12020-05-15 17:22:28 +01003059 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01003060 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
Olivier Deprez878bd5b2021-04-15 19:05:10 +02003061 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003062 memory_region->flags, handle, permissions, receiver, 1,
3063 memory_access_desc_size, composite->page_count,
3064 composite->constituent_count, share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01003065 share_state->fragment_constituent_counts[0], &total_length,
3066 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01003067
J-Alves4f0d9c12024-01-17 17:23:11 +00003068 if (is_retrieve_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003069 ffa_memory_retrieve_complete(share_states, share_state,
3070 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003071 }
J-Alves4f0d9c12024-01-17 17:23:11 +00003072
3073 return ffa_memory_retrieve_resp(total_length, fragment_length);
3074}
3075
3076static struct ffa_value ffa_hypervisor_retrieve_request(
3077 struct ffa_memory_share_state *share_state, struct vm_locked to_locked,
3078 struct ffa_memory_region *retrieve_request)
3079{
3080 struct ffa_value ret;
3081 struct ffa_composite_memory_region *composite;
3082 uint32_t total_length;
3083 uint32_t fragment_length;
J-Alves4f0d9c12024-01-17 17:23:11 +00003084 ffa_memory_attributes_t attributes;
J-Alves7b6ab612024-01-24 09:54:54 +00003085 uint64_t memory_access_desc_size;
J-Alves4f0d9c12024-01-17 17:23:11 +00003086 struct ffa_memory_region *memory_region;
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003087 struct ffa_memory_access *receiver;
J-Alves4f0d9c12024-01-17 17:23:11 +00003088 ffa_memory_handle_t handle = retrieve_request->handle;
3089
J-Alves4f0d9c12024-01-17 17:23:11 +00003090 memory_region = share_state->memory_region;
3091
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003092 assert(to_locked.vm->id == HF_HYPERVISOR_VM_ID);
3093
J-Alves7b6ab612024-01-24 09:54:54 +00003094 switch (to_locked.vm->ffa_version) {
3095 case MAKE_FFA_VERSION(1, 2):
3096 memory_access_desc_size = sizeof(struct ffa_memory_access);
3097 break;
3098 case MAKE_FFA_VERSION(1, 0):
3099 case MAKE_FFA_VERSION(1, 1):
3100 memory_access_desc_size = sizeof(struct ffa_memory_access_v1_0);
3101 break;
3102 default:
3103 panic("version not supported: %x\n", to_locked.vm->ffa_version);
3104 }
3105
J-Alves4f0d9c12024-01-17 17:23:11 +00003106 if (share_state->hypervisor_fragment_count != 0U) {
3107 dlog_verbose(
3108 "Memory with handle %#x already retrieved by "
3109 "the hypervisor.\n",
3110 handle);
3111 return ffa_error(FFA_DENIED);
3112 }
3113
3114 share_state->hypervisor_fragment_count = 1;
3115
3116 ffa_memory_retrieve_complete_from_hyp(share_state);
3117
3118 /* VMs acquire the RX buffer from SPMC. */
3119 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
3120
3121 /*
3122 * Copy response to RX buffer of caller and deliver the message.
3123 * This must be done before the share_state is (possibly) freed.
3124 */
3125 composite = ffa_memory_region_get_composite(memory_region, 0);
3126
3127 /*
3128 * Constituents which we received in the first fragment should
3129 * always fit in the first fragment we are sending, because the
3130 * header is the same size in both cases and we have a fixed
3131 * message buffer size. So `ffa_retrieved_memory_region_init`
3132 * should never fail.
3133 */
3134
3135 /*
3136 * Set the security state in the memory retrieve response attributes
3137 * if specified by the target mode.
3138 */
3139 attributes = plat_ffa_memory_security_mode(
3140 memory_region->attributes, share_state->sender_orig_mode);
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003141
3142 receiver = ffa_memory_region_get_receiver(memory_region, 0);
3143
J-Alves4f0d9c12024-01-17 17:23:11 +00003144 CHECK(ffa_retrieved_memory_region_init(
3145 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
3146 HF_MAILBOX_SIZE, memory_region->sender, attributes,
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003147 memory_region->flags, handle,
3148 receiver->receiver_permissions.permissions, receiver,
3149 memory_region->receiver_count, memory_access_desc_size,
J-Alves4f0d9c12024-01-17 17:23:11 +00003150 composite->page_count, composite->constituent_count,
3151 share_state->fragments[0],
3152 share_state->fragment_constituent_counts[0], &total_length,
3153 &fragment_length));
3154
3155 return ffa_memory_retrieve_resp(total_length, fragment_length);
3156}
3157
3158struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
3159 struct ffa_memory_region *retrieve_request,
3160 uint32_t retrieve_request_length,
3161 struct mpool *page_pool)
3162{
3163 ffa_memory_handle_t handle = retrieve_request->handle;
3164 struct share_states_locked share_states;
3165 struct ffa_memory_share_state *share_state;
3166 struct ffa_value ret;
3167
3168 dump_share_states();
3169
3170 share_states = share_states_lock();
3171 share_state = get_share_state(share_states, handle);
3172 if (share_state == NULL) {
3173 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
3174 handle);
3175 ret = ffa_error(FFA_INVALID_PARAMETERS);
3176 goto out;
3177 }
3178
3179 if (is_ffa_hypervisor_retrieve_request(retrieve_request, to_locked)) {
3180 ret = ffa_hypervisor_retrieve_request(share_state, to_locked,
3181 retrieve_request);
3182 } else {
3183 ret = ffa_partition_retrieve_request(
3184 share_states, share_state, to_locked, retrieve_request,
3185 retrieve_request_length, page_pool);
3186 }
3187
3188 /* Track use of the RX buffer if the handling has succeeded. */
3189 if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
3190 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
3191 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
3192 }
3193
Andrew Walbranca808b12020-05-15 17:22:28 +01003194out:
3195 share_states_unlock(&share_states);
3196 dump_share_states();
3197 return ret;
3198}
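
/*
 * Borrower-side flow sketch (guarded out of the build). The wrapper names
 * `ffa_mem_retrieve_req` and `ffa_mem_frag_rx` are assumed here purely for
 * illustration: after the retrieve request is answered with
 * FFA_MEM_RETRIEVE_RESP (total length in arg1, first fragment length in arg2),
 * the borrower keeps calling FFA_MEM_FRAG_RX with the number of bytes received
 * so far until the whole descriptor has been fetched.
 */
#if 0
static void example_borrower_retrieve(ffa_memory_handle_t handle,
				      uint32_t request_length)
{
	/* Hypothetical wrappers for the FF-A ABIs named above. */
	struct ffa_value ret =
		ffa_mem_retrieve_req(request_length, request_length);
	uint32_t total_length = (uint32_t)ret.arg1;
	uint32_t received = (uint32_t)ret.arg2;

	while (received < total_length) {
		ret = ffa_mem_frag_rx(handle, received);
		/* FFA_MEM_FRAG_TX reports this fragment's length in arg3. */
		received += (uint32_t)ret.arg3;
	}
}
#endif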
3199
J-Alves5da37d92022-10-24 16:33:48 +01003200/**
3201 * Determine expected fragment offset according to the FF-A version of
3202 * the caller.
3203 */
3204static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
3205 struct ffa_memory_region *memory_region,
3206 uint32_t retrieved_constituents_count, uint32_t ffa_version)
3207{
3208 uint32_t expected_fragment_offset;
3209 uint32_t composite_constituents_offset;
3210
Kathleen Capellae4fe2962023-09-01 17:08:47 -04003211 if (ffa_version >= MAKE_FFA_VERSION(1, 1)) {
J-Alves5da37d92022-10-24 16:33:48 +01003212 /*
3213 * Hafnium operates memory regions in FF-A v1.1 format, so we
3214 * can retrieve the constituents offset from descriptor.
3215 */
3216 composite_constituents_offset =
3217 ffa_composite_constituent_offset(memory_region, 0);
3218 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
3219 /*
3220 * If retriever is FF-A v1.0, determine the composite offset
3221 * as it is expected to have been configured in the
3222 * retrieve response.
3223 */
3224 composite_constituents_offset =
3225 sizeof(struct ffa_memory_region_v1_0) +
3226 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003227 sizeof(struct ffa_memory_access_v1_0) +
J-Alves5da37d92022-10-24 16:33:48 +01003228 sizeof(struct ffa_composite_memory_region);
3229 } else {
3230 panic("%s received an invalid FF-A version.\n", __func__);
3231 }
3232
3233 expected_fragment_offset =
3234 composite_constituents_offset +
3235 retrieved_constituents_count *
3236 sizeof(struct ffa_memory_region_constituent) -
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003237 (uint32_t)(memory_region->memory_access_desc_size *
3238 (memory_region->receiver_count - 1));
J-Alves5da37d92022-10-24 16:33:48 +01003239
3240 return expected_fragment_offset;
3241}
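
/*
 * Worked example for the offset calculation above, assuming the usual FF-A
 * descriptor sizes: `struct ffa_memory_region_v1_0` is 32 bytes,
 * `struct ffa_memory_access_v1_0` is 16 bytes,
 * `struct ffa_composite_memory_region` is 16 bytes and
 * `struct ffa_memory_region_constituent` is 16 bytes. For an FF-A v1.0
 * retriever of a single-receiver transaction that has already received 14
 * constituents, the next fragment is expected at offset
 *   (32 + 1 * 16 + 16) + 14 * 16 - 0 = 288 bytes.
 */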
3242
Andrew Walbranca808b12020-05-15 17:22:28 +01003243struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
3244 ffa_memory_handle_t handle,
3245 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003246 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003247 struct mpool *page_pool)
3248{
3249 struct ffa_memory_region *memory_region;
3250 struct share_states_locked share_states;
3251 struct ffa_memory_share_state *share_state;
3252 struct ffa_value ret;
3253 uint32_t fragment_index;
3254 uint32_t retrieved_constituents_count;
3255 uint32_t i;
3256 uint32_t expected_fragment_offset;
3257 uint32_t remaining_constituent_count;
3258 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01003259 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01003260 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01003261
3262 dump_share_states();
3263
3264 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003265 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003266 if (share_state == NULL) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003267 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
3268 handle);
3269 ret = ffa_error(FFA_INVALID_PARAMETERS);
3270 goto out;
3271 }
3272
3273 memory_region = share_state->memory_region;
3274 CHECK(memory_region != NULL);
3275
Andrew Walbranca808b12020-05-15 17:22:28 +01003276 if (!share_state->sending_complete) {
3277 dlog_verbose(
3278 "Memory with handle %#x not fully sent, can't "
3279 "retrieve.\n",
3280 handle);
3281 ret = ffa_error(FFA_INVALID_PARAMETERS);
3282 goto out;
3283 }
3284
J-Alves59ed0042022-07-28 18:26:41 +01003285 /*
3286 * If retrieve request from the hypervisor has been initiated in the
3287 * given share_state, continue it, else assume it is a continuation of
3288 * retrieve request from a NWd VM.
3289 */
3290 continue_ffa_hyp_mem_retrieve_req =
3291 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
3292 (share_state->hypervisor_fragment_count != 0U) &&
J-Alves661e1b72023-08-02 13:39:40 +01003293 ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003294
J-Alves59ed0042022-07-28 18:26:41 +01003295 if (!continue_ffa_hyp_mem_retrieve_req) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003296 receiver_index = ffa_memory_region_get_receiver_index(
J-Alves59ed0042022-07-28 18:26:41 +01003297 memory_region, to_locked.vm->id);
3298
3299 if (receiver_index == memory_region->receiver_count) {
3300 dlog_verbose(
3301 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
3302 "borrower to memory sharing transaction (%x)\n",
3303 to_locked.vm->id, handle);
3304 ret = ffa_error(FFA_INVALID_PARAMETERS);
3305 goto out;
3306 }
3307
3308 if (share_state->retrieved_fragment_count[receiver_index] ==
3309 0 ||
3310 share_state->retrieved_fragment_count[receiver_index] >=
3311 share_state->fragment_count) {
3312 dlog_verbose(
3313 "Retrieval of memory with handle %#x not yet "
3314 "started or already completed (%d/%d fragments "
3315 "retrieved).\n",
3316 handle,
3317 share_state->retrieved_fragment_count
3318 [receiver_index],
3319 share_state->fragment_count);
3320 ret = ffa_error(FFA_INVALID_PARAMETERS);
3321 goto out;
3322 }
3323
3324 fragment_index =
3325 share_state->retrieved_fragment_count[receiver_index];
3326 } else {
3327 if (share_state->hypervisor_fragment_count == 0 ||
3328 share_state->hypervisor_fragment_count >=
3329 share_state->fragment_count) {
3330 dlog_verbose(
3331 "Retrieve of memory with handle %x not "
3332 "started from hypervisor.\n",
3333 handle);
3334 ret = ffa_error(FFA_INVALID_PARAMETERS);
3335 goto out;
3336 }
3337
3338 if (memory_region->sender != sender_vm_id) {
3339 dlog_verbose(
3340 "Sender ID (%x) is not as expected for memory "
3341 "handle %x\n",
3342 sender_vm_id, handle);
3343 ret = ffa_error(FFA_INVALID_PARAMETERS);
3344 goto out;
3345 }
3346
3347 fragment_index = share_state->hypervisor_fragment_count;
3348
3349 receiver_index = 0;
3350 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003351
3352 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003353 * Check that the given fragment offset is correct by counting
3354 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01003355 */
3356 retrieved_constituents_count = 0;
3357 for (i = 0; i < fragment_index; ++i) {
3358 retrieved_constituents_count +=
3359 share_state->fragment_constituent_counts[i];
3360 }
J-Alvesc7484f12022-05-13 12:41:14 +01003361
3362 CHECK(memory_region->receiver_count > 0);
3363
Andrew Walbranca808b12020-05-15 17:22:28 +01003364 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01003365 ffa_memory_retrieve_expected_offset_per_ffa_version(
3366 memory_region, retrieved_constituents_count,
3367 to_locked.vm->ffa_version);
3368
Andrew Walbranca808b12020-05-15 17:22:28 +01003369 if (fragment_offset != expected_fragment_offset) {
3370 dlog_verbose("Fragment offset was %d but expected %d.\n",
3371 fragment_offset, expected_fragment_offset);
3372 ret = ffa_error(FFA_INVALID_PARAMETERS);
3373 goto out;
3374 }
3375
J-Alves4f0d9c12024-01-17 17:23:11 +00003376 /*
3377	 * When Hafnium is the hypervisor, acquire the RX buffer of a VM that
3378	 * is currently owned by the SPMC.
J-Alves4f0d9c12024-01-17 17:23:11 +00003379	 */
3380	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
J-Alves59ed0042022-07-28 18:26:41 +01003381
Andrew Walbranca808b12020-05-15 17:22:28 +01003382 remaining_constituent_count = ffa_memory_fragment_init(
3383 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
3384 share_state->fragments[fragment_index],
3385 share_state->fragment_constituent_counts[fragment_index],
3386 &fragment_length);
3387 CHECK(remaining_constituent_count == 0);
J-Alves674e4de2024-01-17 16:20:32 +00003388
Andrew Walbranca808b12020-05-15 17:22:28 +01003389 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00003390 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01003391
J-Alves59ed0042022-07-28 18:26:41 +01003392 if (!continue_ffa_hyp_mem_retrieve_req) {
3393 share_state->retrieved_fragment_count[receiver_index]++;
3394 if (share_state->retrieved_fragment_count[receiver_index] ==
3395 share_state->fragment_count) {
3396 ffa_memory_retrieve_complete(share_states, share_state,
3397 page_pool);
3398 }
3399 } else {
3400 share_state->hypervisor_fragment_count++;
3401
3402 ffa_memory_retrieve_complete_from_hyp(share_state);
3403 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003404 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
3405 .arg1 = (uint32_t)handle,
3406 .arg2 = (uint32_t)(handle >> 32),
3407 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003408
3409out:
3410 share_states_unlock(&share_states);
3411 dump_share_states();
3412 return ret;
3413}
3414
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003415struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003416 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003417 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003418{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003419 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003420 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003421 struct ffa_memory_share_state *share_state;
3422 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003423 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003424 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01003425 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00003426 bool receivers_relinquished_memory;
J-Alves639ddfc2023-11-21 14:17:26 +00003427 ffa_memory_access_permissions_t receiver_permissions = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003428
Andrew Walbrana65a1322020-04-06 19:32:32 +01003429 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003430 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003431 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01003432 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003433 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003434 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003435 }
3436
Andrew Walbrana65a1322020-04-06 19:32:32 +01003437 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003438 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003439 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01003440 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01003441 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003442 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003443 }
3444
3445 dump_share_states();
3446
3447 share_states = share_states_lock();
Karl Meakin4a2854a2023-06-30 16:26:52 +01003448 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003449 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003450 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003451 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003452 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003453 goto out;
3454 }
3455
Andrew Walbranca808b12020-05-15 17:22:28 +01003456 if (!share_state->sending_complete) {
3457 dlog_verbose(
3458 "Memory with handle %#x not fully sent, can't "
3459 "relinquish.\n",
3460 handle);
3461 ret = ffa_error(FFA_INVALID_PARAMETERS);
3462 goto out;
3463 }
3464
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003465 memory_region = share_state->memory_region;
3466 CHECK(memory_region != NULL);
3467
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003468 receiver_index = ffa_memory_region_get_receiver_index(
3469 memory_region, from_locked.vm->id);
J-Alves8eb19162022-04-28 10:56:48 +01003470
3471 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003472 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003473 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01003474 "with handle %#x and it is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01003475 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003476 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003477 goto out;
3478 }
3479
J-Alves8eb19162022-04-28 10:56:48 +01003480 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01003481 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003482 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003483 "Memory with handle %#x not yet fully "
3484 "retrieved, "
J-Alves8eb19162022-04-28 10:56:48 +01003485 "receiver %x can't relinquish.\n",
3486 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003487 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003488 goto out;
3489 }
3490
J-Alves3c5b2072022-11-21 12:45:40 +00003491 /*
3492	 * Only clear the memory if requested in the relinquish call and no
3493	 * other borrower still has the memory region retrieved.
3494 */
3495 receivers_relinquished_memory = true;
3496
3497 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3498 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003499 ffa_memory_region_get_receiver(memory_region, i);
3500 assert(receiver != NULL);
J-Alves3c5b2072022-11-21 12:45:40 +00003501 if (receiver->receiver_permissions.receiver ==
3502 from_locked.vm->id) {
J-Alves639ddfc2023-11-21 14:17:26 +00003503 receiver_permissions =
3504 receiver->receiver_permissions.permissions;
J-Alves3c5b2072022-11-21 12:45:40 +00003505 continue;
3506 }
3507
3508 if (share_state->retrieved_fragment_count[i] != 0U) {
3509 receivers_relinquished_memory = false;
3510 break;
3511 }
3512 }
3513
3514 clear = receivers_relinquished_memory &&
Daniel Boulby2e14ebe2024-01-15 16:21:44 +00003515 ((relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3516 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003517
3518 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003519 * Clear is not allowed for memory that was shared, as the
3520 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003521 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003522 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003523 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003524 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003525 goto out;
3526 }
3527
J-Alves639ddfc2023-11-21 14:17:26 +00003528	if (clear && ffa_get_data_access_attr(receiver_permissions) ==
			    FFA_DATA_ACCESS_RO) {
3529 dlog_verbose("%s: RO memory can't use clear memory flag.\n",
3530 __func__);
3531 ret = ffa_error(FFA_DENIED);
3532 goto out;
3533 }
3534
Andrew Walbranca808b12020-05-15 17:22:28 +01003535 ret = ffa_relinquish_check_update(
J-Alves26483382023-04-20 12:01:49 +01003536 from_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003537 share_state->fragment_constituent_counts,
3538 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003539
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003540 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003541 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01003542 * Mark memory handle as not retrieved, so it can be
3543 * reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003544 */
J-Alves8eb19162022-04-28 10:56:48 +01003545 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003546 }
3547
3548out:
3549 share_states_unlock(&share_states);
3550 dump_share_states();
3551 return ret;
3552}
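
/*
 * Sketch (guarded out of the build) of the relinquish descriptor the function
 * above expects. The helper name is hypothetical; it shows a single endpoint
 * (the caller itself) and an optional clear flag.
 */
#if 0
static void example_fill_relinquish_request(
	struct ffa_mem_relinquish *relinquish, ffa_memory_handle_t handle,
	ffa_id_t caller_id, bool clear)
{
	relinquish->handle = handle;
	relinquish->flags = clear ? FFA_MEMORY_REGION_FLAG_CLEAR : 0;
	relinquish->endpoint_count = 1;
	relinquish->endpoints[0] = caller_id;
}
#endif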
3553
3554/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01003555 * Validates that the reclaim transition is allowed for the given
3556 * handle, updates the page table of the reclaiming VM, and frees the
3557 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003558 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003559struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01003560 ffa_memory_handle_t handle,
3561 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003562 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003563{
3564 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003565 struct ffa_memory_share_state *share_state;
3566 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003567 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003568
3569 dump_share_states();
3570
3571 share_states = share_states_lock();
Karl Meakin52cdfe72023-06-30 14:49:10 +01003572
Karl Meakin4a2854a2023-06-30 16:26:52 +01003573 share_state = get_share_state(share_states, handle);
J-Alvesb56aac82023-11-10 09:44:43 +00003574 if (share_state == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003575 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003576 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003577 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003578 goto out;
3579 }
Karl Meakin4a2854a2023-06-30 16:26:52 +01003580 memory_region = share_state->memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003581
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003582 CHECK(memory_region != NULL);
3583
J-Alvesa9cd7e32022-07-01 13:49:33 +01003584 if (vm_id_is_current_world(to_locked.vm->id) &&
3585 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003586 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01003587 "VM %#x attempted to reclaim memory handle %#x "
3588 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003589 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003590 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003591 goto out;
3592 }
3593
Andrew Walbranca808b12020-05-15 17:22:28 +01003594 if (!share_state->sending_complete) {
3595 dlog_verbose(
3596 "Memory with handle %#x not fully sent, can't "
3597 "reclaim.\n",
3598 handle);
3599 ret = ffa_error(FFA_INVALID_PARAMETERS);
3600 goto out;
3601 }
3602
J-Alves752236c2022-04-28 11:07:47 +01003603 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
3604 if (share_state->retrieved_fragment_count[i] != 0) {
3605 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01003606 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00003607 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01003608				"borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01003609 handle,
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003610 ffa_memory_region_get_receiver(memory_region, i)
3611 ->receiver_permissions.receiver);
J-Alves752236c2022-04-28 11:07:47 +01003612 ret = ffa_error(FFA_DENIED);
3613 goto out;
3614 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003615 }
3616
Andrew Walbranca808b12020-05-15 17:22:28 +01003617 ret = ffa_retrieve_check_update(
J-Alves26483382023-04-20 12:01:49 +01003618 to_locked, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01003619 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00003620 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01003621 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003622
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003623 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003624 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00003625 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003626 }
3627
3628out:
3629 share_states_unlock(&share_states);
3630 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01003631}