/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa_v1_0.h"

#define RECEIVERS_COUNT_IN_RETRIEVE_RESP 1

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Extracts the index from a memory handle allocated by Hafnium's current
 * world.
 */
uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
{
	return handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
}

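/*
 * Note: handles allocated by this world are produced by
 * `plat_ffa_memory_handle_make(index)` and are expected to round-trip through
 * `ffa_memory_handle_get_index`, i.e. (assuming the allocator tag bits all
 * fall within FFA_MEMORY_HANDLE_ALLOCATOR_MASK):
 *
 *	ffa_memory_handle_get_index(plat_ffa_memory_handle_make(i)) == i
 *
 * for any index i < MAX_MEM_SHARES.
 */
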
/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
bool allocate_share_state(struct share_states_locked share_states,
			  uint32_t share_func,
			  struct ffa_memory_region *memory_region,
			  uint32_t fragment_length, ffa_memory_handle_t handle,
			  struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	assert(share_states.share_states != NULL);
	assert(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				memory_region->handle =
					plat_ffa_memory_handle_make(i);
			} else {
				memory_region->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
void share_states_unlock(struct share_states_locked *share_states)
{
	assert(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

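/*
 * Illustrative usage sketch for the helpers above (hypothetical caller, not
 * taken verbatim from any call site): the share states array may only be
 * accessed between share_states_lock() and share_states_unlock().
 *
 *	struct share_states_locked locked = share_states_lock();
 *	struct ffa_memory_share_state *share_state;
 *
 *	if (!allocate_share_state(locked, FFA_MEM_SHARE_32, memory_region,
 *				  fragment_length, FFA_MEMORY_HANDLE_INVALID,
 *				  &share_state)) {
 *		// All MAX_MEM_SHARES entries are currently in use.
 *	}
 *	share_states_unlock(&locked);
 *
 * `memory_region` and `fragment_length` are assumed to come from an already
 * validated memory send request.
 */
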
/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
bool get_share_state(struct share_states_locked share_states,
		     ffa_memory_handle_t handle,
		     struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint64_t index;

	assert(share_states.share_states != NULL);
	assert(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
		index = ffa_memory_handle_get_index(handle);
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->memory_region != NULL &&
		    share_state->memory_region->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
void share_state_free(struct share_states_locked share_states,
		      struct ffa_memory_share_state *share_state,
		      struct mpool *page_pool)
{
	uint32_t i;

	assert(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * First fragment is part of the same page as the `memory_region`, so
	 * it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
	share_state->hypervisor_fragment_count = 0;
}

/** Checks whether the given share state has been fully sent. */
bool share_state_sending_complete(struct share_states_locked share_states,
				  struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	assert(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

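/*
 * Worked example (illustrative only): if the constituents of the composite
 * start at offset X within `memory_region` and the fragments received so far
 * carry n0 and n1 constituents, the next fragment is expected at offset
 * X + (n0 + n1) * sizeof(struct ffa_memory_region_constituent), which is
 * exactly the running total computed above.
 */
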
static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, tag %u, to %u "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" %#x (", share_states[i].memory_region->handle);
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, "
			     "sender's original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}

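/*
 * For example, permissions of FFA_DATA_ACCESS_RW with
 * FFA_INSTRUCTION_ACCESS_NX map to MM_MODE_R | MM_MODE_W, while the
 * *_NOT_SPECIFIED values inherit the corresponding read/write/execute bits
 * from `default_mode`.
 */
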
/**
 * Get the current mode in the stage-2 page table of the given vm of all the
 * pages in the given constituents, if they all have the same mode, or return
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	struct ffa_memory_access *receivers, uint32_t receivers_count,
	uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	assert(receivers != NULL && receivers_count > 0U);

	for (uint32_t i = 0U; i < receivers_count; i++) {
		ffa_memory_access_permissions_t permissions =
			receivers[i].receiver_permissions.permissions;
		uint32_t required_from_mode = ffa_memory_permissions_to_mode(
			permissions, *orig_from_mode);

		if ((*orig_from_mode & required_from_mode) !=
		    required_from_mode) {
			dlog_verbose(
				"Sender tried to send memory with permissions "
				"which required mode %#x but only had %#x "
				"itself.\n",
				required_from_mode, *orig_from_mode);
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

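/*
 * To summarise the sender-side transition above: starting from an owned,
 * exclusive mapping, FFA_MEM_DONATE_32 leaves the sender with
 * MM_MODE_INVALID | MM_MODE_UNOWNED, FFA_MEM_LEND_32 with MM_MODE_INVALID and
 * FFA_MEM_SHARE_32 with MM_MODE_SHARED, each on top of the original non-state
 * bits.
 */
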
static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		/*
		 * If the original FF-A memory send call was processed
		 * successfully, `orig_to_mode` is expected to overlap with the
		 * state bits (MM_MODE_INVALID | MM_MODE_UNOWNED |
		 * MM_MODE_SHARED), as a result of `ffa_send_check_transition`.
		 */
		if (vm_id_is_current_world(to.vm->id)) {
			assert((orig_to_mode &
				(MM_MODE_INVALID | MM_MODE_UNOWNED |
				 MM_MODE_SHARED)) != 0U);
		}
	} else {
		/*
		 * If the retriever is from virtual FF-A instance:
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if (vm_id_is_current_world(to.vm->id) &&
		    to.vm->id != HF_PRIMARY_VM_ID &&
		    (orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
			    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

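/*
 * To summarise the retriever-side transition above: the new mode always
 * starts from `memory_to_attributes`; FFA_MEM_LEND_32 additionally sets
 * MM_MODE_UNOWNED, FFA_MEM_SHARE_32 sets MM_MODE_UNOWNED | MM_MODE_SHARED,
 * and FFA_MEM_DONATE_32 and FFA_MEM_RECLAIM_32 add no extra state bits.
 */
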
/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * vm_ptable_defrag should always be called after a series of page table
 * updates, whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);
			uint32_t pa_bits =
				arch_mm_get_pa_bits(arch_mm_get_pa_range());

			/*
			 * Ensure the requested region falls into system's PA
			 * range.
			 */
			if (((pa_addr(pa_begin) >> pa_bits) > 0) ||
			    ((pa_addr(pa_end) >> pa_bits) > 0)) {
				dlog_error("Region is outside of PA Range\n");
				return false;
			}

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}

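/*
 * Sketch of the intended two-pass use of ffa_region_group_identity_map
 * (hypothetical caller): reserve first with commit false and only then apply
 * the change with commit true, so a failed allocation never leaves the page
 * table half-updated.
 *
 *	if (!ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					   fragment_count, mode, ppool,
 *					   false)) {
 *		// Not enough memory in `ppool`; nothing has been mapped.
 *	}
 *	CHECK(ffa_region_group_identity_map(vm_locked, fragments, counts,
 *					    fragment_count, mode, ppool,
 *					    true));
 */
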
/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,
			 uint32_t extra_mode_attributes)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 * global mapping of the whole range. Such an approach will limit
	 * the changes to stage-1 tables and will allow only local
	 * invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_W | (extra_mode_attributes &
						 plat_ffa_other_world_mode()),
				    ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	uint32_t security_state_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool,
					  security_state_mode)) {
				/*
				 * api_clear_memory will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, struct ffa_memory_access *receivers,
	uint32_t receivers_count, struct mpool *page_pool, bool clear,
	uint32_t *orig_from_mode_ret)
{
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, ensure that
	 * all constituents of a memory region being shared are at the same
	 * state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, receivers,
					receivers_count, &orig_from_mode,
					fragments, fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_locked.vm->id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by FFA_SUCCESS.
 */
struct ffa_value ffa_retrieve_check_update(
	struct vm_locked to_locked, ffa_vm_id_t from_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
	struct mpool *page_pool)
{
	uint32_t i;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Fragment not properly aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the recipient, and ensure
	 * that all constituents of the memory region being retrieved are at the
	 * same state.
	 */
	ret = ffa_retrieve_check_transition(
		to_locked, share_func, fragments, fragment_constituent_counts,
		fragment_count, memory_to_attributes, &to_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for retrieve.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the entire
	 * operation will succeed without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    to_locked, fragments, fragment_constituent_counts,
		    fragment_count, to_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(from_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		dlog_verbose("Couldn't clear constituents.\n");
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(ffa_region_group_identity_map(
		to_locked, fragments, fragment_constituent_counts,
		fragment_count, to_mode, page_pool, true));

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(to_locked, page_pool);

	return ret;
}

static struct ffa_value ffa_relinquish_check_update(
	struct vm_locked from_locked, ffa_vm_id_t owner_id,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	ret = ffa_relinquish_check_transition(
		from_locked, &orig_from_mode, fragments,
		fragment_constituent_counts, fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for relinquish.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear &&
	    !ffa_clear_memory_constituents(
		    plat_ffa_owner_world_mode(owner_id), fragments,
		    fragment_constituent_counts, fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	vm_ptable_defrag(from_locked, page_pool);

	return ret;
}

1102/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001103 * Complete a memory sending operation by checking that it is valid, updating
1104 * the sender page table, and then either marking the share state as having
1105 * completed sending (on success) or freeing it (on failure).
1106 *
1107 * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR.
1108 */
J-Alvesfdd29272022-07-19 13:16:31 +01001109struct ffa_value ffa_memory_send_complete(
Andrew Walbranca808b12020-05-15 17:22:28 +01001110 struct vm_locked from_locked, struct share_states_locked share_states,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001111 struct ffa_memory_share_state *share_state, struct mpool *page_pool,
1112 uint32_t *orig_from_mode_ret)
Andrew Walbranca808b12020-05-15 17:22:28 +01001113{
1114 struct ffa_memory_region *memory_region = share_state->memory_region;
1115 struct ffa_value ret;
1116
1117 /* Lock must be held. */
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001118 assert(share_states.share_states != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001119
1120 /* Check that state is valid in sender page table and update. */
1121 ret = ffa_send_check_update(
1122 from_locked, share_state->fragments,
1123 share_state->fragment_constituent_counts,
1124 share_state->fragment_count, share_state->share_func,
J-Alves363f5722022-04-25 17:37:37 +01001125 memory_region->receivers, memory_region->receiver_count,
Andrew Walbran37c574e2020-06-03 11:45:46 +01001126 page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR,
1127 orig_from_mode_ret);
Andrew Walbranca808b12020-05-15 17:22:28 +01001128 if (ret.func != FFA_SUCCESS_32) {
1129 /*
1130 * Free share state, it failed to send so it can't be retrieved.
1131 */
1132 dlog_verbose("Complete failed, freeing share state.\n");
1133 share_state_free(share_states, share_state, page_pool);
1134 return ret;
1135 }
1136
1137 share_state->sending_complete = true;
1138 dlog_verbose("Marked sending complete.\n");
1139
J-Alvesee68c542020-10-29 17:48:20 +00001140 return ffa_mem_success(share_state->memory_region->handle);
Andrew Walbranca808b12020-05-15 17:22:28 +01001141}
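
/*
 * Illustrative sketch (not part of the control flow above): what a caller
 * sees when ffa_memory_send_complete() succeeds, assuming the usual
 * low-word-in-arg2 / high-word-in-arg3 packing used by ffa_mem_success():
 *
 *	struct ffa_value ret = ffa_memory_send_complete(...);
 *	if (ret.func == FFA_SUCCESS_32) {
 *		ffa_memory_handle_t handle =
 *			ret.arg2 | (ret.arg3 << 32);
 *	}
 */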
1142
1143/**
Federico Recanatia98603a2021-12-20 18:04:03 +01001144 * Check that the memory attributes match Hafnium expectations:
1145 * Normal Memory, Inner shareable, Write-Back Read-Allocate
1146 * Write-Allocate Cacheable.
1147 */
1148static struct ffa_value ffa_memory_attributes_validate(
J-Alves7a99d0d2023-02-08 13:49:48 +00001149 ffa_memory_attributes_t attributes)
Federico Recanatia98603a2021-12-20 18:04:03 +01001150{
1151 enum ffa_memory_type memory_type;
1152 enum ffa_memory_cacheability cacheability;
1153 enum ffa_memory_shareability shareability;
1154
1155 memory_type = ffa_get_memory_type_attr(attributes);
1156 if (memory_type != FFA_MEMORY_NORMAL_MEM) {
1157 dlog_verbose("Invalid memory type %#x, expected %#x.\n",
1158 memory_type, FFA_MEMORY_NORMAL_MEM);
Federico Recanati3d953f32022-02-17 09:31:29 +01001159 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001160 }
1161
1162 cacheability = ffa_get_memory_cacheability_attr(attributes);
1163 if (cacheability != FFA_MEMORY_CACHE_WRITE_BACK) {
1164 dlog_verbose("Invalid cacheability %#x, expected %#x.\n",
1165 cacheability, FFA_MEMORY_CACHE_WRITE_BACK);
Federico Recanati3d953f32022-02-17 09:31:29 +01001166 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001167 }
1168
1169 shareability = ffa_get_memory_shareability_attr(attributes);
1170 if (shareability != FFA_MEMORY_INNER_SHAREABLE) {
1171 dlog_verbose("Invalid shareability %#x, expected #%x.\n",
1172 shareability, FFA_MEMORY_INNER_SHAREABLE);
Federico Recanati3d953f32022-02-17 09:31:29 +01001173 return ffa_error(FFA_DENIED);
Federico Recanatia98603a2021-12-20 18:04:03 +01001174 }
1175
1176 return (struct ffa_value){.func = FFA_SUCCESS_32};
1177}
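
/*
 * Illustrative sketch of attributes that pass the check above, assuming the
 * ffa_set_*_attr helpers from the vmapi headers; a lender building a
 * descriptor that must carry explicit attributes would typically set:
 *
 *	ffa_memory_attributes_t attributes = 0;
 *	ffa_set_memory_type_attr(&attributes, FFA_MEMORY_NORMAL_MEM);
 *	ffa_set_memory_cacheability_attr(&attributes,
 *					 FFA_MEMORY_CACHE_WRITE_BACK);
 *	ffa_set_memory_shareability_attr(&attributes,
 *					 FFA_MEMORY_INNER_SHAREABLE);
 *
 * Any other memory type, cacheability or shareability is rejected with
 * FFA_DENIED.
 */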
1178
1179/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001180 * Check that the given `memory_region` represents a valid memory send request
1181 * of the given `share_func` type, and update the receiver permissions stored
1182 * in it where the specification requires it (e.g. setting NX for FFA_MEM_SHARE).
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001183 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001184 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
Andrew Walbrana65a1322020-04-06 19:32:32 +01001185 * not.
1186 */
J-Alves66652252022-07-06 09:49:51 +01001187struct ffa_value ffa_memory_send_validate(
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001188 struct vm_locked from_locked, struct ffa_memory_region *memory_region,
1189 uint32_t memory_share_length, uint32_t fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001190 uint32_t share_func)
Andrew Walbrana65a1322020-04-06 19:32:32 +01001191{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001192 struct ffa_composite_memory_region *composite;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001193 uint64_t receivers_end;
1194 uint64_t min_length;
Federico Recanati872cd692022-01-05 13:10:10 +01001195 uint32_t composite_memory_region_offset;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001196 uint32_t constituents_start;
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001197 uint32_t constituents_length;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001198 enum ffa_data_access data_access;
1199 enum ffa_instruction_access instruction_access;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001200 enum ffa_memory_security security_state;
Federico Recanatia98603a2021-12-20 18:04:03 +01001201 struct ffa_value ret;
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001202 const size_t minimum_first_fragment_length =
1203 (sizeof(struct ffa_memory_region) +
1204 sizeof(struct ffa_memory_access) +
1205 sizeof(struct ffa_composite_memory_region));
1206
1207 if (fragment_length < minimum_first_fragment_length) {
1208 dlog_verbose("Fragment length %u too short (min %u).\n",
1209 (size_t)fragment_length,
1210 minimum_first_fragment_length);
1211 return ffa_error(FFA_INVALID_PARAMETERS);
1212 }
1213
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001214 static_assert(sizeof(struct ffa_memory_region_constituent) == 16,
1215 "struct ffa_memory_region_constituent must be 16 bytes");
1216 if (!is_aligned(fragment_length,
1217 sizeof(struct ffa_memory_region_constituent)) ||
1218 !is_aligned(memory_share_length,
1219 sizeof(struct ffa_memory_region_constituent))) {
1220 dlog_verbose(
1221 "Fragment length %u or total length %u"
1222 " is not 16-byte aligned.\n",
1223 fragment_length, memory_share_length);
1224 return ffa_error(FFA_INVALID_PARAMETERS);
1225 }
1226
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001227 if (fragment_length > memory_share_length) {
1228 dlog_verbose(
1229 "Fragment length %u greater than total length %u.\n",
1230 (size_t)fragment_length, (size_t)memory_share_length);
1231 return ffa_error(FFA_INVALID_PARAMETERS);
1232 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001233
J-Alves0b6653d2022-04-22 13:17:38 +01001234 assert(memory_region->receivers_offset ==
1235 offsetof(struct ffa_memory_region, receivers));
1236 assert(memory_region->memory_access_desc_size ==
1237 sizeof(struct ffa_memory_access));
1238
J-Alves95df0ef2022-12-07 10:09:48 +00001239 /* The sender must match the caller. */
1240 if ((!vm_id_is_current_world(from_locked.vm->id) &&
1241 vm_id_is_current_world(memory_region->sender)) ||
1242 (vm_id_is_current_world(from_locked.vm->id) &&
1243 memory_region->sender != from_locked.vm->id)) {
1244 dlog_verbose("Invalid memory sender ID.\n");
1245 return ffa_error(FFA_DENIED);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001246 }
1247
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001248 if (memory_region->receiver_count == 0U) {
1249 dlog_verbose("No receivers!\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001250 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001251 }
1252
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001253 /*
1254 * Ensure that the composite header is within the memory bounds and
1255 * doesn't overlap the first part of the message. Cast to uint64_t
1256 * to prevent overflow.
1257 */
1258 receivers_end = ((uint64_t)sizeof(struct ffa_memory_access) *
1259 (uint64_t)memory_region->receiver_count) +
1260 sizeof(struct ffa_memory_region);
1261 min_length = receivers_end +
1262 sizeof(struct ffa_composite_memory_region) +
1263 sizeof(struct ffa_memory_region_constituent);
1264 if (min_length > memory_share_length) {
1265 dlog_verbose("Share too short: got %u but minimum is %u.\n",
1266 (size_t)memory_share_length, (size_t)min_length);
1267 return ffa_error(FFA_INVALID_PARAMETERS);
1268 }
1269
1270 composite_memory_region_offset =
1271 memory_region->receivers[0].composite_memory_region_offset;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001272
1273 /*
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001274 * Check that the composite memory region descriptor follows the access
1275 * descriptors, is 16-byte aligned, and fits in the first
1276 * fragment.
Andrew Walbrana65a1322020-04-06 19:32:32 +01001277 */
Demi Marie Obenourd4677412023-02-03 20:35:12 -05001278 if ((composite_memory_region_offset < receivers_end) ||
1279 (composite_memory_region_offset % 16 != 0) ||
1280 (composite_memory_region_offset >
1281 fragment_length - sizeof(struct ffa_composite_memory_region))) {
1282 dlog_verbose(
1283 "Invalid composite memory region descriptor offset "
1284 "%u.\n",
1285 (size_t)composite_memory_region_offset);
1286 return ffa_error(FFA_INVALID_PARAMETERS);
1287 }
1288
1289 /*
1290 * Compute the start of the constituent regions. Already checked
1291 * to be not more than fragment_length and thus not more than
1292 * memory_share_length.
1293 */
1294 constituents_start = composite_memory_region_offset +
1295 sizeof(struct ffa_composite_memory_region);
1296 constituents_length = memory_share_length - constituents_start;
1297
1298 /*
1299 * Check that the number of constituents is consistent with the length
1300 * of the constituent region.
1301 */
1302 composite = ffa_memory_region_get_composite(memory_region, 0);
1303 if ((constituents_length %
1304 sizeof(struct ffa_memory_region_constituent) !=
1305 0) ||
1306 ((constituents_length /
1307 sizeof(struct ffa_memory_region_constituent)) !=
1308 composite->constituent_count)) {
1309 dlog_verbose("Invalid length %u or composite offset %u.\n",
1310 (size_t)memory_share_length,
1311 (size_t)composite_memory_region_offset);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001312 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001313 }
Andrew Walbranca808b12020-05-15 17:22:28 +01001314 if (fragment_length < memory_share_length &&
1315 fragment_length < HF_MAILBOX_SIZE) {
1316 dlog_warning(
1317 "Initial fragment length %d smaller than mailbox "
1318 "size.\n",
1319 fragment_length);
1320 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001321
Andrew Walbrana65a1322020-04-06 19:32:32 +01001322 /*
1323 * Clear is not allowed for memory sharing, as the sender still has
1324 * access to the memory.
1325 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001326 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) &&
1327 share_func == FFA_MEM_SHARE_32) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001328 dlog_verbose("Memory can't be cleared while being shared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001329 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001330 }
1331
1332 /* No other flags are allowed/supported here. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001333 if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001334 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001335 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001336 }
1337
J-Alves363f5722022-04-25 17:37:37 +01001338 /* Check that the permissions are valid, for each specified receiver. */
1339 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
1340 ffa_memory_access_permissions_t permissions =
1341 memory_region->receivers[i]
1342 .receiver_permissions.permissions;
1343 ffa_vm_id_t receiver_id =
1344 memory_region->receivers[i]
1345 .receiver_permissions.receiver;
1346
1347 if (memory_region->sender == receiver_id) {
1348 dlog_verbose("Can't share memory with itself.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001349 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001350 }
Federico Recanati85090c42021-12-15 13:17:54 +01001351
J-Alves363f5722022-04-25 17:37:37 +01001352 for (uint32_t j = i + 1; j < memory_region->receiver_count;
1353 j++) {
1354 if (receiver_id ==
1355 memory_region->receivers[j]
1356 .receiver_permissions.receiver) {
1357 dlog_verbose(
1358 "Repeated receiver(%x) in memory send "
1359 "operation.\n",
1360 memory_region->receivers[j]
1361 .receiver_permissions.receiver);
1362 return ffa_error(FFA_INVALID_PARAMETERS);
1363 }
1364 }
1365
1366 if (composite_memory_region_offset !=
1367 memory_region->receivers[i]
1368 .composite_memory_region_offset) {
1369 dlog_verbose(
1370 "All ffa_memory_access should point to the "
1371 "same composite memory region offset.\n");
1372 return ffa_error(FFA_INVALID_PARAMETERS);
1373 }
1374
1375 data_access = ffa_get_data_access_attr(permissions);
1376 instruction_access =
1377 ffa_get_instruction_access_attr(permissions);
1378 if (data_access == FFA_DATA_ACCESS_RESERVED ||
1379 instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) {
1380 dlog_verbose(
1381 "Reserved value for receiver permissions "
1382 "%#x.\n",
1383 permissions);
1384 return ffa_error(FFA_INVALID_PARAMETERS);
1385 }
1386 if (instruction_access !=
1387 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1388 dlog_verbose(
1389 "Invalid instruction access permissions %#x "
1390 "for sending memory.\n",
1391 permissions);
1392 return ffa_error(FFA_INVALID_PARAMETERS);
1393 }
1394 if (share_func == FFA_MEM_SHARE_32) {
1395 if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1396 dlog_verbose(
1397 "Invalid data access permissions %#x "
1398 "for sharing memory.\n",
1399 permissions);
1400 return ffa_error(FFA_INVALID_PARAMETERS);
1401 }
1402 /*
1403 * According to section 10.10.3 of the FF-A v1.1 EAC0
1404 * spec, NX is required for share operations (but must
1405 * not be specified by the sender) so set it in the
1406 * copy that we store, ready to be returned to the
1407 * retriever.
1408 */
J-Alvesb19731a2022-06-20 17:30:33 +01001409 if (vm_id_is_current_world(receiver_id)) {
1410 ffa_set_instruction_access_attr(
1411 &permissions,
1412 FFA_INSTRUCTION_ACCESS_NX);
1413 memory_region->receivers[i]
1414 .receiver_permissions.permissions =
1415 permissions;
1416 }
J-Alves363f5722022-04-25 17:37:37 +01001417 }
1418 if (share_func == FFA_MEM_LEND_32 &&
1419 data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) {
1420 dlog_verbose(
1421 "Invalid data access permissions %#x for "
1422 "lending memory.\n",
1423 permissions);
1424 return ffa_error(FFA_INVALID_PARAMETERS);
1425 }
1426
1427 if (share_func == FFA_MEM_DONATE_32 &&
1428 data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) {
1429 dlog_verbose(
1430 "Invalid data access permissions %#x for "
1431 "donating memory.\n",
1432 permissions);
1433 return ffa_error(FFA_INVALID_PARAMETERS);
1434 }
Andrew Walbrana65a1322020-04-06 19:32:32 +01001435 }
1436
Olivier Deprez4342a3c2022-02-28 09:37:25 +01001437 /* Memory region attributes NS-Bit MBZ for FFA_MEM_SHARE/LEND/DONATE. */
1438 security_state =
1439 ffa_get_memory_security_attr(memory_region->attributes);
1440 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
1441 dlog_verbose(
1442 "Invalid security state for memory share operation.\n");
1443 return ffa_error(FFA_INVALID_PARAMETERS);
1444 }
1445
Federico Recanatid937f5e2021-12-20 17:38:23 +01001446 /*
J-Alves807794e2022-06-16 13:42:47 +01001447 * For a memory donate, or a lend with a single borrower, the memory type
1448 * shall not be specified by the sender.
Federico Recanatid937f5e2021-12-20 17:38:23 +01001449 */
J-Alves807794e2022-06-16 13:42:47 +01001450 if (share_func == FFA_MEM_DONATE_32 ||
1451 (share_func == FFA_MEM_LEND_32 &&
1452 memory_region->receiver_count == 1)) {
1453 if (ffa_get_memory_type_attr(memory_region->attributes) !=
1454 FFA_MEMORY_NOT_SPECIFIED_MEM) {
1455 dlog_verbose(
1456 "Memory type shall not be specified by "
1457 "sender.\n");
1458 return ffa_error(FFA_INVALID_PARAMETERS);
1459 }
1460 } else {
1461 /*
1462 * Check that sender's memory attributes match Hafnium
1463 * expectations: Normal Memory, Inner shareable, Write-Back
1464 * Read-Allocate Write-Allocate Cacheable.
1465 */
1466 ret = ffa_memory_attributes_validate(memory_region->attributes);
1467 if (ret.func != FFA_SUCCESS_32) {
1468 return ret;
1469 }
Federico Recanatid937f5e2021-12-20 17:38:23 +01001470 }
1471
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001472 return (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Walbrana65a1322020-04-06 19:32:32 +01001473}
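
/*
 * Worked example of the length checks in ffa_memory_send_validate() for a
 * minimal single-receiver, single-constituent descriptor, assuming the
 * internal v1.1 layout with a 48-byte header and 16-byte access, composite
 * and constituent descriptors (the code uses the real struct sizes):
 *
 *	offset  0: struct ffa_memory_region (header)
 *	offset 48: struct ffa_memory_access (receivers[0])
 *	offset 64: struct ffa_composite_memory_region
 *	offset 80: struct ffa_memory_region_constituent
 *
 * receivers_end is 64, so composite_memory_region_offset must be 64 (after
 * the access descriptors and 16-byte aligned), and min_length is 96, the
 * smallest memory_share_length accepted for such a transaction.
 */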
1474
1475/**
Andrew Walbranca808b12020-05-15 17:22:28 +01001476 * Gets the share state for continuing an operation to donate, lend or share
1477 * memory, and checks that it is a valid request.
1478 *
1479 * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if
1480 * not.
1481 */
J-Alvesfdd29272022-07-19 13:16:31 +01001482struct ffa_value ffa_memory_send_continue_validate(
Andrew Walbranca808b12020-05-15 17:22:28 +01001483 struct share_states_locked share_states, ffa_memory_handle_t handle,
1484 struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
1485 struct mpool *page_pool)
1486{
1487 struct ffa_memory_share_state *share_state;
1488 struct ffa_memory_region *memory_region;
1489
Daniel Boulbya2f8c662021-11-26 17:52:53 +00001490 assert(share_state_ret != NULL);
Andrew Walbranca808b12020-05-15 17:22:28 +01001491
1492 /*
1493 * Look up the share state by handle and make sure that the VM ID
1494 * matches.
1495 */
1496 if (!get_share_state(share_states, handle, &share_state)) {
1497 dlog_verbose(
1498 "Invalid handle %#x for memory send continuation.\n",
1499 handle);
1500 return ffa_error(FFA_INVALID_PARAMETERS);
1501 }
1502 memory_region = share_state->memory_region;
1503
J-Alvesfdd29272022-07-19 13:16:31 +01001504 if (vm_id_is_current_world(from_vm_id) &&
1505 memory_region->sender != from_vm_id) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001506 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1507 return ffa_error(FFA_INVALID_PARAMETERS);
1508 }
1509
1510 if (share_state->sending_complete) {
1511 dlog_verbose(
1512 "Sending of memory handle %#x is already complete.\n",
1513 handle);
1514 return ffa_error(FFA_INVALID_PARAMETERS);
1515 }
1516
1517 if (share_state->fragment_count == MAX_FRAGMENTS) {
1518 /*
1519 * Log a warning as this is a sign that MAX_FRAGMENTS should
1520 * probably be increased.
1521 */
1522 dlog_warning(
1523 "Too many fragments for memory share with handle %#x; "
1524 "only %d supported.\n",
1525 handle, MAX_FRAGMENTS);
1526 /* Free share state, as it's not possible to complete it. */
1527 share_state_free(share_states, share_state, page_pool);
1528 return ffa_error(FFA_NO_MEMORY);
1529 }
1530
1531 *share_state_ret = share_state;
1532
1533 return (struct ffa_value){.func = FFA_SUCCESS_32};
1534}
1535
1536/**
J-Alves95df0ef2022-12-07 10:09:48 +00001537 * Checks if there is at least one receiver from the other world.
1538 */
J-Alvesfdd29272022-07-19 13:16:31 +01001539bool memory_region_receivers_from_other_world(
J-Alves95df0ef2022-12-07 10:09:48 +00001540 struct ffa_memory_region *memory_region)
1541{
1542 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
1543 ffa_vm_id_t receiver = memory_region->receivers[i]
1544 .receiver_permissions.receiver;
1545 if (!vm_id_is_current_world(receiver)) {
1546 return true;
1547 }
1548 }
1549 return false;
1550}
1551
1552/**
J-Alves9da280b2022-12-21 14:55:39 +00001553 * Validates a call to donate, lend or share memory in which Hafnium is the
1554 * designated allocator of the memory handle. In practice, this also means
1555 * Hafnium is responsible for managing the state structures for the transaction.
1556 * If Hafnium is the SPMC, it should allocate the memory handle when either the
1557 * sender is an SP or there is at least one borrower that is an SP.
1558 * If Hafnium is the hypervisor, it should allocate the memory handle when
1559 * the operation involves only NWd VMs.
1560 *
1561 * If validation goes well, Hafnium updates the stage-2 page tables of the
1562 * sender. Validation consists of checking if the message length and number of
1563 * memory region constituents match, and if the transition is valid for the
1564 * type of memory sending operation.
Andrew Walbran475c1452020-02-07 13:22:22 +00001565 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001566 * Assumes that the caller has already found and locked the sender VM and copied
1567 * the memory region descriptor from the sender's TX buffer to a freshly
1568 * allocated page from Hafnium's internal pool. The caller must have also
1569 * validated that the receiver VM ID is valid.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001570 *
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001571 * This function takes ownership of the `memory_region` passed in and will free
1572 * it when necessary; it must not be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001573 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001574struct ffa_value ffa_memory_send(struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001575 struct ffa_memory_region *memory_region,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01001576 uint32_t memory_share_length,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001577 uint32_t fragment_length, uint32_t share_func,
1578 struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001579{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001580 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01001581 struct share_states_locked share_states;
1582 struct ffa_memory_share_state *share_state;
Jose Marinho09b1db82019-08-08 09:16:59 +01001583
1584 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001585 * If there is an error validating the `memory_region` then we need to
1586 * free it because we own it but we won't be storing it in a share state
1587 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001588 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001589 ret = ffa_memory_send_validate(from_locked, memory_region,
1590 memory_share_length, fragment_length,
J-Alves363f5722022-04-25 17:37:37 +01001591 share_func);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001592 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001593 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001594 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001595 }
1596
Andrew Walbrana65a1322020-04-06 19:32:32 +01001597 /* Set flag for share function, ready to be retrieved later. */
1598 switch (share_func) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001599 case FFA_MEM_SHARE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001600 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001601 FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001602 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001603 case FFA_MEM_LEND_32:
1604 memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001605 break;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001606 case FFA_MEM_DONATE_32:
Andrew Walbrana65a1322020-04-06 19:32:32 +01001607 memory_region->flags |=
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001608 FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001609 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001610 }
1611
Andrew Walbranca808b12020-05-15 17:22:28 +01001612 share_states = share_states_lock();
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001613 /*
1614 * Allocate a share state before updating the page table. Otherwise if
1615 * updating the page table succeeded but allocating the share state
1616 * failed then it would leave the memory in a state where nobody could
1617 * get it back.
1618 */
Andrew Walbranca808b12020-05-15 17:22:28 +01001619 if (!allocate_share_state(share_states, share_func, memory_region,
1620 fragment_length, FFA_MEMORY_HANDLE_INVALID,
1621 &share_state)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001622 dlog_verbose("Failed to allocate share state.\n");
1623 mpool_free(page_pool, memory_region);
Andrew Walbranca808b12020-05-15 17:22:28 +01001624 ret = ffa_error(FFA_NO_MEMORY);
1625 goto out;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001626 }
1627
Andrew Walbranca808b12020-05-15 17:22:28 +01001628 if (fragment_length == memory_share_length) {
1629 /* No more fragments to come, everything fit in one message. */
J-Alves2a0d2882020-10-29 14:49:50 +00001630 ret = ffa_memory_send_complete(
1631 from_locked, share_states, share_state, page_pool,
1632 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001633 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01001634 /*
1635 * Use sender ID from 'memory_region' assuming
1636 * that at this point it has been validated:
1637 * - MBZ at virtual FF-A instance.
1638 */
1639 ffa_vm_id_t sender_to_ret =
1640 (from_locked.vm->id == HF_OTHER_WORLD_ID)
1641 ? memory_region->sender
1642 : 0;
Andrew Walbranca808b12020-05-15 17:22:28 +01001643 ret = (struct ffa_value){
1644 .func = FFA_MEM_FRAG_RX_32,
J-Alvesee68c542020-10-29 17:48:20 +00001645 .arg1 = (uint32_t)memory_region->handle,
1646 .arg2 = (uint32_t)(memory_region->handle >> 32),
J-Alvesfdd29272022-07-19 13:16:31 +01001647 .arg3 = fragment_length,
1648 .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16};
Andrew Walbranca808b12020-05-15 17:22:28 +01001649 }
1650
1651out:
1652 share_states_unlock(&share_states);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001653 dump_share_states();
Andrew Walbranca808b12020-05-15 17:22:28 +01001654 return ret;
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001655}
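
/*
 * Sketch of the two successful outcomes of ffa_memory_send(), as observed by
 * the caller:
 *
 * - Single fragment (fragment_length == memory_share_length): the return
 *   value comes from ffa_memory_send_complete() and carries FFA_SUCCESS_32
 *   with the newly allocated handle.
 *
 * - Fragmented send: the return value is FFA_MEM_FRAG_RX_32 with arg1/arg2
 *   holding the low/high 32 bits of the handle, arg3 the offset reached so
 *   far (the first fragment length) and, for calls from the other world, the
 *   sender ID in bits [31:16] of arg4; the sender is then expected to
 *   continue with FFA_MEM_FRAG_TX.
 */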
1656
1657/**
J-Alves8505a8a2022-06-15 18:10:18 +01001658 * Continues an operation to donate, lend or share memory to a VM from the
1659 * current world. If this is the last fragment, checks that the transition is valid
1660 * for the type of memory sending operation and updates the stage-2 page tables
1661 * of the sender.
Andrew Walbranca808b12020-05-15 17:22:28 +01001662 *
1663 * Assumes that the caller has already found and locked the sender VM and copied
1664 * the memory region descriptor from the sender's TX buffer to a freshly
1665 * allocated page from Hafnium's internal pool.
1666 *
1667 * This function takes ownership of the `fragment` passed in; it must not be
1668 * freed by the caller.
1669 */
1670struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
1671 void *fragment,
1672 uint32_t fragment_length,
1673 ffa_memory_handle_t handle,
1674 struct mpool *page_pool)
1675{
1676 struct share_states_locked share_states = share_states_lock();
1677 struct ffa_memory_share_state *share_state;
1678 struct ffa_value ret;
1679 struct ffa_memory_region *memory_region;
1680
Demi Marie Obenour73a1e942023-02-04 14:09:18 -05001681 CHECK(is_aligned(fragment,
1682 alignof(struct ffa_memory_region_constituent)));
1683 if (fragment_length % sizeof(struct ffa_memory_region_constituent) !=
1684 0) {
1685 dlog_verbose("Fragment length %u misaligned.\n",
1686 fragment_length);
1687 ret = ffa_error(FFA_INVALID_PARAMETERS);
1688 goto out_free_fragment;
1689 }
1690
Andrew Walbranca808b12020-05-15 17:22:28 +01001691 ret = ffa_memory_send_continue_validate(share_states, handle,
1692 &share_state,
1693 from_locked.vm->id, page_pool);
1694 if (ret.func != FFA_SUCCESS_32) {
1695 goto out_free_fragment;
1696 }
1697 memory_region = share_state->memory_region;
1698
J-Alves95df0ef2022-12-07 10:09:48 +00001699 if (memory_region_receivers_from_other_world(memory_region)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01001700 dlog_error(
1701 "Got hypervisor-allocated handle for memory send to "
J-Alves8505a8a2022-06-15 18:10:18 +01001702 "other world. This should never happen, and indicates "
1703 "a bug in "
Andrew Walbranca808b12020-05-15 17:22:28 +01001704 "EL3 code.\n");
1705 ret = ffa_error(FFA_INVALID_PARAMETERS);
1706 goto out_free_fragment;
1707 }
1708
1709 /* Add this fragment. */
1710 share_state->fragments[share_state->fragment_count] = fragment;
1711 share_state->fragment_constituent_counts[share_state->fragment_count] =
1712 fragment_length / sizeof(struct ffa_memory_region_constituent);
1713 share_state->fragment_count++;
1714
1715 /* Check whether the memory send operation is now ready to complete. */
1716 if (share_state_sending_complete(share_states, share_state)) {
J-Alves2a0d2882020-10-29 14:49:50 +00001717 ret = ffa_memory_send_complete(
1718 from_locked, share_states, share_state, page_pool,
1719 &(share_state->sender_orig_mode));
Andrew Walbranca808b12020-05-15 17:22:28 +01001720 } else {
1721 ret = (struct ffa_value){
1722 .func = FFA_MEM_FRAG_RX_32,
1723 .arg1 = (uint32_t)handle,
1724 .arg2 = (uint32_t)(handle >> 32),
1725 .arg3 = share_state_next_fragment_offset(share_states,
1726 share_state)};
1727 }
1728 goto out;
1729
1730out_free_fragment:
1731 mpool_free(page_pool, fragment);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001732
1733out:
Andrew Walbranca808b12020-05-15 17:22:28 +01001734 share_states_unlock(&share_states);
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001735 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001736}
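
/*
 * Sketch of the fragmented send exchange this function participates in, from
 * the sender's point of view (offsets are cumulative bytes received):
 *
 *	FFA_MEM_LEND(total, fragment 0)  -> FFA_MEM_FRAG_RX(handle, offset)
 *	FFA_MEM_FRAG_TX(handle, frag 1)  -> FFA_MEM_FRAG_RX(handle, offset)
 *	...
 *	FFA_MEM_FRAG_TX(handle, frag N)  -> FFA_SUCCESS(handle)
 *
 * Each FFA_MEM_FRAG_TX lands in ffa_memory_send_continue(); once the
 * accumulated constituents match the composite's constituent_count,
 * ffa_memory_send_complete() updates the sender's page tables and returns
 * the handle.
 */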
1737
Andrew Walbranca808b12020-05-15 17:22:28 +01001738/** Clean up after the receiver has finished retrieving a memory region. */
1739static void ffa_memory_retrieve_complete(
1740 struct share_states_locked share_states,
1741 struct ffa_memory_share_state *share_state, struct mpool *page_pool)
1742{
1743 if (share_state->share_func == FFA_MEM_DONATE_32) {
1744 /*
1745 * Memory that has been donated can't be relinquished,
1746 * so no need to keep the share state around.
1747 */
1748 share_state_free(share_states, share_state, page_pool);
1749 dlog_verbose("Freed share state for donate.\n");
1750 }
1751}
1752
J-Alves2d8457f2022-10-05 11:06:41 +01001753/**
1754 * Initialises the given memory region descriptor to be used for an
1755 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
1756 * fragment.
1757 * The memory region descriptor is initialised according to the retriever's
1758 * FF-A version.
1759 *
1760 * Returns true on success, or false if the given constituents won't all fit in
1761 * the first fragment.
1762 */
1763static bool ffa_retrieved_memory_region_init(
1764 void *response, uint32_t ffa_version, size_t response_max_size,
1765 ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
1766 ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
1767 ffa_vm_id_t receiver_id, ffa_memory_access_permissions_t permissions,
1768 uint32_t page_count, uint32_t total_constituent_count,
1769 const struct ffa_memory_region_constituent constituents[],
1770 uint32_t fragment_constituent_count, uint32_t *total_length,
1771 uint32_t *fragment_length)
1772{
1773 struct ffa_composite_memory_region *composite_memory_region;
1774 struct ffa_memory_access *receiver;
1775 uint32_t i;
1776 uint32_t constituents_offset;
1777 uint32_t receiver_count;
1778
1779 assert(response != NULL);
1780
1781 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1782 struct ffa_memory_region_v1_0 *retrieve_response =
1783 (struct ffa_memory_region_v1_0 *)response;
1784
J-Alves5da37d92022-10-24 16:33:48 +01001785 ffa_memory_region_init_header_v1_0(
1786 retrieve_response, sender, attributes, flags, handle, 0,
1787 RECEIVERS_COUNT_IN_RETRIEVE_RESP);
J-Alves2d8457f2022-10-05 11:06:41 +01001788
1789 receiver = &retrieve_response->receivers[0];
1790 receiver_count = retrieve_response->receiver_count;
1791
1792 receiver->composite_memory_region_offset =
1793 sizeof(struct ffa_memory_region_v1_0) +
1794 receiver_count * sizeof(struct ffa_memory_access);
1795
1796 composite_memory_region = ffa_memory_region_get_composite_v1_0(
1797 retrieve_response, 0);
1798 } else {
1799 /* Default to FF-A v1.1 version. */
1800 struct ffa_memory_region *retrieve_response =
1801 (struct ffa_memory_region *)response;
1802
1803 ffa_memory_region_init_header(retrieve_response, sender,
1804 attributes, flags, handle, 0, 1);
1805
1806 receiver = &retrieve_response->receivers[0];
1807 receiver_count = retrieve_response->receiver_count;
1808
1809 /*
1810 * Note that `sizeof(struct ffa_memory_region)` and
1811 * `sizeof(struct ffa_memory_access)` must both be multiples of
1812 * 16 (as verified by the asserts in `ffa_memory.c`), so it is
1813 * guaranteed that the offset we calculate here is aligned to a
1814 * 64-bit boundary and so 64-bit values can be copied without
1815 * alignment faults.
1816 */
1817 receiver->composite_memory_region_offset =
1818 sizeof(struct ffa_memory_region) +
1819 receiver_count * sizeof(struct ffa_memory_access);
1820
1821 composite_memory_region =
1822 ffa_memory_region_get_composite(retrieve_response, 0);
1823 }
1824
1825 assert(receiver != NULL);
1826 assert(composite_memory_region != NULL);
1827
1828 /*
1829 * Initialized here as in memory retrieve responses we currently expect
1830 * one borrower to be specified.
1831 */
1832 ffa_memory_access_init_permissions(receiver, receiver_id, 0, 0, flags);
1833 receiver->receiver_permissions.permissions = permissions;
1834
1835 composite_memory_region->page_count = page_count;
1836 composite_memory_region->constituent_count = total_constituent_count;
1837 composite_memory_region->reserved_0 = 0;
1838
1839 constituents_offset = receiver->composite_memory_region_offset +
1840 sizeof(struct ffa_composite_memory_region);
1841 if (constituents_offset +
1842 fragment_constituent_count *
1843 sizeof(struct ffa_memory_region_constituent) >
1844 response_max_size) {
1845 return false;
1846 }
1847
1848 for (i = 0; i < fragment_constituent_count; ++i) {
1849 composite_memory_region->constituents[i] = constituents[i];
1850 }
1851
1852 if (total_length != NULL) {
1853 *total_length =
1854 constituents_offset +
1855 composite_memory_region->constituent_count *
1856 sizeof(struct ffa_memory_region_constituent);
1857 }
1858 if (fragment_length != NULL) {
1859 *fragment_length =
1860 constituents_offset +
1861 fragment_constituent_count *
1862 sizeof(struct ffa_memory_region_constituent);
1863 }
1864
1865 return true;
1866}
1867
J-Alves96de29f2022-04-26 16:05:24 +01001868/*
1869 * Finds the given receiver in the 'struct ffa_memory_region' receivers array
1870 * and returns its index. If the receiver's ID doesn't exist in the array,
1871 * returns the region's 'receiver_count'.
1872 */
J-Alvesb5084cf2022-07-06 14:20:12 +01001873uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
1874 ffa_vm_id_t receiver)
J-Alves96de29f2022-04-26 16:05:24 +01001875{
1876 struct ffa_memory_access *receivers;
1877 uint32_t i;
1878
1879 assert(memory_region != NULL);
1880
1881 receivers = memory_region->receivers;
1882
1883 for (i = 0U; i < memory_region->receiver_count; i++) {
1884 if (receivers[i].receiver_permissions.receiver == receiver) {
1885 break;
1886 }
1887 }
1888
1889 return i;
1890}
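
/*
 * Typical usage (illustrative): the not-found case is signalled by the index
 * being equal to receiver_count, so callers must check it before indexing:
 *
 *	uint32_t index =
 *		ffa_memory_region_get_receiver(memory_region, receiver_id);
 *	if (index == memory_region->receiver_count) {
 *		return ffa_error(FFA_INVALID_PARAMETERS);
 *	}
 *	permissions = memory_region->receivers[index]
 *			      .receiver_permissions.permissions;
 */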
1891
1892/**
1893 * Validates the permissions requested by the retriever against those
1894 * specified by the lender of the memory share operation. Optionally returns the
1895 * permissions to be used for the S2 mapping through the `permissions` argument.
1896 * Returns true if permissions are valid, false otherwise.
1897 */
1898static bool ffa_memory_retrieve_is_memory_access_valid(
1899 enum ffa_data_access sent_data_access,
1900 enum ffa_data_access requested_data_access,
1901 enum ffa_instruction_access sent_instruction_access,
1902 enum ffa_instruction_access requested_instruction_access,
1903 ffa_memory_access_permissions_t *permissions)
1904{
1905 switch (sent_data_access) {
1906 case FFA_DATA_ACCESS_NOT_SPECIFIED:
1907 case FFA_DATA_ACCESS_RW:
1908 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1909 requested_data_access == FFA_DATA_ACCESS_RW) {
1910 if (permissions != NULL) {
1911 ffa_set_data_access_attr(permissions,
1912 FFA_DATA_ACCESS_RW);
1913 }
1914 break;
1915 }
1916 /* Intentional fall-through. */
1917 case FFA_DATA_ACCESS_RO:
1918 if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED ||
1919 requested_data_access == FFA_DATA_ACCESS_RO) {
1920 if (permissions != NULL) {
1921 ffa_set_data_access_attr(permissions,
1922 FFA_DATA_ACCESS_RO);
1923 }
1924 break;
1925 }
1926 dlog_verbose(
1927 "Invalid data access requested; sender specified "
1928 "permissions %#x but receiver requested %#x.\n",
1929 sent_data_access, requested_data_access);
1930 return false;
1931 case FFA_DATA_ACCESS_RESERVED:
1932 panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be "
1933 "checked before this point.");
1934 }
1935
1936 switch (sent_instruction_access) {
1937 case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
1938 case FFA_INSTRUCTION_ACCESS_X:
1939 if (requested_instruction_access ==
1940 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1941 requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) {
1942 if (permissions != NULL) {
1943 ffa_set_instruction_access_attr(
1944 permissions, FFA_INSTRUCTION_ACCESS_X);
1945 }
1946 break;
1947 }
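		/* Intentional fall-through. */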
1948 case FFA_INSTRUCTION_ACCESS_NX:
1949 if (requested_instruction_access ==
1950 FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1951 requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) {
1952 if (permissions != NULL) {
1953 ffa_set_instruction_access_attr(
1954 permissions, FFA_INSTRUCTION_ACCESS_NX);
1955 }
1956 break;
1957 }
1958 dlog_verbose(
1959 "Invalid instruction access requested; sender "
1960 "specified permissions %#x but receiver requested "
1961 "%#x.\n",
1962 sent_instruction_access, requested_instruction_access);
1963 return false;
1964 case FFA_INSTRUCTION_ACCESS_RESERVED:
1965 panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should "
1966 "be checked before this point.");
1967 }
1968
1969 return true;
1970}
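
/*
 * Summary of the data access combinations accepted above; instruction access
 * follows the same pattern with X/NX in place of RW/RO:
 *
 *	sent \ requested | NOT_SPECIFIED | RW     | RO
 *	-----------------+---------------+--------+----
 *	NOT_SPECIFIED/RW | RW            | RW     | RO
 *	RO               | RO            | denied | RO
 *
 * The value in the table is what gets written to `permissions` (when
 * non-NULL) for the retriever's stage-2 mapping.
 */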
1971
1972/**
1973 * Validate the receivers' permissions in the retrieve request against those
1974 * specified by the lender.
1975 * In the `permissions` argument returns the permissions to set at S2 for the
1976 * caller to the FFA_MEMORY_RETRIEVE_REQ.
1977 * Returns FFA_SUCCESS if all specified permissions are valid.
1978 */
1979static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
1980 struct ffa_memory_region *memory_region,
1981 struct ffa_memory_region *retrieve_request, ffa_vm_id_t to_vm_id,
1982 ffa_memory_access_permissions_t *permissions)
1983{
1984 uint32_t retrieve_receiver_index;
1985
1986 assert(permissions != NULL);
1987
1988 if (retrieve_request->receiver_count != memory_region->receiver_count) {
1989 dlog_verbose(
1990 "Retrieve request should contain same list of "
1991 "borrowers, as specified by the lender.\n");
1992 return ffa_error(FFA_INVALID_PARAMETERS);
1993 }
1994
1995 retrieve_receiver_index = retrieve_request->receiver_count;
1996
1997 /* Should be populated with the permissions of the retriever. */
1998 *permissions = 0;
1999
2000 for (uint32_t i = 0U; i < retrieve_request->receiver_count; i++) {
2001 ffa_memory_access_permissions_t sent_permissions;
2002 struct ffa_memory_access *current_receiver =
2003 &retrieve_request->receivers[i];
2004 ffa_memory_access_permissions_t requested_permissions =
2005 current_receiver->receiver_permissions.permissions;
2006 ffa_vm_id_t current_receiver_id =
2007 current_receiver->receiver_permissions.receiver;
2008 bool found_to_id = current_receiver_id == to_vm_id;
2009
2010 /*
2011 * Find the current receiver in the transaction descriptor from
2012 * sender.
2013 */
2014 uint32_t mem_region_receiver_index =
2015 ffa_memory_region_get_receiver(memory_region,
2016 current_receiver_id);
2017
2018 if (mem_region_receiver_index ==
2019 memory_region->receiver_count) {
2020 dlog_verbose("%s: receiver %x not found\n", __func__,
2021 current_receiver_id);
2022 return ffa_error(FFA_DENIED);
2023 }
2024
2025 sent_permissions =
2026 memory_region->receivers[mem_region_receiver_index]
2027 .receiver_permissions.permissions;
2028
2029 if (found_to_id) {
2030 retrieve_receiver_index = i;
2031 }
2032
2033 /*
2034 * While traversing the list of receivers, save the index of the
2035 * caller's entry; the caller must be present in the list.
2036 */
2037
2038 if (current_receiver->composite_memory_region_offset != 0U) {
2039 dlog_verbose(
2040 "Retriever specified address ranges not "
2041 "supported (got offset %d).\n",
2042 current_receiver
2043 ->composite_memory_region_offset);
2044 return ffa_error(FFA_INVALID_PARAMETERS);
2045 }
2046
2047 /*
2048 * Check permissions from sender against permissions requested
2049 * by receiver.
2050 */
2051 if (!ffa_memory_retrieve_is_memory_access_valid(
2052 ffa_get_data_access_attr(sent_permissions),
2053 ffa_get_data_access_attr(requested_permissions),
2054 ffa_get_instruction_access_attr(sent_permissions),
2055 ffa_get_instruction_access_attr(
2056 requested_permissions),
2057 found_to_id ? permissions : NULL)) {
2058 return ffa_error(FFA_DENIED);
2059 }
2060
2061 /*
2062 * Can't request PM to clear memory if only provided with RO
2063 * permissions.
2064 */
2065 if (found_to_id &&
2066 (ffa_get_data_access_attr(*permissions) ==
2067 FFA_DATA_ACCESS_RO) &&
2068 (retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2069 0U) {
2070 dlog_verbose(
2071 "Receiver has RO permissions can not request "
2072 "clear.\n");
2073 return ffa_error(FFA_DENIED);
2074 }
2075 }
2076
2077 if (retrieve_receiver_index == retrieve_request->receiver_count) {
2078 dlog_verbose(
2079 "Retrieve request does not contain caller's (%x) "
2080 "permissions\n",
2081 to_vm_id);
2082 return ffa_error(FFA_INVALID_PARAMETERS);
2083 }
2084
2085 return (struct ffa_value){.func = FFA_SUCCESS_32};
2086}
2087
J-Alvesa9cd7e32022-07-01 13:49:33 +01002088/*
2089 * According to section 16.4.3 of FF-A v1.1 EAC0 specification, the hypervisor
2090 * may issue an FFA_MEM_RETRIEVE_REQ to obtain the memory region description
2091 * of a pending memory sharing operation whose allocator is the SPM, for
2092 * validation purposes before forwarding an FFA_MEM_RECLAIM call. In doing so
2093 * the memory region descriptor of the retrieve request must be zeroed with the
2094 * exception of the sender ID and handle.
2095 */
2096bool is_ffa_memory_retrieve_borrower_request(struct ffa_memory_region *request,
2097 struct vm_locked to_locked)
2098{
2099 return to_locked.vm->id == HF_HYPERVISOR_VM_ID &&
2100 request->attributes == 0U && request->flags == 0U &&
2101 request->tag == 0U && request->receiver_count == 0U &&
2102 plat_ffa_memory_handle_allocated_by_current_world(
2103 request->handle);
2104}
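
/*
 * Illustrative shape of such a request (field names per struct
 * ffa_memory_region; `lender_id` and `handle` are placeholder names):
 *
 *	struct ffa_memory_region request = {0};
 *	request.sender = lender_id;
 *	request.handle = handle;
 *
 * with attributes, flags, tag and receiver_count left as zero, and the
 * handle one that was allocated by this world's partition manager.
 */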
2105
2106/*
2107 * Helper to reset count of fragments retrieved by the hypervisor.
2108 */
2109static void ffa_memory_retrieve_complete_from_hyp(
2110 struct ffa_memory_share_state *share_state)
2111{
2112 if (share_state->hypervisor_fragment_count ==
2113 share_state->fragment_count) {
2114 share_state->hypervisor_fragment_count = 0;
2115 }
2116}
2117
J-Alves089004f2022-07-13 14:25:44 +01002118/**
2119 * Validate that the memory region descriptor provided by the borrower on
2120 * FFA_MEM_RETRIEVE_REQ, against saved memory region provided by lender at the
2121 * memory sharing call.
2122 */
2123static struct ffa_value ffa_memory_retrieve_validate(
2124 ffa_vm_id_t receiver_id, struct ffa_memory_region *retrieve_request,
2125 struct ffa_memory_region *memory_region, uint32_t *receiver_index,
2126 uint32_t share_func)
2127{
2128 ffa_memory_region_flags_t transaction_type =
2129 retrieve_request->flags &
2130 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002131 enum ffa_memory_security security_state;
J-Alves089004f2022-07-13 14:25:44 +01002132
2133 assert(retrieve_request != NULL);
2134 assert(memory_region != NULL);
2135 assert(receiver_index != NULL);
2136 assert(retrieve_request->sender == memory_region->sender);
2137
2138 /*
2139 * Check that the transaction type expected by the receiver is
2140 * correct, if it has been specified.
2141 */
2142 if (transaction_type !=
2143 FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
2144 transaction_type != (memory_region->flags &
2145 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
2146 dlog_verbose(
2147 "Incorrect transaction type %#x for "
2148 "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
2149 transaction_type,
2150 memory_region->flags &
2151 FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK,
2152 retrieve_request->handle);
2153 return ffa_error(FFA_INVALID_PARAMETERS);
2154 }
2155
2156 if (retrieve_request->tag != memory_region->tag) {
2157 dlog_verbose(
2158 "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected "
2159 "%d for handle %#x.\n",
2160 retrieve_request->tag, memory_region->tag,
2161 retrieve_request->handle);
2162 return ffa_error(FFA_INVALID_PARAMETERS);
2163 }
2164
2165 *receiver_index =
2166 ffa_memory_region_get_receiver(memory_region, receiver_id);
2167
2168 if (*receiver_index == memory_region->receiver_count) {
2169 dlog_verbose(
2170 "Incorrect receiver VM ID %d for "
2171 "FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
J-Alves59ed0042022-07-28 18:26:41 +01002172 receiver_id, memory_region->handle);
J-Alves089004f2022-07-13 14:25:44 +01002173 return ffa_error(FFA_INVALID_PARAMETERS);
2174 }
2175
2176 if ((retrieve_request->flags &
2177 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_VALID) != 0U) {
2178 dlog_verbose(
2179 "Retriever specified 'address range alignment 'hint' "
2180 "not supported.\n");
2181 return ffa_error(FFA_INVALID_PARAMETERS);
2182 }
2183 if ((retrieve_request->flags &
2184 FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_MASK) != 0) {
2185 dlog_verbose(
2186 "Bits 8-5 must be zero in memory region's flags "
2187 "(address range alignment hint not supported).\n");
2188 return ffa_error(FFA_INVALID_PARAMETERS);
2189 }
2190
2191 if ((retrieve_request->flags & ~0x3FF) != 0U) {
2192 dlog_verbose(
2193 "Bits 31-10 must be zero in memory region's flags.\n");
2194 return ffa_error(FFA_INVALID_PARAMETERS);
2195 }
2196
2197 if (share_func == FFA_MEM_SHARE_32 &&
2198 (retrieve_request->flags &
2199 (FFA_MEMORY_REGION_FLAG_CLEAR |
2200 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH)) != 0U) {
2201 dlog_verbose(
2202 "Memory Share operation can't clean after relinquish "
2203 "memory region.\n");
2204 return ffa_error(FFA_INVALID_PARAMETERS);
2205 }
2206
2207 /*
2208 * If the borrower needs the memory to be cleared before mapping
2209 * to its address space, the sender should have set the flag
2210 * when calling FFA_MEM_LEND/FFA_MEM_DONATE, else return
2211 * FFA_DENIED.
2212 */
2213 if ((retrieve_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U &&
2214 (memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) == 0U) {
2215 dlog_verbose(
2216 "Borrower needs memory cleared. Sender needs to set "
2217 "flag for clearing memory.\n");
2218 return ffa_error(FFA_DENIED);
2219 }
2220
Olivier Deprez4342a3c2022-02-28 09:37:25 +01002221 /* Memory region attributes NS-Bit MBZ for FFA_MEM_RETRIEVE_REQ. */
2222 security_state =
2223 ffa_get_memory_security_attr(retrieve_request->attributes);
2224 if (security_state != FFA_MEMORY_SECURITY_UNSPECIFIED) {
2225 dlog_verbose(
2226 "Invalid security state for memory retrieve request "
2227 "operation.\n");
2228 return ffa_error(FFA_INVALID_PARAMETERS);
2229 }
2230
J-Alves089004f2022-07-13 14:25:44 +01002231 /*
2232 * If memory type is not specified, bypass validation of memory
2233 * attributes in the retrieve request. The retriever is expecting to
2234 * obtain this information from the SPMC.
2235 */
2236 if (ffa_get_memory_type_attr(retrieve_request->attributes) ==
2237 FFA_MEMORY_NOT_SPECIFIED_MEM) {
2238 return (struct ffa_value){.func = FFA_SUCCESS_32};
2239 }
2240
2241 /*
2242 * Ensure receiver's attributes are compatible with how
2243 * Hafnium maps memory: Normal Memory, Inner shareable,
2244 * Write-Back Read-Allocate Write-Allocate Cacheable.
2245 */
2246 return ffa_memory_attributes_validate(retrieve_request->attributes);
2247}
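
/*
 * Summary of the retrieve request flag bits enforced above, following the
 * FF-A v1.1 transaction flags layout assumed by the FFA_MEMORY_REGION_FLAG_*
 * and FFA_MEMORY_REGION_ADDRESS_RANGE_HINT_* definitions:
 *
 *	bit 0      zero memory before retrieval (needs the sender's clear
 *	           flag; rejected for FFA_MEM_SHARE)
 *	bit 2      zero memory after relinquish (rejected for FFA_MEM_SHARE)
 *	bits 4:3   transaction type (must match the send operation, if set)
 *	bits 8:5   address range alignment hint (must be zero)
 *	bit 9      address range hint valid (not supported, must be zero)
 *	bits 31:10 reserved (must be zero)
 */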
2248
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002249struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
2250 struct ffa_memory_region *retrieve_request,
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002251 uint32_t retrieve_request_length,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002252 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002253{
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002254 uint32_t expected_retrieve_request_length =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002255 sizeof(struct ffa_memory_region) +
Andrew Walbrana65a1322020-04-06 19:32:32 +01002256 retrieve_request->receiver_count *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002257 sizeof(struct ffa_memory_access);
2258 ffa_memory_handle_t handle = retrieve_request->handle;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002259 struct ffa_memory_region *memory_region;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002260 ffa_memory_access_permissions_t permissions = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002261 uint32_t memory_to_attributes;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002262 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002263 struct ffa_memory_share_state *share_state;
2264 struct ffa_value ret;
Andrew Walbranca808b12020-05-15 17:22:28 +01002265 struct ffa_composite_memory_region *composite;
2266 uint32_t total_length;
2267 uint32_t fragment_length;
J-Alves089004f2022-07-13 14:25:44 +01002268 ffa_vm_id_t receiver_id = to_locked.vm->id;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002269 bool is_send_complete = false;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002270
2271 dump_share_states();
2272
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002273 if (retrieve_request_length != expected_retrieve_request_length) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002274 dlog_verbose(
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002275 "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002276 "but was %d.\n",
Andrew Walbran130a8ae2020-05-15 16:27:15 +01002277 expected_retrieve_request_length,
2278 retrieve_request_length);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002279 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002280 }
2281
2282 share_states = share_states_lock();
2283 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002284 dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002285 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002286 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002287 goto out;
2288 }
2289
J-Alves96de29f2022-04-26 16:05:24 +01002290 if (!share_state->sending_complete) {
2291 dlog_verbose(
2292 "Memory with handle %#x not fully sent, can't "
2293 "retrieve.\n",
2294 handle);
2295 ret = ffa_error(FFA_INVALID_PARAMETERS);
2296 goto out;
2297 }
2298
Andrew Walbrana65a1322020-04-06 19:32:32 +01002299 memory_region = share_state->memory_region;
J-Alves089004f2022-07-13 14:25:44 +01002300
Andrew Walbrana65a1322020-04-06 19:32:32 +01002301 CHECK(memory_region != NULL);
2302
J-Alves089004f2022-07-13 14:25:44 +01002303 if (retrieve_request->sender != memory_region->sender) {
2304 dlog_verbose(
2305 "Memory with handle %#x not fully sent, can't "
2306 "retrieve.\n",
2307 handle);
2308 ret = ffa_error(FFA_INVALID_PARAMETERS);
2309 goto out;
2310 }
J-Alves96de29f2022-04-26 16:05:24 +01002311
J-Alvesa9cd7e32022-07-01 13:49:33 +01002312 if (!is_ffa_memory_retrieve_borrower_request(retrieve_request,
2313 to_locked)) {
2314 uint32_t receiver_index;
J-Alvesa9cd7e32022-07-01 13:49:33 +01002315
J-Alvesb5084cf2022-07-06 14:20:12 +01002316 /*
2317 * The SPMC can only process retrieve requests for memory share
2318 * operations with a single borrower from the other world. It can't
2319 * determine the ID of the NWd VM that invoked the retrieve
2320 * request interface call. It relies on the hypervisor to
2321 * validate the caller's ID against that provided in the
2322 * `receivers` list of the retrieve response.
2323 * In case there is only one borrower from the NWd in the
2324 * transaction descriptor, record that in the `receiver_id` for
2325 * later use, and validate in the retrieve request message.
2326 */
2327 if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
2328 uint32_t other_world_count = 0;
2329
2330 for (uint32_t i = 0; i < memory_region->receiver_count;
2331 i++) {
2332 receiver_id =
2333 retrieve_request->receivers[0]
2334 .receiver_permissions.receiver;
2335 if (!vm_id_is_current_world(receiver_id)) {
2336 other_world_count++;
2337 }
2338 }
2339 if (other_world_count > 1) {
2340 dlog_verbose(
2341 "Support one receiver from the other "
2342 "world.\n");
2343 return ffa_error(FFA_NOT_SUPPORTED);
2344 }
2345 }
2346
2347 /*
2348 * Validate retrieve request, according to what was sent by the
2349 * sender. Function will output the `receiver_index` from the
2350 * provided memory region, and will output `permissions` from
2351 * the validated requested permissions.
2352 */
J-Alves089004f2022-07-13 14:25:44 +01002353 ret = ffa_memory_retrieve_validate(
2354 receiver_id, retrieve_request, memory_region,
2355 &receiver_index, share_state->share_func);
2356 if (ret.func != FFA_SUCCESS_32) {
J-Alvesa9cd7e32022-07-01 13:49:33 +01002357 goto out;
2358 }
2359
2360 if (share_state->retrieved_fragment_count[receiver_index] !=
2361 0U) {
2362 dlog_verbose(
2363 "Memory with handle %#x already retrieved.\n",
2364 handle);
2365 ret = ffa_error(FFA_DENIED);
2366 goto out;
2367 }
2368
J-Alvesa9cd7e32022-07-01 13:49:33 +01002369 ret = ffa_memory_retrieve_validate_memory_access_list(
2370 memory_region, retrieve_request, receiver_id,
2371 &permissions);
J-Alves614d9f42022-06-28 14:03:10 +01002372 if (ret.func != FFA_SUCCESS_32) {
2373 goto out;
2374 }
Federico Recanatia98603a2021-12-20 18:04:03 +01002375
J-Alvesa9cd7e32022-07-01 13:49:33 +01002376 memory_to_attributes = ffa_memory_permissions_to_mode(
2377 permissions, share_state->sender_orig_mode);
J-Alves40e260e2022-09-22 17:52:43 +01002378
2379 if (to_locked.vm->el0_partition) {
2380 /*
2381 * Get extra mapping attributes for the given VM ID.
2382 * If the memory is shared by a VM executing in non
2383 * secure world, attribute MM_MODE_NS has to be set
2384 * while mapping that in a SP executing in secure world.
2385 */
2386 memory_to_attributes |=
2387 arch_mm_extra_attributes_from_vm(
2388 retrieve_request->sender);
2389 }
2390
J-Alvesa9cd7e32022-07-01 13:49:33 +01002391 ret = ffa_retrieve_check_update(
2392 to_locked, memory_region->sender,
2393 share_state->fragments,
2394 share_state->fragment_constituent_counts,
2395 share_state->fragment_count, memory_to_attributes,
2396 share_state->share_func, false, page_pool);
2397
2398 if (ret.func != FFA_SUCCESS_32) {
2399 goto out;
2400 }
2401
2402 share_state->retrieved_fragment_count[receiver_index] = 1;
2403 is_send_complete =
2404 share_state->retrieved_fragment_count[receiver_index] ==
2405 share_state->fragment_count;
J-Alves3c5b2072022-11-21 12:45:40 +00002406
2407 share_state->clear_after_relinquish =
2408 (retrieve_request->flags &
2409 FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) != 0U;
2410
J-Alvesa9cd7e32022-07-01 13:49:33 +01002411 } else {
2412 if (share_state->hypervisor_fragment_count != 0U) {
2413 dlog_verbose(
J-Alvesb5084cf2022-07-06 14:20:12 +01002414 "Memory with handle %#x already retrieved by "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002415 "the hypervisor.\n",
2416 handle);
2417 ret = ffa_error(FFA_DENIED);
2418 goto out;
2419 }
2420
2421 share_state->hypervisor_fragment_count = 1;
2422
2423 ffa_memory_retrieve_complete_from_hyp(share_state);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002424 }
2425
J-Alvesb5084cf2022-07-06 14:20:12 +01002426 /* VMs acquire the RX buffer from SPMC. */
2427 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2428
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002429 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002430 * Copy response to RX buffer of caller and deliver the message.
2431 * This must be done before the share_state is (possibly) freed.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002432 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002433 /* TODO: combine attributes from sender and request. */
Andrew Walbranca808b12020-05-15 17:22:28 +01002434 composite = ffa_memory_region_get_composite(memory_region, 0);
2435 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002436 * Constituents which we received in the first fragment should
2437 * always fit in the first fragment we are sending, because the
2438 * header is the same size in both cases and we have a fixed
2439 * message buffer size. So `ffa_retrieved_memory_region_init`
2440 * should never fail.
Andrew Walbranca808b12020-05-15 17:22:28 +01002441 */
2442 CHECK(ffa_retrieved_memory_region_init(
J-Alves2d8457f2022-10-05 11:06:41 +01002443 to_locked.vm->mailbox.recv, to_locked.vm->ffa_version,
2444 HF_MAILBOX_SIZE, memory_region->sender,
2445 memory_region->attributes, memory_region->flags, handle,
2446 receiver_id, permissions, composite->page_count,
2447 composite->constituent_count, share_state->fragments[0],
Andrew Walbranca808b12020-05-15 17:22:28 +01002448 share_state->fragment_constituent_counts[0], &total_length,
2449 &fragment_length));
J-Alvesb5084cf2022-07-06 14:20:12 +01002450
Andrew Walbranca808b12020-05-15 17:22:28 +01002451 to_locked.vm->mailbox.recv_size = fragment_length;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002452 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002453 to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002454 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002455
J-Alvesa9cd7e32022-07-01 13:49:33 +01002456 if (is_send_complete) {
Andrew Walbranca808b12020-05-15 17:22:28 +01002457 ffa_memory_retrieve_complete(share_states, share_state,
2458 page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002459 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002460 ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
Andrew Walbranca808b12020-05-15 17:22:28 +01002461 .arg1 = total_length,
2462 .arg2 = fragment_length};
Andrew Walbranca808b12020-05-15 17:22:28 +01002463out:
2464 share_states_unlock(&share_states);
2465 dump_share_states();
2466 return ret;
2467}
2468
J-Alves5da37d92022-10-24 16:33:48 +01002469/**
2470 * Determine the fragment offset that the caller is expected to pass in
2471 * FFA_MEM_FRAG_RX, according to its FF-A version.
2472 */
2473static uint32_t ffa_memory_retrieve_expected_offset_per_ffa_version(
2474 struct ffa_memory_region *memory_region,
2475 uint32_t retrieved_constituents_count, uint32_t ffa_version)
2476{
2477 uint32_t expected_fragment_offset;
2478 uint32_t composite_constituents_offset;
2479
2480 if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
2481 /*
2482 * Hafnium stores memory regions in the FF-A v1.1 format, so the
2483 * constituents offset can be read directly from the descriptor.
2484 */
2485 composite_constituents_offset =
2486 ffa_composite_constituent_offset(memory_region, 0);
2487 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
2488 /*
2489 * If the retriever is FF-A v1.0, compute the composite offset as
2490 * it was laid out in the retrieve response that was sent to the
2491 * retriever.
2492 */
2493 composite_constituents_offset =
2494 sizeof(struct ffa_memory_region_v1_0) +
2495 RECEIVERS_COUNT_IN_RETRIEVE_RESP *
2496 sizeof(struct ffa_memory_access) +
2497 sizeof(struct ffa_composite_memory_region);
2498 } else {
2499 panic("%s received an invalid FF-A version.\n", __func__);
2500 }
2501
2502 expected_fragment_offset =
2503 composite_constituents_offset +
2504 retrieved_constituents_count *
2505 sizeof(struct ffa_memory_region_constituent) -
2506 sizeof(struct ffa_memory_access) *
2507 (memory_region->receiver_count - 1);
2508
2509 return expected_fragment_offset;
2510}
2511
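/**
 * Continues a memory retrieve transaction: in response to FFA_MEM_FRAG_RX,
 * copies the next fragment of the memory region descriptor into the caller's
 * RX buffer and returns it via FFA_MEM_FRAG_TX.
 */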
Andrew Walbranca808b12020-05-15 17:22:28 +01002512struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
2513 ffa_memory_handle_t handle,
2514 uint32_t fragment_offset,
J-Alves59ed0042022-07-28 18:26:41 +01002515 ffa_vm_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01002516 struct mpool *page_pool)
2517{
2518 struct ffa_memory_region *memory_region;
2519 struct share_states_locked share_states;
2520 struct ffa_memory_share_state *share_state;
2521 struct ffa_value ret;
2522 uint32_t fragment_index;
2523 uint32_t retrieved_constituents_count;
2524 uint32_t i;
2525 uint32_t expected_fragment_offset;
2526 uint32_t remaining_constituent_count;
2527 uint32_t fragment_length;
J-Alvesc7484f12022-05-13 12:41:14 +01002528 uint32_t receiver_index;
J-Alves59ed0042022-07-28 18:26:41 +01002529 bool continue_ffa_hyp_mem_retrieve_req;
Andrew Walbranca808b12020-05-15 17:22:28 +01002530
2531 dump_share_states();
2532
2533 share_states = share_states_lock();
2534 if (!get_share_state(share_states, handle, &share_state)) {
2535 dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n",
2536 handle);
2537 ret = ffa_error(FFA_INVALID_PARAMETERS);
2538 goto out;
2539 }
2540
2541 memory_region = share_state->memory_region;
2542 CHECK(memory_region != NULL);
2543
Andrew Walbranca808b12020-05-15 17:22:28 +01002544 if (!share_state->sending_complete) {
2545 dlog_verbose(
2546 "Memory with handle %#x not fully sent, can't "
2547 "retrieve.\n",
2548 handle);
2549 ret = ffa_error(FFA_INVALID_PARAMETERS);
2550 goto out;
2551 }
2552
J-Alves59ed0042022-07-28 18:26:41 +01002553 /*
2554 * If a hypervisor retrieve request has already been initiated for the
2555 * given share_state, continue it; otherwise assume this is the
2556 * continuation of a retrieve request from a NWd VM.
2557 */
2558 continue_ffa_hyp_mem_retrieve_req =
2559 (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
2560 (share_state->hypervisor_fragment_count != 0U) &&
2561 plat_ffa_is_vm_id(sender_vm_id);
Andrew Walbranca808b12020-05-15 17:22:28 +01002562
J-Alves59ed0042022-07-28 18:26:41 +01002563 if (!continue_ffa_hyp_mem_retrieve_req) {
2564 receiver_index = ffa_memory_region_get_receiver(
2565 memory_region, to_locked.vm->id);
2566
2567 if (receiver_index == memory_region->receiver_count) {
2568 dlog_verbose(
2569 "Caller of FFA_MEM_FRAG_RX (%x) is not a "
2570 "borrower in the memory sharing transaction (%x).\n",
2571 to_locked.vm->id, handle);
2572 ret = ffa_error(FFA_INVALID_PARAMETERS);
2573 goto out;
2574 }
2575
2576 if (share_state->retrieved_fragment_count[receiver_index] ==
2577 0 ||
2578 share_state->retrieved_fragment_count[receiver_index] >=
2579 share_state->fragment_count) {
2580 dlog_verbose(
2581 "Retrieval of memory with handle %#x not yet "
2582 "started or already completed (%d/%d fragments "
2583 "retrieved).\n",
2584 handle,
2585 share_state->retrieved_fragment_count
2586 [receiver_index],
2587 share_state->fragment_count);
2588 ret = ffa_error(FFA_INVALID_PARAMETERS);
2589 goto out;
2590 }
2591
2592 fragment_index =
2593 share_state->retrieved_fragment_count[receiver_index];
2594 } else {
2595 if (share_state->hypervisor_fragment_count == 0 ||
2596 share_state->hypervisor_fragment_count >=
2597 share_state->fragment_count) {
2598 dlog_verbose(
2599 "Retrieval of memory with handle %x not "
2600 "started by the hypervisor.\n",
2601 handle);
2602 ret = ffa_error(FFA_INVALID_PARAMETERS);
2603 goto out;
2604 }
2605
2606 if (memory_region->sender != sender_vm_id) {
2607 dlog_verbose(
2608 "Sender ID (%x) is not as expected for memory "
2609 "handle %x.\n",
2610 sender_vm_id, handle);
2611 ret = ffa_error(FFA_INVALID_PARAMETERS);
2612 goto out;
2613 }
2614
2615 fragment_index = share_state->hypervisor_fragment_count;
2616
2617 receiver_index = 0;
2618 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002619
2620 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002621 * Check that the given fragment offset is correct by counting
2622 * how many constituents were in the fragments previously sent.
Andrew Walbranca808b12020-05-15 17:22:28 +01002623 */
2624 retrieved_constituents_count = 0;
2625 for (i = 0; i < fragment_index; ++i) {
2626 retrieved_constituents_count +=
2627 share_state->fragment_constituent_counts[i];
2628 }
J-Alvesc7484f12022-05-13 12:41:14 +01002629
2630 CHECK(memory_region->receiver_count > 0);
2631
Andrew Walbranca808b12020-05-15 17:22:28 +01002632 expected_fragment_offset =
J-Alves5da37d92022-10-24 16:33:48 +01002633 ffa_memory_retrieve_expected_offset_per_ffa_version(
2634 memory_region, retrieved_constituents_count,
2635 to_locked.vm->ffa_version);
2636
Andrew Walbranca808b12020-05-15 17:22:28 +01002637 if (fragment_offset != expected_fragment_offset) {
2638 dlog_verbose("Fragment offset was %d but expected %d.\n",
2639 fragment_offset, expected_fragment_offset);
2640 ret = ffa_error(FFA_INVALID_PARAMETERS);
2641 goto out;
2642 }
2643
J-Alves59ed0042022-07-28 18:26:41 +01002644 /* VMs acquire the RX buffer from the SPMC. */
2645 CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
2646
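	/*
	 * Copy the requested fragment into the caller's RX buffer; the CHECK
	 * below asserts that it fits in a single mailbox-sized fragment.
	 */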
Andrew Walbranca808b12020-05-15 17:22:28 +01002647 remaining_constituent_count = ffa_memory_fragment_init(
2648 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
2649 share_state->fragments[fragment_index],
2650 share_state->fragment_constituent_counts[fragment_index],
2651 &fragment_length);
2652 CHECK(remaining_constituent_count == 0);
2653 to_locked.vm->mailbox.recv_size = fragment_length;
2654 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
2655 to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002656 to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
Andrew Walbranca808b12020-05-15 17:22:28 +01002657
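	/*
	 * Account for the fragment just delivered and, if it was the last
	 * one, mark the retrieval as complete.
	 */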
J-Alves59ed0042022-07-28 18:26:41 +01002658 if (!continue_ffa_hyp_mem_retrieve_req) {
2659 share_state->retrieved_fragment_count[receiver_index]++;
2660 if (share_state->retrieved_fragment_count[receiver_index] ==
2661 share_state->fragment_count) {
2662 ffa_memory_retrieve_complete(share_states, share_state,
2663 page_pool);
2664 }
2665 } else {
2666 share_state->hypervisor_fragment_count++;
2667
2668 ffa_memory_retrieve_complete_from_hyp(share_state);
2669 }
Andrew Walbranca808b12020-05-15 17:22:28 +01002670 ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
2671 .arg1 = (uint32_t)handle,
2672 .arg2 = (uint32_t)(handle >> 32),
2673 .arg3 = fragment_length};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002674
2675out:
2676 share_states_unlock(&share_states);
2677 dump_share_states();
2678 return ret;
2679}
2680
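/**
 * Relinquishes the caller's access to a memory region it previously
 * retrieved, optionally clearing the memory once no other borrower still
 * holds it.
 */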
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002681struct ffa_value ffa_memory_relinquish(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002682 struct vm_locked from_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002683 struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002684{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002685 ffa_memory_handle_t handle = relinquish_request->handle;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002686 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002687 struct ffa_memory_share_state *share_state;
2688 struct ffa_memory_region *memory_region;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002689 bool clear;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002690 struct ffa_value ret;
J-Alves8eb19162022-04-28 10:56:48 +01002691 uint32_t receiver_index;
J-Alves3c5b2072022-11-21 12:45:40 +00002692 bool receivers_relinquished_memory;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002693
Andrew Walbrana65a1322020-04-06 19:32:32 +01002694 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002695 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002696 "Stream endpoints not supported (got %d "
J-Alves668a86e2023-05-10 11:53:25 +01002697 "endpoints on FFA_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002698 relinquish_request->endpoint_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002699 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002700 }
2701
Andrew Walbrana65a1322020-04-06 19:32:32 +01002702 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002703 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002704 "VM ID %d in relinquish message doesn't match "
J-Alves668a86e2023-05-10 11:53:25 +01002705 "calling VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01002706 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002707 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002708 }
2709
2710 dump_share_states();
2711
2712 share_states = share_states_lock();
2713 if (!get_share_state(share_states, handle, &share_state)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002714 dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002715 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002716 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002717 goto out;
2718 }
2719
Andrew Walbranca808b12020-05-15 17:22:28 +01002720 if (!share_state->sending_complete) {
2721 dlog_verbose(
2722 "Memory with handle %#x not fully sent, can't "
2723 "relinquish.\n",
2724 handle);
2725 ret = ffa_error(FFA_INVALID_PARAMETERS);
2726 goto out;
2727 }
2728
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002729 memory_region = share_state->memory_region;
2730 CHECK(memory_region != NULL);
2731
J-Alves8eb19162022-04-28 10:56:48 +01002732 receiver_index = ffa_memory_region_get_receiver(memory_region,
2733 from_locked.vm->id);
2734
2735 if (receiver_index == memory_region->receiver_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002736 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002737 "VM ID %d tried to relinquish memory region "
J-Alves668a86e2023-05-10 11:53:25 +01002738 "with handle %#x but is not a valid borrower.\n",
J-Alves8eb19162022-04-28 10:56:48 +01002739 from_locked.vm->id, handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002740 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002741 goto out;
2742 }
2743
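	/*
	 * A borrower may only relinquish memory that it has fully retrieved,
	 * i.e. after receiving all fragments of the transaction.
	 */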
J-Alves8eb19162022-04-28 10:56:48 +01002744 if (share_state->retrieved_fragment_count[receiver_index] !=
Andrew Walbranca808b12020-05-15 17:22:28 +01002745 share_state->fragment_count) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002746 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002747 "Memory with handle %#x not yet fully "
2748 "retrieved; "
J-Alves8eb19162022-04-28 10:56:48 +01002749 "receiver %x can't relinquish.\n",
2750 handle, from_locked.vm->id);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002751 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002752 goto out;
2753 }
2754
J-Alves3c5b2072022-11-21 12:45:40 +00002755 /*
2756 * Clear only if requested (in this relinquish call or in a borrower's
2757 * retrieve request), and only once no other borrower has it retrieved.
2758 */
2759 receivers_relinquished_memory = true;
2760
2761 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2762 struct ffa_memory_access *receiver =
2763 &memory_region->receivers[i];
2764
2765 if (receiver->receiver_permissions.receiver ==
2766 from_locked.vm->id) {
2767 continue;
2768 }
2769
2770 if (share_state->retrieved_fragment_count[i] != 0U) {
2771 receivers_relinquished_memory = false;
2772 break;
2773 }
2774 }
2775
2776 clear = receivers_relinquished_memory &&
2777 (share_state->clear_after_relinquish ||
2778 (relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2779 0U);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002780
2781 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002782 * Clear is not allowed for memory that was shared, as the
2783 * original sender still has access to the memory.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002784 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002785 if (clear && share_state->share_func == FFA_MEM_SHARE_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002786 dlog_verbose("Memory which was shared can't be cleared.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002787 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002788 goto out;
2789 }
2790
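	/*
	 * Remove the borrower's access to the memory, clearing it first if
	 * required by the flags evaluated above.
	 */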
Andrew Walbranca808b12020-05-15 17:22:28 +01002791 ret = ffa_relinquish_check_update(
J-Alves3c5b2072022-11-21 12:45:40 +00002792 from_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002793 share_state->fragment_constituent_counts,
2794 share_state->fragment_count, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002795
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002796 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002797 /*
J-Alvesa9cd7e32022-07-01 13:49:33 +01002798 * Mark the memory as no longer retrieved by this borrower, so
2799 * that it can be reclaimed (or retrieved again).
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002800 */
J-Alves8eb19162022-04-28 10:56:48 +01002801 share_state->retrieved_fragment_count[receiver_index] = 0;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002802 }
2803
2804out:
2805 share_states_unlock(&share_states);
2806 dump_share_states();
2807 return ret;
2808}
2809
2810/**
J-Alvesa9cd7e32022-07-01 13:49:33 +01002811 * Validates that the reclaim transition is allowed for the given
2812 * handle, updates the page table of the reclaiming VM, and frees the
2813 * internal state associated with the handle.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002814 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002815struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
Andrew Walbranca808b12020-05-15 17:22:28 +01002816 ffa_memory_handle_t handle,
2817 ffa_memory_region_flags_t flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002818 struct mpool *page_pool)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002819{
2820 struct share_states_locked share_states;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002821 struct ffa_memory_share_state *share_state;
2822 struct ffa_memory_region *memory_region;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002823 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002824
2825 dump_share_states();
2826
2827 share_states = share_states_lock();
J-Alvesb5084cf2022-07-06 14:20:12 +01002828 if (get_share_state(share_states, handle, &share_state)) {
2829 memory_region = share_state->memory_region;
2830 } else {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002831 dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002832 handle);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002833 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002834 goto out;
2835 }
2836
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002837 CHECK(memory_region != NULL);
2838
J-Alvesa9cd7e32022-07-01 13:49:33 +01002839 if (vm_id_is_current_world(to_locked.vm->id) &&
2840 to_locked.vm->id != memory_region->sender) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002841 dlog_verbose(
Olivier Deprezf92e5d42020-11-13 16:00:54 +01002842 "VM %#x attempted to reclaim memory handle %#x "
2843 "originally sent by VM %#x.\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002844 to_locked.vm->id, handle, memory_region->sender);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002845 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002846 goto out;
2847 }
2848
Andrew Walbranca808b12020-05-15 17:22:28 +01002849 if (!share_state->sending_complete) {
2850 dlog_verbose(
2851 "Memory with handle %#x not fully sent, can't "
2852 "reclaim.\n",
2853 handle);
2854 ret = ffa_error(FFA_INVALID_PARAMETERS);
2855 goto out;
2856 }
2857
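	/*
	 * Reclaim is only allowed once every borrower has relinquished the
	 * memory, i.e. no borrower still holds retrieved fragments.
	 */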
J-Alves752236c2022-04-28 11:07:47 +01002858 for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
2859 if (share_state->retrieved_fragment_count[i] != 0) {
2860 dlog_verbose(
J-Alvesa9cd7e32022-07-01 13:49:33 +01002861 "Tried to reclaim memory handle %#x "
J-Alves3c5b2072022-11-21 12:45:40 +00002862 "that has not been relinquished by all "
J-Alvesa9cd7e32022-07-01 13:49:33 +01002863 "borrowers (%x).\n",
J-Alves752236c2022-04-28 11:07:47 +01002864 handle,
2865 memory_region->receivers[i]
2866 .receiver_permissions.receiver);
2867 ret = ffa_error(FFA_DENIED);
2868 goto out;
2869 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002870 }
2871
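	/*
	 * Map the memory back into the owner's page tables with its original
	 * mode, clearing it first if FFA_MEM_RECLAIM_CLEAR was requested.
	 */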
Andrew Walbranca808b12020-05-15 17:22:28 +01002872 ret = ffa_retrieve_check_update(
J-Alves7db32002021-12-14 14:44:50 +00002873 to_locked, memory_region->sender, share_state->fragments,
Andrew Walbranca808b12020-05-15 17:22:28 +01002874 share_state->fragment_constituent_counts,
J-Alves2a0d2882020-10-29 14:49:50 +00002875 share_state->fragment_count, share_state->sender_orig_mode,
Andrew Walbranca808b12020-05-15 17:22:28 +01002876 FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002877
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002878 if (ret.func == FFA_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002879 share_state_free(share_states, share_state, page_pool);
J-Alves3c5b2072022-11-21 12:45:40 +00002880 dlog_verbose("Freed share state after successful reclaim.\n");
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002881 }
2882
2883out:
2884 share_states_unlock(&share_states);
2885 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01002886}