/*
 * Copyright 2022 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/ffa.h"
#include "hf/arch/memcpy_trapped.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"

#include "hf/bits.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/ffa_v1_0.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"
#include "vmapi/hf/ffa_v1_0.h"

static_assert(sizeof(struct ffa_partition_info_v1_0) == 8,
              "Partition information descriptor size doesn't match the one in "
              "the FF-A 1.0 EAC specification, Table 82.");
static_assert(sizeof(struct ffa_partition_info) == 24,
              "Partition information descriptor size doesn't match the one in "
              "the FF-A 1.1 BETA0 EAC specification, Table 13.34.");
static_assert((sizeof(struct ffa_partition_info) & 7) == 0,
              "Partition information descriptor must be a multiple of 8 bytes"
              " for ffa_partition_info_get_regs to work correctly. Information"
              " from this structure is returned in 8-byte registers and the"
              " count of 8-byte registers is returned by the ABI.");
/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked
 * first, see `sl_lock_both()`.
 */
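
/*
 * For example, code that needs both a VM lock and one of that VM's vCPU locks
 * must take them in the order above and release them in reverse. A minimal
 * sketch (hypothetical usage of the `vm_lock`/`vcpu_lock` helpers already
 * used throughout this file):
 *
 *	struct vm_locked vm_locked = vm_lock(vm);	  // vm::lock first
 *	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu); // then vcpu::lock
 *	...
 *	vcpu_unlock(&vcpu_locked);
 *	vm_unlock(&vm_locked);
 */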

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
              "Currently, a page is mapped for the send and receive buffers so "
              "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
              "The page pool entry size must be at least as big as the mailbox "
              "size, so that memory region descriptors can be copied from the "
              "mailbox for memory sharing.");

/*
 * The maximum number of ffa_partition_info entries that can be returned by an
 * invocation of FFA_PARTITION_INFO_GET_REGS_64 is the size in bytes of the
 * available registers/args in struct ffa_value divided by the size of struct
 * ffa_partition_info. For this ABI, arg3-arg17 in ffa_value can be used, i.e.
 * 15 uint64_t fields. For FF-A v1.1, this value should be 5.
 */
#define MAX_INFO_REGS_ENTRIES_PER_CALL \
	((15 * sizeof(uint64_t)) / sizeof(struct ffa_partition_info))
static_assert(MAX_INFO_REGS_ENTRIES_PER_CALL == 5,
              "FF-A v1.1 supports no more than 5 entries"
              " per FFA_PARTITION_INFO_GET_REGS_64 call");
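
/*
 * Worked out from the sizes asserted above: 15 registers of 8 bytes each give
 * 120 bytes per call, and each FF-A v1.1 descriptor is 24 bytes, so
 * 120 / 24 = 5 complete descriptors fit in a single call.
 */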

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Gets the target VM's vCPU:
 * If the VM is UP then return its first vCPU.
 * If the VM is MP then return the vCPU whose index matches the current CPU
 * index.
 */
struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
{
	ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
	struct vcpu *vcpu = NULL;

	CHECK((vm != NULL) && (current != NULL));

	if (vm_is_up(vm)) {
		vcpu = vm_get_vcpu(vm, 0);
	} else if (current_cpu_index < vm->vcpu_count) {
		vcpu = vm_get_vcpu(vm, current_cpu_index);
	}

	return vcpu;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
 * is given as an argument to the function.
 *
 * Called to change the context between SPs for direct messaging (when Hafnium
 * is the SPMC), and in the context of the remaining 'api_switch_to_*'
 * functions.
 *
 * This function works for partitions that are:
 * - UP migratable.
 * - MP with pinned Execution Contexts.
 */
struct vcpu *api_switch_to_vm(struct vcpu_locked current_locked,
			      struct ffa_value to_ret,
			      enum vcpu_state vcpu_state, ffa_id_t to_id)
{
	struct vm *to_vm = vm_find(to_id);
	struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current_locked.vcpu);

	CHECK(next != NULL);

	/* Set the return value for the target VM. */
	arch_regs_set_retval(&next->regs, to_ret);

	/* Set the current vCPU state. */
	current_locked.vcpu->state = vcpu_state;

	return next;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause FFA_RUN to return and the primary VM to regain
 * control of the CPU.
 */
struct vcpu *api_switch_to_primary(struct vcpu_locked current_locked,
				   struct ffa_value primary_ret,
				   enum vcpu_state secondary_state)
{
	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret = (struct ffa_value){
					.func = FFA_INTERRUPT_32};

			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	return api_switch_to_vm(current_locked, primary_ret, secondary_state,
				HF_PRIMARY_VM_ID);
}

/**
 * Choose next vCPU to run to be the counterpart vCPU in the other
 * world (run the normal world if currently running in the secure
 * world). Set current vCPU state to the given vcpu_state parameter.
 * Set FF-A return values to the target vCPU in the other world.
 *
 * Called in context of a direct message response from a secure
 * partition to a VM.
 */
struct vcpu *api_switch_to_other_world(struct vcpu_locked current_locked,
				       struct ffa_value other_world_ret,
				       enum vcpu_state vcpu_state)
{
	return api_switch_to_vm(current_locked, other_world_ret, vcpu_state,
				HF_OTHER_WORLD_ID);
}

/**
 * Returns true if the given vCPU is executing in context of an
 * FFA_MSG_SEND_DIRECT_REQ invocation.
 */
bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
	return locked.vcpu->direct_request_origin.vm_id != HF_INVALID_VM_ID;
}

/**
 * Returns true if the VM owning the given vCPU supports managed exit and the
 * vCPU is currently processing a managed exit.
 */
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
	return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
		vcpu_locked.vcpu->processing_managed_exit);
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct vcpu_locked current_locked;
	struct vcpu *next;
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	current_locked = vcpu_lock(current);
	next = api_switch_to_primary(current_locked, ret, VCPU_STATE_PREEMPTED);
	vcpu_unlock(&current_locked);

	return next;
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct vcpu_locked current_locked;
	struct vcpu *next;
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	current_locked = vcpu_lock(current);
	next = api_switch_to_primary(current_locked, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
	vcpu_unlock(&current_locked);

	return next;
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct vcpu_locked current_locked;
	struct vcpu *next;
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	current_locked = vcpu_lock(current);
	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	next = api_switch_to_primary(current_locked, ret, VCPU_STATE_OFF);
	vcpu_unlock(&current_locked);

	return next;
}

/**
 * The current vCPU is blocked on some resource and needs to relinquish
 * control back to the execution context of the endpoint that originally
 * allocated cycles to it.
 */
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next,
			   struct ffa_value *args)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu_locked current_locked;
	bool transition_allowed;
	enum vcpu_state next_state = VCPU_STATE_RUNNING;
	uint32_t timeout_low = 0;
	uint32_t timeout_high = 0;
	struct vcpu_locked next_locked = (struct vcpu_locked){
		.vcpu = NULL,
	};

	if (args != NULL) {
		if (args->arg4 != 0U || args->arg5 != 0U || args->arg6 != 0U ||
		    args->arg7 != 0U) {
			dlog_error(
				"Parameters passed through registers X4-X7 "
				"must be zero\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
		timeout_low = (uint32_t)args->arg2 & 0xFFFFFFFF;
		timeout_high = (uint32_t)args->arg3 & 0xFFFFFFFF;
	}

	if (vm_is_primary(current->vm)) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return ret;
	}

	current_locked = vcpu_lock(current);
	transition_allowed = plat_ffa_check_runtime_state_transition(
		current_locked, current->vm->id, HF_INVALID_VM_ID, next_locked,
		FFA_YIELD_32, &next_state);

	if (!transition_allowed) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/*
	 * The current vCPU is expected to move to the BLOCKED state. However,
	 * under certain circumstances, the current vCPU is allowed to be
	 * resumed immediately without ever moving to the BLOCKED state. One
	 * such scenario occurs when an SP's execution context attempts to
	 * yield cycles while handling a secure interrupt. Refer to the
	 * comments in the SPMC variant of the plat_ffa_yield_prepare function.
	 */
	assert(!vm_id_is_current_world(current->vm->id) ||
	       next_state == VCPU_STATE_BLOCKED);

	ret = plat_ffa_yield_prepare(current_locked, next, timeout_low,
				     timeout_high);
out:
	vcpu_unlock(&current_locked);
	return ret;
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if
 * it is already running on a different physical CPU.
 */
static struct vcpu *api_wake_up_locked(struct vcpu_locked current_locked,
				       struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};

	return api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);
}

struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct vcpu_locked current_locked;
	struct vcpu *next;

	current_locked = vcpu_lock(current);
	next = api_wake_up_locked(current_locked, target_vcpu);
	vcpu_unlock(&current_locked);

	return next;
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);
	struct vcpu_locked current_locked;
	struct vcpu *next;
	struct vm_locked vm_locked;

	dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (vm_is_primary(current->vm)) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	vm_locked = vm_lock(current->vm);
	plat_ffa_free_vm_resources(vm_locked);
	vm_unlock(&vm_locked);

	current_locked = vcpu_lock(current);
	next = api_switch_to_primary(current_locked, ret, VCPU_STATE_ABORTED);
	vcpu_unlock(&current_locked);

	return next;
}

/*
 * Formats the partition info descriptors according to the version supported
 * by the endpoint and returns the size of the array created.
 */
static struct ffa_value send_versioned_partition_info_descriptors(
	struct vm_locked vm_locked, struct ffa_partition_info *partitions,
	uint32_t vm_count)
{
	struct vm *vm = vm_locked.vm;
	enum ffa_version version = vm->ffa_version;
	uint32_t partition_info_size;
	uint32_t buffer_size;
	struct ffa_value ret;

	/* Acquire receiver's RX buffer. */
	if (!plat_ffa_acquire_receiver_rx(vm_locked, &ret)) {
		dlog_verbose("Failed to acquire RX buffer for VM %x\n", vm->id);
		return ret;
	}

	if (vm_is_mailbox_busy(vm_locked)) {
		/*
		 * Can't retrieve memory information if the mailbox is not
		 * available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		return ffa_error(FFA_BUSY);
	}

	if (version == FFA_VERSION_1_0) {
		struct ffa_partition_info_v1_0 *recv_mailbox = vm->mailbox.recv;

		partition_info_size = sizeof(struct ffa_partition_info_v1_0);
		buffer_size = partition_info_size * vm_count;
		if (buffer_size > HF_MAILBOX_SIZE) {
			dlog_error(
				"Partition information does not fit in the "
				"VM's RX buffer.\n");
			return ffa_error(FFA_NO_MEMORY);
		}

		for (uint32_t i = 0; i < vm_count; i++) {
			/*
			 * Populate the VM's RX buffer with the partition
			 * information. Clear properties bits that must be zero
			 * according to DEN0077A FF-A v1.0 REL Table 8.25.
			 */
			recv_mailbox[i].vm_id = partitions[i].vm_id;
			recv_mailbox[i].vcpu_count = partitions[i].vcpu_count;
			recv_mailbox[i].properties =
				partitions[i].properties &
				~FFA_PARTITION_v1_0_RES_MASK;
		}

	} else {
		partition_info_size = sizeof(struct ffa_partition_info);
		buffer_size = partition_info_size * vm_count;

		if (buffer_size > HF_MAILBOX_SIZE) {
			dlog_error(
				"Partition information does not fit in the "
				"VM's RX buffer.\n");
			return ffa_error(FFA_NO_MEMORY);
		}

		/*
		 * Populate the VM's RX buffer with the partition information.
		 */
		if (!memcpy_trapped(vm->mailbox.recv, HF_MAILBOX_SIZE,
				    partitions, buffer_size)) {
			dlog_error(
				"%s: Failed to copy ffa_partition_info "
				"descriptor\n",
				__func__);
			return ffa_error(FFA_ABORTED);
		}
	}

	vm->mailbox.recv_size = buffer_size;

	/* Sender is Hypervisor in the normal world (TEE in secure world). */
	vm->mailbox.recv_sender = HF_VM_ID_BASE;
	vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
	vm->mailbox.state = MAILBOX_STATE_FULL;

	/*
	 * Return the count of partition information descriptors in w2
	 * and the size of the descriptors in w3.
	 */
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = vm_count,
				  .arg3 = partition_info_size};
}

static void api_ffa_fill_partitions_info_array(
	struct ffa_partition_info *partitions, size_t partitions_len,
	const struct ffa_uuid *uuid, bool count_flag, ffa_id_t vm_id,
	ffa_vm_count_t *vm_count_out)
{
	ffa_vm_count_t vm_count = 0;
	bool uuid_is_null = ffa_uuid_is_null(uuid);

	assert(vm_get_count() <= partitions_len);

	/*
	 * Iterate through the VMs to find the ones with a matching
	 * UUID. A Null UUID retrieves information for all VMs.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t i = 0; i < PARTITION_MAX_UUIDS; i++) {
			/*
			 * Null UUID indicates reaching the end of a
			 * partition's array of UUIDs.
			 */
			if (ffa_uuid_is_null(&vm->uuids[i])) {
				break;
			}
			if (uuid_is_null ||
			    ffa_uuid_equal(uuid, &vm->uuids[i])) {
				uint16_t array_index = vm_count;

				++vm_count;
				if (count_flag) {
					continue;
				}

				partitions[array_index].vm_id = vm->id;
				partitions[array_index].vcpu_count =
					vm->vcpu_count;
				partitions[array_index].properties =
					plat_ffa_partition_properties(vm_id,
								      vm);
				partitions[array_index].properties |=
					vm_are_notifications_enabled(vm)
						? FFA_PARTITION_NOTIFICATION
						: 0;
				partitions[array_index].properties |=
					FFA_PARTITION_AARCH64_EXEC;
				if (uuid_is_null) {
					partitions[array_index].uuid =
						vm->uuids[i];
				}
			}
		}
	}

	*vm_count_out = vm_count;
}

static inline void api_ffa_pack_vmid_count_props(
	uint64_t *xn, ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count,
	ffa_partition_properties_t properties)
{
	*xn = (uint64_t)vm_id;
	*xn |= (uint64_t)vcpu_count << 16;
	*xn |= (uint64_t)properties << 32;
}

static inline void api_ffa_pack_uuid(uint64_t *xn_1, uint64_t *xn_2,
				     struct ffa_uuid *uuid)
{
	*xn_1 = (uint64_t)uuid->uuid[0];
	*xn_1 |= (uint64_t)uuid->uuid[1] << 32;
	*xn_2 = (uint64_t)uuid->uuid[2];
	*xn_2 |= (uint64_t)uuid->uuid[3] << 32;
}
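
/*
 * The two helpers above produce the per-entry register layout used by the
 * FFA_PARTITION_INFO_GET_REGS path (a descriptive sketch derived from the
 * shifts in the code, not quoted from the specification):
 *
 *	xn   : bits [15:0]  = VM ID
 *	       bits [31:16] = vCPU count
 *	       bits [63:32] = partition properties
 *	xn_1 : bits [31:0] = UUID word 0, bits [63:32] = UUID word 1
 *	xn_2 : bits [31:0] = UUID word 2, bits [63:32] = UUID word 3
 */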

/**
 * This function forwards the FFA_PARTITION_INFO_GET_REGS ABI to the other
 * world when Hafnium is the hypervisor, to determine the secure partitions.
 * When Hafnium is the SPMC, this function forwards the call to the SPMD to
 * discover SPMD logical partitions. The function returns true when partition
 * information is filled in the partitions array and false if there are errors.
 * Note that the SPMD and SPMC may return an FF-A error code of
 * FFA_NOT_SUPPORTED when there are no SPMD logical partitions or no secure
 * partitions respectively, and this is not considered a failure of the
 * forwarded call. A caller is expected to check the return value before
 * consuming the information in the partitions array passed in and ret_count.
 */
static bool api_ffa_partition_info_get_regs_forward(
	const struct ffa_uuid *uuid, const uint16_t tag,
	struct ffa_partition_info *partitions, uint16_t partitions_len,
	ffa_vm_count_t *ret_count)
{
	(void)tag;
	struct ffa_value ret;
	uint16_t last_index = UINT16_MAX;
	uint16_t curr_index = 0;
	uint16_t start_index = 0;

	if (!plat_ffa_partition_info_get_regs_forward_allowed()) {
		return true;
	}

	while (start_index <= last_index) {
		ret = ffa_partition_info_get_regs(uuid, start_index, 0);
		if (ffa_func_id(ret) != FFA_SUCCESS_64) {
			/*
			 * If there are no logical partitions, the SPMD returns
			 * NOT_SUPPORTED, which is not an error. Likewise, if
			 * there are no secure partitions the SPMC returns
			 * NOT_SUPPORTED.
			 */
			if ((ffa_func_id(ret) == FFA_ERROR_32) &&
			    (ffa_error_code(ret) == FFA_NOT_SUPPORTED)) {
				return true;
			}

			return false;
		}

		if (!api_ffa_fill_partition_info_from_regs(
			    ret, start_index, partitions, partitions_len,
			    ret_count)) {
			return false;
		}

		last_index = ffa_partition_info_regs_get_last_idx(ret);
		curr_index = ffa_partition_info_regs_get_curr_idx(ret);
		start_index = curr_index + 1;
	}
	return true;
}

bool api_ffa_fill_partition_info_from_regs(
	struct ffa_value ret, uint16_t start_index,
	struct ffa_partition_info *partitions, uint16_t partitions_len,
	ffa_vm_count_t *ret_count)
{
	uint16_t vm_count = *ret_count;
	uint16_t curr_index = 0;
	uint8_t num_entries = 0;
	uint8_t idx = 0;
	/* List of pointers to args in return value. */
	uint64_t *arg_ptrs[] = {
		&ret.arg3,
		&ret.arg4,
		&ret.arg5,
		&ret.arg6,
		&ret.arg7,
		&ret.extended_val.arg8,
		&ret.extended_val.arg9,
		&ret.extended_val.arg10,
		&ret.extended_val.arg11,
		&ret.extended_val.arg12,
		&ret.extended_val.arg13,
		&ret.extended_val.arg14,
		&ret.extended_val.arg15,
		&ret.extended_val.arg16,
		&ret.extended_val.arg17,
	};

	if (vm_count > partitions_len) {
		return false;
	}

	/*
	 * Tags are currently unused in the implementation. Expect the tag to
	 * be zero since the implementation does not provide a tag when calling
	 * the FFA_PARTITION_INFO_GET_REGS ABI.
	 */
	assert(ffa_partition_info_regs_get_tag(ret) == 0);

	/*
	 * Hafnium expects the size of the returned descriptor to be equal to
	 * the size of the structure in the FF-A 1.1 specification. When future
	 * enhancements are made, this assert can be relaxed.
	 */
	assert(ffa_partition_info_regs_get_desc_size(ret) ==
	       sizeof(struct ffa_partition_info));

	curr_index = ffa_partition_info_regs_get_curr_idx(ret);

	/* FF-A 1.2 ALP0, section 14.9.2 Usage rule 7. */
	assert(start_index <= curr_index);

	num_entries = curr_index - start_index + 1;
	if (num_entries > (partitions_len - vm_count) ||
	    num_entries > MAX_INFO_REGS_ENTRIES_PER_CALL) {
		return false;
	}

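	/*
	 * The unpacking below assumes each returned descriptor occupies three
	 * consecutive registers (mirroring the packing helpers above): the
	 * first holds VM ID, vCPU count and properties, and the next two hold
	 * the four 32-bit UUID words.
	 */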
	while (num_entries) {
		uint64_t info = *(arg_ptrs[(ptrdiff_t)(idx++)]);
		uint64_t uuid_lo = *(arg_ptrs[(ptrdiff_t)(idx++)]);
		uint64_t uuid_high = *(arg_ptrs[(ptrdiff_t)(idx++)]);

		partitions[vm_count].vm_id = info & 0xFFFF;
		partitions[vm_count].vcpu_count = (info >> 16) & 0xFFFF;
		partitions[vm_count].properties = (info >> 32);
		partitions[vm_count].uuid.uuid[0] = uuid_lo & 0xFFFFFFFF;
		partitions[vm_count].uuid.uuid[1] =
			(uuid_lo >> 32) & 0xFFFFFFFF;
		partitions[vm_count].uuid.uuid[2] = uuid_high & 0xFFFFFFFF;
		partitions[vm_count].uuid.uuid[3] =
			(uuid_high >> 32) & 0xFFFFFFFF;
		vm_count++;
		num_entries--;
	}

	*ret_count = vm_count;
	return true;
}

struct ffa_value api_ffa_partition_info_get_regs(struct vcpu *current,
						 const struct ffa_uuid *uuid,
						 const uint16_t start_index,
						 const uint16_t tag)
{
	struct vm *current_vm = current->vm;
	static struct ffa_partition_info partitions[2 * MAX_VMS];
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	ffa_vm_count_t vm_count = 0;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
	uint16_t max_idx = 0;
	uint16_t curr_idx = 0;
	uint8_t num_entries_to_ret = 0;
	uint8_t arg_idx = 3;

	/* List of pointers to args in return value. */
	uint64_t *arg_ptrs[] = {
		&(ret).func,
		&(ret).arg1,
		&(ret).arg2,
		&(ret).arg3,
		&(ret).arg4,
		&(ret).arg5,
		&(ret).arg6,
		&(ret).arg7,
		&(ret).extended_val.arg8,
		&(ret).extended_val.arg9,
		&(ret).extended_val.arg10,
		&(ret).extended_val.arg11,
		&(ret).extended_val.arg12,
		&(ret).extended_val.arg13,
		&(ret).extended_val.arg14,
		&(ret).extended_val.arg15,
		&(ret).extended_val.arg16,
		&(ret).extended_val.arg17,
	};

	/* TODO: Add support for using tags. */
	if (tag != 0) {
		dlog_error("Unsupported tag %d (must be 0).\n", tag);
		return ffa_error(FFA_RETRY);
	}

	memset_s(&partitions, sizeof(partitions), 0, sizeof(partitions));

	api_ffa_fill_partitions_info_array(partitions, ARRAY_SIZE(partitions),
					   uuid, false, current_vm->id,
					   &vm_count);

	/* If UUID is Null, vm_count must not be zero at this stage. */
	CHECK(!uuid_is_null || vm_count != 0);

	/*
	 * When running the Hypervisor:
	 * - If UUID is Null the Hypervisor forwards the query to the SPMC for
	 *   it to fill with secure partitions information.
	 * - If UUID is non-Null vm_count may be zero because the UUID matches
	 *   a secure partition and the query is forwarded to the SPMC.
	 * When running the SPMC:
	 * - The SPMC forwards the call to the SPMD to discover any EL3 SPMD
	 *   logical partitions, if the call came from an SP. Otherwise, the
	 *   call is not forwarded.
	 * - If UUID is non-Null and vm_count is zero it means there is no such
	 *   partition identified in the system.
	 * TODO: Note that for this ABI, forwarding on every invocation when
	 * uuid is Null is inefficient, and if performance becomes a problem,
	 * this would be a good place to optimize using strategies such as
	 * caching info etc. For now, assume this inefficiency is not a major
	 * issue.
	 */
	if (vm_id_is_current_world(current_vm->id)) {
		if (!api_ffa_partition_info_get_regs_forward(
			    uuid, tag, partitions, ARRAY_SIZE(partitions),
			    &vm_count)) {
			dlog_error(
				"Failed to forward "
				"ffa_partition_info_get_regs.\n");
			return ffa_error(FFA_DENIED);
		}
	}

	/*
	 * Unrecognized UUID: does not match any of the VMs (or SPs)
	 * and is not Null.
	 */
	if (vm_count == 0 || vm_count > ARRAY_SIZE(partitions)) {
		dlog_verbose(
			"Invalid parameters. vm_count = %d (must not be zero "
			"or > %lu)\n",
			vm_count, ARRAY_SIZE(partitions));
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (start_index >= vm_count) {
		dlog_error(
			"start index = %d vm_count = %d (start_index must be "
			"less than vm_count)\n",
			start_index, vm_count);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	max_idx = vm_count - 1;
	num_entries_to_ret = (max_idx - start_index) + 1;
	num_entries_to_ret =
		MIN(num_entries_to_ret, MAX_INFO_REGS_ENTRIES_PER_CALL);
	curr_idx = start_index + num_entries_to_ret - 1;
	assert(curr_idx <= max_idx);

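	/*
	 * Pack the w2 status fields as the shifts below do: bits [15:0] carry
	 * the last valid index, bits [31:16] the current index, and bits
	 * [63:48] the descriptor size (a description of this code's layout,
	 * not a quotation of the specification text).
	 */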
	ret.func = FFA_SUCCESS_64;
	ret.arg2 = (sizeof(struct ffa_partition_info) & 0xFFFF) << 48;
	ret.arg2 |= curr_idx << 16;
	ret.arg2 |= max_idx;

	if (num_entries_to_ret > 1) {
		ret.extended_val.valid = 1;
	}

	for (uint16_t idx = start_index; idx <= curr_idx; ++idx) {
		uint64_t *xn_0 = arg_ptrs[arg_idx++];
		uint64_t *xn_1 = arg_ptrs[arg_idx++];
		uint64_t *xn_2 = arg_ptrs[arg_idx++];

		api_ffa_pack_vmid_count_props(xn_0, partitions[idx].vm_id,
					      partitions[idx].vcpu_count,
					      partitions[idx].properties);
		if (uuid_is_null) {
			api_ffa_pack_uuid(xn_1, xn_2, &partitions[idx].uuid);
		}
		assert(arg_idx <= ARRAY_SIZE(arg_ptrs));
	}

	return ret;
}

struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
					    const struct ffa_uuid *uuid,
					    const uint32_t flags)
{
	struct vm *current_vm = current->vm;
	ffa_vm_count_t vm_count = 0;
	bool count_flag = (flags & FFA_PARTITION_COUNT_FLAG_MASK) ==
			  FFA_PARTITION_COUNT_FLAG;
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	struct ffa_partition_info partitions[2 * MAX_VMS] = {0};
	struct vm_locked vm_locked;
	struct ffa_value ret;

	/* Bits 31:1 must be zero. */
	if ((flags & ~FFA_PARTITION_COUNT_FLAG_MASK) != 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	api_ffa_fill_partitions_info_array(partitions, ARRAY_SIZE(partitions),
					   uuid, count_flag, current_vm->id,
					   &vm_count);

	/* If UUID is Null, vm_count must not be zero at this stage. */
	CHECK(!uuid_is_null || vm_count != 0);

	/*
	 * When running the Hypervisor:
	 * - If UUID is Null the Hypervisor forwards the query to the SPMC for
	 *   it to fill with secure partitions information.
	 * - If UUID is non-Null vm_count may be zero because the UUID matches
	 *   a secure partition and the query is forwarded to the SPMC.
	 * When running the SPMC:
	 * - If UUID is non-Null and vm_count is zero it means there is no such
	 *   partition identified in the system.
	 */
	plat_ffa_partition_info_get_forward(uuid, flags, partitions, &vm_count);

	/*
	 * Unrecognized UUID: does not match any of the VMs (or SPs)
	 * and is not Null.
	 */
	if (vm_count == 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * If the count flag is set we don't need to return the partition info
	 * descriptors.
	 */
	if (count_flag) {
		return (struct ffa_value){.func = FFA_SUCCESS_32,
					  .arg2 = vm_count};
	}

	vm_locked = vm_lock(current_vm);
	ret = send_versioned_partition_info_descriptors(vm_locked, partitions,
							vm_count);
	vm_unlock(&vm_locked);
	return ret;
}

/**
 * Returns the ID of the VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
struct ffa_value api_ffa_spm_id_get(void)
{
	if (FFA_VERSION_1_1 <= FFA_VERSION_COMPILED) {
		/*
		 * Return the SPMC ID that was fetched during FF-A
		 * initialization.
		 */
		return (struct ffa_value){.func = FFA_SUCCESS_32,
					  .arg2 = arch_ffa_spmc_id_get()};
	}

	return ffa_error(FFA_NOT_SUPPORTED);
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the
 * vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
				    uint32_t intid,
				    struct vcpu_locked current_locked,
				    struct vcpu **next)
{
	struct vcpu *target_vcpu = target_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct interrupts *interrupts = &target_vcpu->interrupts;
	int64_t ret = 0;

	/*
	 * We only need to change state and (maybe) trigger a virtual interrupt
	 * if it is enabled and was not previously pending. Otherwise we can
	 * skip everything except setting the pending bit.
	 */
	if (!(vcpu_is_virt_interrupt_enabled(interrupts, intid) &&
	      !vcpu_is_virt_interrupt_pending(interrupts, intid))) {
		goto out;
	}

	/* Increment the count. */
	vcpu_interrupt_count_increment(target_locked, interrupts, intid);

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (vcpu_interrupt_count_get(target_locked) != 1) {
		goto out;
	}

	if (vm_is_primary(current->vm)) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up_locked(current_locked, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	vcpu_virt_interrupt_set_pending(interrupts, intid);

	return ret;
}

/**
 * Constructs the return value from a successful FFA_MSG_WAIT call, when used
 * with FFA_MSG_SEND_32.
 */
struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	case FFA_MSG_SEND2_32:
		return (struct ffa_value){
			.func = FFA_RUN_32,
			/*
			 * TODO: FFA_RUN should return vCPU and VM ID in arg1.
			 * Retrieving vCPU requires a rework of the function,
			 * while receiver ID must be set because it's checked by
			 * other APIs (eg: FFA_NOTIFICATION_GET).
			 */
			.arg1 = receiver->id};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}

/**
 * Changes the state of the mailbox to empty, such that ownership is given
 * back to the partition manager.
 * Returns FFA_SUCCESS if the mailbox was reset successfully, FFA_ERROR
 * otherwise.
 */
static struct ffa_value api_release_mailbox(struct vm_locked vm_locked)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	ffa_id_t vm_id = vm_locked.vm->id;

	switch (vm_locked.vm->mailbox.state) {
	case MAILBOX_STATE_EMPTY:
		dlog_verbose("Mailbox of %x is empty.\n", vm_id);
		ret = ffa_error(FFA_DENIED);
		break;
	case MAILBOX_STATE_FULL:
		/* Check it doesn't have pending RX full notifications. */
		if (vm_are_fwk_notifications_pending(vm_locked)) {
			dlog_verbose(
				"Mailbox of endpoint %x has pending "
				"messages.\n",
				vm_id);
			ret = ffa_error(FFA_DENIED);
		}
		break;
	case MAILBOX_STATE_OTHER_WORLD_OWNED:
		/*
		 * The SPMC shouldn't let an SP's mailbox get into this state.
		 * For the Hypervisor, the VM may call FFA_RX_RELEASE whilst
		 * the mailbox is in this state. In that case, we should report
		 * an error.
		 */
		if (vm_id_is_current_world(vm_id)) {
			dlog_verbose(
				"Mailbox of endpoint %x is in an incorrect "
				"state.\n",
				vm_id);
			ret = ffa_error(FFA_ABORTED);
		}
		break;
	}

	if (ret.func == FFA_SUCCESS_32) {
		vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
	}

	return ret;
}

/*
 * Helper to check if extended arguments (corresponding to regs x8-x17)
 * are zeroed out.
 */
bool api_extended_args_are_zero(struct ffa_value *args)
{
	if (args->extended_val.arg8 != 0U || args->extended_val.arg9 != 0U ||
	    args->extended_val.arg10 != 0U || args->extended_val.arg11 != 0U ||
	    args->extended_val.arg12 != 0U || args->extended_val.arg13 != 0U ||
	    args->extended_val.arg14 != 0U || args->extended_val.arg15 != 0U ||
	    args->extended_val.arg16 != 0U || args->extended_val.arg17 != 0U) {
		return false;
	}
	return true;
}

static void api_ffa_msg_wait_rx_release(struct vcpu *current)
{
	struct vm_locked vm_locked;

	vm_locked = plat_ffa_vm_find_locked(current->vm->id);
	if (vm_locked.vm == NULL) {
		return;
	}

	api_release_mailbox(vm_locked);

	if (vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY) {
		dlog_warning("Mailbox not released to producer\n");
	}

	vm_unlock(&vm_locked);
}

static bool api_retain_rx_buffer_ownership(struct ffa_value args)
{
	return ((args.arg2 & FFA_MSG_WAIT_FLAG_RETAIN_RX) != 0U);
}

struct ffa_value api_ffa_msg_wait(struct vcpu *current, struct vcpu **next,
				  struct ffa_value *args)
{
	struct vcpu_locked current_locked;
	enum vcpu_state next_state = VCPU_STATE_RUNNING;
	struct ffa_value ret;
	struct vcpu_locked next_locked = (struct vcpu_locked){
		.vcpu = NULL,
	};

	if (args->arg1 != 0U || args->arg3 != 0U || args->arg4 != 0U ||
	    args->arg5 != 0U || args->arg6 != 0U || args->arg7 != 0U) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (current->vm->ffa_version >= FFA_VERSION_1_2) {
		if (!api_extended_args_are_zero(args)) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	} else {
		if (args->arg2 != 0U) {
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	current_locked = vcpu_lock(current);
	if (!plat_ffa_check_runtime_state_transition(
		    current_locked, current->vm->id, HF_INVALID_VM_ID,
		    next_locked, FFA_MSG_WAIT_32, &next_state)) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	assert(!vm_id_is_current_world(current->vm->id) ||
	       next_state == VCPU_STATE_WAITING);

	ret = plat_ffa_msg_wait_prepare(current_locked, next);

	/*
	 * To maintain partial ordering of locks, release the vCPU lock before
	 * releasing the VM's RX buffer, a process which requires locking the
	 * VM.
	 */
out:
	vcpu_unlock(&current_locked);

	if (ret.func != FFA_ERROR_32 &&
	    !api_retain_rx_buffer_ownership(*args)) {
		api_ffa_msg_wait_rx_release(current);
	}
	return ret;
}

Andrew Walbrand4d2fa12019-10-01 16:47:25 +01001188/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001189 * Prepares the vCPU to run by updating its state and fetching whether a return
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001190 * value needs to be forced onto the vCPU.
1191 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001192static bool api_vcpu_prepare_run(struct vcpu_locked current_locked,
1193 struct vcpu_locked vcpu_next_locked,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001194 struct ffa_value *run_ret)
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001195{
Max Shvetsov40108e72020-08-27 12:39:50 +01001196 struct vm_locked vm_locked;
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001197 bool ret;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001198 uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
Olivier Deprez04168ed2022-09-26 09:20:00 +02001199 bool vcpu_was_init_state = false;
J-Alves6e2abc62021-12-02 14:58:56 +00001200 bool need_vm_lock;
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001201 struct two_vcpu_locked vcpus_locked;
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001202
Andrew Scullb06d1752019-02-04 10:15:48 +00001203 /*
Andrew Walbrand81c7d82019-11-27 18:34:46 +00001204 * Check that the registers are available so that the vCPU can be run.
Andrew Scullb06d1752019-02-04 10:15:48 +00001205 *
Andrew Scull4caadaf2019-07-03 13:13:47 +01001206 * The VM lock is not needed in the common case so it must only be taken
1207 * when it is going to be needed. This ensures there are no inter-vCPU
1208	 * dependencies in the common run case, meaning the sensitive context
1209 * switch performance is consistent.
Andrew Scullb06d1752019-02-04 10:15:48 +00001210 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001211 struct vcpu *vcpu = vcpu_next_locked.vcpu;
1212 struct vcpu *current = current_locked.vcpu;
Max Shvetsov40108e72020-08-27 12:39:50 +01001213
Andrew Walbrand81c7d82019-11-27 18:34:46 +00001214 /* The VM needs to be locked to deliver mailbox messages. */
J-Alves6e2abc62021-12-02 14:58:56 +00001215 need_vm_lock = vcpu->state == VCPU_STATE_WAITING ||
1216 (!vcpu->vm->el0_partition &&
1217 (vcpu->state == VCPU_STATE_BLOCKED_INTERRUPT ||
1218 vcpu->state == VCPU_STATE_BLOCKED ||
1219 vcpu->state == VCPU_STATE_PREEMPTED));
1220
Andrew Walbrand81c7d82019-11-27 18:34:46 +00001221 if (need_vm_lock) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001222 vcpu_unlock(&vcpu_next_locked);
1223 vcpu_unlock(&current_locked);
Max Shvetsov40108e72020-08-27 12:39:50 +01001224 vm_locked = vm_lock(vcpu->vm);
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001225
1226 /* Lock both vCPUs at once to avoid deadlock. */
1227 vcpus_locked = vcpu_lock_both(current, vcpu);
1228 current_locked = vcpus_locked.vcpu1;
1229 vcpu_next_locked = vcpus_locked.vcpu2;
Andrew Walbrand81c7d82019-11-27 18:34:46 +00001230 }
1231
1232 /*
1233 * If the vCPU is already running somewhere then we can't run it here
1234	 * simultaneously. While it is actually running, the state should be
1235 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
1236 * stops running but while Hafnium is in the process of switching back
1237 * to the primary there will be a brief period while the state has been
1238 * updated but `regs_available` is still false (until
1239 * `api_regs_state_saved` is called). We can't start running it again
1240 * until this has finished, so count this state as still running for the
1241 * purposes of this check.
1242 */
1243 if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
1244 /*
1245 * vCPU is running on another pCPU.
1246 *
1247 * It's okay not to return the sleep duration here because the
1248 * other physical CPU that is currently running this vCPU will
1249 * return the sleep duration if needed.
1250 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001251 *run_ret = ffa_error(FFA_BUSY);
Andrew Walbrand81c7d82019-11-27 18:34:46 +00001252 ret = false;
1253 goto out;
Andrew Scullb06d1752019-02-04 10:15:48 +00001254 }
Andrew Scull9726c252019-01-23 13:44:19 +00001255
1256 if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001257 if (vcpu->state != VCPU_STATE_ABORTED) {
Kathleen Capella6e9765b2023-07-11 17:44:58 -04001258 dlog_verbose("VM %#x was aborted, cannot run vCPU %u\n",
1259 vcpu->vm->id, vcpu_index(vcpu));
Andrew Sculld6ee1102019-04-05 22:12:42 +01001260 vcpu->state = VCPU_STATE_ABORTED;
Andrew Scull9726c252019-01-23 13:44:19 +00001261 }
Kathleen Capella6ab05132023-05-10 12:27:35 -04001262 *run_ret = ffa_error(FFA_ABORTED);
Andrew Scull9726c252019-01-23 13:44:19 +00001263 ret = false;
1264 goto out;
1265 }
1266
Andrew Walbran508e63c2018-12-20 17:02:37 +00001267 switch (vcpu->state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001268 case VCPU_STATE_RUNNING:
1269 case VCPU_STATE_OFF:
1270 case VCPU_STATE_ABORTED:
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001271 ret = false;
1272 goto out;
Andrew Scullb06d1752019-02-04 10:15:48 +00001273
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001274 case VCPU_STATE_WAITING:
1275 /*
Olivier Deprezb2808332023-02-02 15:25:40 +01001276 * An initial FFA_RUN is necessary for SP's secondary vCPUs to
1277 * reach the message wait loop.
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001278 */
Olivier Deprezb2808332023-02-02 15:25:40 +01001279 if (vcpu->rt_model == RTM_SP_INIT) {
1280 /*
1281 * TODO: this should be removed, but omitting it makes
1282 * normal world arch gicv3 tests failing.
1283 */
1284 vcpu->rt_model = RTM_NONE;
Olivier Deprez04168ed2022-09-26 09:20:00 +02001285
1286 vcpu_was_init_state = true;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001287 break;
1288 }
1289
J-Alves6e2abc62021-12-02 14:58:56 +00001290 assert(need_vm_lock == true);
1291 if (!vm_locked.vm->el0_partition &&
1292 plat_ffa_inject_notification_pending_interrupt(
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001293 vcpu_next_locked, current_locked, vm_locked)) {
Federico Recanati9dccc4b2022-04-27 12:52:36 +02001294			/* TODO: setting a return value to override
1295			 * the placeholder (FFA_ERROR(INTERRUPTED))
1296			 * set by FFA_MSG_WAIT. FF-A v1.1 allows
1297			 * FFA_MSG_WAIT to return successfully even
1298			 * if it didn't receive a message. TFTF tests
1299			 * still expect an FFA_ERROR instead; should
1300			 * this be fixed?
1301 */
1302 arch_regs_set_retval(
1303 &vcpu->regs,
1304 (struct ffa_value){.func = FFA_RUN_32,
1305 // TODO: does it make sense
1306 // to set vCPU/receiver?
1307 .arg1 = 0});
J-Alves6e2abc62021-12-02 14:58:56 +00001308 break;
1309 }
1310
Andrew Scullb06d1752019-02-04 10:15:48 +00001311 /*
1312 * A pending message allows the vCPU to run so the message can
1313 * be delivered directly.
1314 */
J-Alvese8c8c2b2022-12-16 15:34:48 +00001315 if (vcpu->vm->mailbox.state == MAILBOX_STATE_FULL) {
Andrew Walbrand4d2fa12019-10-01 16:47:25 +01001316 arch_regs_set_retval(&vcpu->regs,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001317 ffa_msg_recv_return(vcpu->vm));
Andrew Scullb06d1752019-02-04 10:15:48 +00001318 break;
1319 }
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001320
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001321 if (vcpu_interrupt_count_get(vcpu_next_locked) > 0) {
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001322 break;
1323 }
1324
1325 if (arch_timer_enabled(&vcpu->regs)) {
1326 timer_remaining_ns =
1327 arch_timer_remaining_ns(&vcpu->regs);
1328 if (timer_remaining_ns == 0) {
1329 break;
1330 }
1331 } else {
1332 dlog_verbose("Timer disabled\n");
1333 }
1334 run_ret->func = FFA_MSG_WAIT_32;
1335 run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
1336 run_ret->arg2 = timer_remaining_ns;
1337 ret = false;
1338 goto out;
Andrew Sculld6ee1102019-04-05 22:12:42 +01001339 case VCPU_STATE_BLOCKED_INTERRUPT:
J-Alves6e2abc62021-12-02 14:58:56 +00001340 if (need_vm_lock &&
1341 plat_ffa_inject_notification_pending_interrupt(
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001342 vcpu_next_locked, current_locked, vm_locked)) {
1343 assert(vcpu_interrupt_count_get(vcpu_next_locked) > 0);
J-Alves6e2abc62021-12-02 14:58:56 +00001344 break;
1345 }
1346
Andrew Scullb06d1752019-02-04 10:15:48 +00001347 /* Allow virtual interrupts to be delivered. */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001348 if (vcpu_interrupt_count_get(vcpu_next_locked) > 0) {
Andrew Scullb06d1752019-02-04 10:15:48 +00001349 break;
1350 }
1351
Andrew Walbran508e63c2018-12-20 17:02:37 +00001352 if (arch_timer_enabled(&vcpu->regs)) {
Andrew Walbran4692a3a2020-08-07 12:42:01 +01001353 timer_remaining_ns =
Andrew Walbran2fc856a2019-11-04 15:17:24 +00001354 arch_timer_remaining_ns(&vcpu->regs);
1355
1356 /*
1357 * The timer expired so allow the interrupt to be
1358 * delivered.
1359 */
1360 if (timer_remaining_ns == 0) {
1361 break;
1362 }
Andrew Walbran4692a3a2020-08-07 12:42:01 +01001363 }
Andrew Walbran2fc856a2019-11-04 15:17:24 +00001364
Andrew Walbran4692a3a2020-08-07 12:42:01 +01001365 /*
1366		 * The vCPU is not ready to run; return the appropriate code to
1367 * the primary which called vcpu_run.
1368 */
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001369 run_ret->func = HF_FFA_RUN_WAIT_FOR_INTERRUPT;
Andrew Walbran4692a3a2020-08-07 12:42:01 +01001370 run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
1371 run_ret->arg2 = timer_remaining_ns;
Andrew Walbran508e63c2018-12-20 17:02:37 +00001372
1373 ret = false;
1374 goto out;
Andrew Scullb06d1752019-02-04 10:15:48 +00001375
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001376 case VCPU_STATE_BLOCKED:
1377 /* A blocked vCPU is run unconditionally. Fall through. */
1378 case VCPU_STATE_PREEMPTED:
J-Alves6e2abc62021-12-02 14:58:56 +00001379		/* Check whether the NPI is to be injected here. */
1380 if (need_vm_lock) {
1381 plat_ffa_inject_notification_pending_interrupt(
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001382 vcpu_next_locked, current_locked, vm_locked);
J-Alves6e2abc62021-12-02 14:58:56 +00001383 }
Andrew Walbran508e63c2018-12-20 17:02:37 +00001384 break;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001385 default:
1386 /*
1387 * Execution not expected to reach here. Deny the request
1388 * gracefully.
1389 */
1390 *run_ret = ffa_error(FFA_DENIED);
1391 ret = false;
1392 goto out;
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001393 }
1394
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001395 plat_ffa_init_schedule_mode_ffa_run(current_locked, vcpu_next_locked);
Madhukar Pappireddy1480fce2022-06-21 18:09:25 -05001396
Andrew Scullb06d1752019-02-04 10:15:48 +00001397 /* It has been decided that the vCPU should be run. */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001398 vcpu->cpu = current_locked.vcpu->cpu;
Andrew Sculld6ee1102019-04-05 22:12:42 +01001399 vcpu->state = VCPU_STATE_RUNNING;
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001400
Olivier Deprez04168ed2022-09-26 09:20:00 +02001401 if (vcpu_was_init_state) {
1402 vcpu_set_phys_core_idx(vcpu);
1403 vcpu_set_boot_info_gp_reg(vcpu);
J-Alves7ac49052022-02-08 17:20:53 +00001404 }
J-Alves7ac49052022-02-08 17:20:53 +00001405
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001406 /*
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001407 * Mark the registers as unavailable now that we're about to reflect
1408 * them onto the real registers. This will also prevent another physical
1409 * CPU from trying to read these registers.
1410 */
1411 vcpu->regs_available = false;
1412
1413 ret = true;
1414
1415out:
Andrew Scullb06d1752019-02-04 10:15:48 +00001416 if (need_vm_lock) {
Max Shvetsov40108e72020-08-27 12:39:50 +01001417 vm_unlock(&vm_locked);
Andrew Scullb06d1752019-02-04 10:15:48 +00001418 }
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001419 return ret;
1420}
1421
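/*
 * Illustrative timeline (for exposition only) of the regs_available
 * handshake checked above, with two pCPUs racing to run one vCPU:
 *
 *	pCPU0: vcpu->state = VCPU_STATE_RUNNING;
 *	pCPU0: vcpu->regs_available = false;	// in api_vcpu_prepare_run
 *	...vCPU runs, then traps back into Hafnium...
 *	pCPU0: api_regs_state_saved(vcpu);	// regs_available = true again
 *	pCPU1: api_vcpu_prepare_run(...);	// now succeeds rather than
 *						// returning FFA_BUSY
 */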
J-Alves19e20cf2023-08-02 12:48:55 +01001422struct ffa_value api_ffa_run(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001423 struct vcpu *current, struct vcpu **next)
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001424{
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001425 struct vm *vm;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001426 struct vcpu *vcpu;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001427 struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05001428 enum vcpu_state next_state = VCPU_STATE_RUNNING;
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05001429 struct vcpu_locked current_locked;
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001430 struct vcpu_locked vcpu_next_locked;
1431 struct two_vcpu_locked vcpus_locked;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001432
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001433 current_locked = vcpu_lock(current);
1434 if (!plat_ffa_run_checks(current_locked, vm_id, vcpu_idx, &ret, next)) {
1435 goto out;
Andrew Scull7364a8e2018-07-19 15:39:29 +01001436 }
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001437
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -07001438 if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001439 goto out;
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -07001440 }
1441
Andrew Scull19503262018-09-20 14:48:39 +01001442 /* The requested VM must exist. */
Andrew Walbran42347a92019-05-09 13:59:03 +01001443 vm = vm_find(vm_id);
Andrew Scull19503262018-09-20 14:48:39 +01001444 if (vm == NULL) {
Andrew Scull6d2db332018-10-10 15:28:17 +01001445 goto out;
Andrew Scull19503262018-09-20 14:48:39 +01001446 }
1447
Fuad Tabbaed294af2019-12-20 10:43:01 +00001448 /* The requested vCPU must exist. */
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001449 if (vcpu_idx >= vm->vcpu_count) {
Andrew Scull6d2db332018-10-10 15:28:17 +01001450 goto out;
Andrew Scull7364a8e2018-07-19 15:39:29 +01001451 }
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001452
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05001453 /*
1454	 * Refer to Figure 8.13, Scenario 1 of the FF-A v1.1 EAC spec. SPMC
1455 * bypasses the intermediate execution contexts and resumes the
1456 * SP execution context that was originally preempted.
1457 */
1458 if (*next != NULL) {
1459 vcpu = *next;
1460 } else {
1461 vcpu = vm_get_vcpu(vm, vcpu_idx);
1462 }
1463
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001464 /*
1465 * Unlock current vCPU to allow it to be locked together with next
1466	 * vCPU.
1467 */
1468 vcpu_unlock(&current_locked);
1469
1470 /* Lock both vCPUs at once to avoid deadlock. */
1471 vcpus_locked = vcpu_lock_both(current, vcpu);
1472 current_locked = vcpus_locked.vcpu1;
1473 vcpu_next_locked = vcpus_locked.vcpu2;
1474
1475 if (!plat_ffa_check_runtime_state_transition(
1476 current_locked, current->vm->id, HF_INVALID_VM_ID,
1477 vcpu_next_locked, FFA_RUN_32, &next_state)) {
1478 ret = ffa_error(FFA_DENIED);
1479 goto out_vcpu;
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05001480 }
1481
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001482 if (!api_vcpu_prepare_run(current_locked, vcpu_next_locked, &ret)) {
1483 goto out_vcpu;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001484 }
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001485
Andrew Walbran508e63c2018-12-20 17:02:37 +00001486 /*
1487 * Inject timer interrupt if timer has expired. It's safe to access
1488 * vcpu->regs here because api_vcpu_prepare_run already made sure that
1489 * regs_available was true (and then set it to false) before returning
1490 * true.
1491 */
1492 if (arch_timer_pending(&vcpu->regs)) {
1493 /* Make virtual timer interrupt pending. */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001494 api_interrupt_inject_locked(vcpu_next_locked,
1495 HF_VIRTUAL_TIMER_INTID,
1496 vcpu_next_locked, NULL);
Andrew Walbran508e63c2018-12-20 17:02:37 +00001497
1498 /*
1499 * Set the mask bit so the hardware interrupt doesn't fire
1500 * again. Ideally we wouldn't do this because it affects what
1501 * the secondary vCPU sees, but if we don't then we end up with
1502 * a loop of the interrupt firing each time we try to return to
1503 * the secondary vCPU.
1504 */
1505 arch_timer_mask(&vcpu->regs);
1506 }
1507
Fuad Tabbaed294af2019-12-20 10:43:01 +00001508 /* Switch to the vCPU. */
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001509 *next = vcpu;
Wedson Almeida Filho03306112018-11-26 00:08:03 +00001510
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05001511 assert(!vm_id_is_current_world(current->vm->id) ||
1512 next_state == VCPU_STATE_BLOCKED);
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05001513 current->state = VCPU_STATE_BLOCKED;
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05001514
Andrew Scull33fecd32019-01-08 14:48:27 +00001515 /*
1516 * Set a placeholder return code to the scheduler. This will be
1517 * overwritten when the switch back to the primary occurs.
1518 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001519 ret.func = FFA_INTERRUPT_32;
Andrew Walbran7e824602020-10-22 16:51:40 +01001520 ret.arg1 = 0;
Andrew Walbran7a1ea0b2019-10-02 18:18:44 +01001521 ret.arg2 = 0;
Andrew Scull33fecd32019-01-08 14:48:27 +00001522
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001523out_vcpu:
1524 vcpu_unlock(&vcpu_next_locked);
1525
Andrew Scull6d2db332018-10-10 15:28:17 +01001526out:
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06001527 vcpu_unlock(&current_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001528 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001529}
1530
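/*
 * Illustrative primary-scheduler sketch (assumed logic, using vmapi's
 * ffa_run(); schedule_wakeup() is a hypothetical helper): the return
 * codes set above drive the scheduling decision:
 *
 *	struct ffa_value ret = ffa_run(vm_id, vcpu_idx);
 *
 *	switch (ret.func) {
 *	case FFA_MSG_WAIT_32:			// idle; w2 holds a sleep
 *	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:	// hint in nanoseconds
 *		schedule_wakeup(ret.arg1, ret.arg2);
 *		break;
 *	case FFA_INTERRUPT_32:			// placeholder; run again
 *		break;				// when next scheduled
 *	}
 */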
1531/**
Andrew Scull81e85092018-12-12 12:56:20 +00001532 * Check that the mode indicates memory that is valid, owned and exclusive.
1533 */
Andrew Walbran1281ed42019-10-22 17:23:40 +01001534static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
Andrew Scull81e85092018-12-12 12:56:20 +00001535{
Andrew Scullb5f49e02019-10-02 13:20:47 +01001536 return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
1537 MM_MODE_SHARED)) == 0;
Andrew Scull81e85092018-12-12 12:56:20 +00001538}
1539
1540/**
Manish Pandeyd34f8892020-06-19 17:41:07 +01001541 * Configures the hypervisor's stage-1 view of the send and receive pages.
Andrew Sculle1322792019-07-01 17:46:10 +01001542 */
Karl Meakin738bee12024-01-12 15:19:45 +00001543static struct ffa_value api_vm_configure_stage1(
1544 struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
1545 paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
1546 paddr_t pa_recv_end, uint32_t extra_attributes,
1547 struct mpool *local_page_pool)
Andrew Sculle1322792019-07-01 17:46:10 +01001548{
Karl Meakin738bee12024-01-12 15:19:45 +00001549 struct ffa_value ret;
Andrew Sculle1322792019-07-01 17:46:10 +01001550
Karl Meakin738bee12024-01-12 15:19:45 +00001551 /*
1552 * Map the send page as read-only in the SPMC/hypervisor address space.
1553 */
Andrew Sculle1322792019-07-01 17:46:10 +01001554 vm_locked.vm->mailbox.send =
1555 mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
Olivier Deprez96a2a262020-06-11 17:21:38 +02001556 MM_MODE_R | extra_attributes, local_page_pool);
Andrew Sculle1322792019-07-01 17:46:10 +01001557 if (!vm_locked.vm->mailbox.send) {
Karl Meakin738bee12024-01-12 15:19:45 +00001558 ret = ffa_error(FFA_NO_MEMORY);
1559 goto out;
Andrew Sculle1322792019-07-01 17:46:10 +01001560 }
1561
1562 /*
Karl Meakin738bee12024-01-12 15:19:45 +00001563 * Map the receive page as writable in the SPMC/hypervisor address
1564 * space. On failure, unmap the send page before returning.
Andrew Sculle1322792019-07-01 17:46:10 +01001565 */
1566 vm_locked.vm->mailbox.recv =
1567 mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
Olivier Deprez96a2a262020-06-11 17:21:38 +02001568 MM_MODE_W | extra_attributes, local_page_pool);
Andrew Sculle1322792019-07-01 17:46:10 +01001569 if (!vm_locked.vm->mailbox.recv) {
Karl Meakin738bee12024-01-12 15:19:45 +00001570 ret = ffa_error(FFA_NO_MEMORY);
Andrew Sculle1322792019-07-01 17:46:10 +01001571 goto fail_undo_send;
1572 }
1573
Karl Meakin738bee12024-01-12 15:19:45 +00001574 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Sculle1322792019-07-01 17:46:10 +01001575 goto out;
1576
1577 /*
1578 * The following mappings will not require more memory than is available
1579 * in the local pool.
1580 */
1581fail_undo_send:
1582 vm_locked.vm->mailbox.send = NULL;
Andrew Scull7e8de322019-07-02 13:00:56 +01001583 CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
1584 local_page_pool));
Andrew Sculle1322792019-07-01 17:46:10 +01001585
Andrew Sculle1322792019-07-01 17:46:10 +01001586out:
Andrew Sculle1322792019-07-01 17:46:10 +01001587 return ret;
1588}
1589
1590/**
Manish Pandeyd34f8892020-06-19 17:41:07 +01001591 * Sanity checks and configures the send and receive pages in the VM stage-2
1592 * and hypervisor stage-1 page tables.
1593 *
1594 * Returns:
1595 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001596 * aligned, are the same or have invalid attributes.
Manish Pandeyd34f8892020-06-19 17:41:07 +01001597 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
1598	 * due to insufficient page table memory.
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001599 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
Manish Pandeyd34f8892020-06-19 17:41:07 +01001600 * - FFA_SUCCESS on success if no further action is needed.
Andrew Sculle1322792019-07-01 17:46:10 +01001601 */
Manish Pandeyd34f8892020-06-19 17:41:07 +01001602
1603struct ffa_value api_vm_configure_pages(
1604 struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
1605 ipaddr_t send, ipaddr_t recv, uint32_t page_count,
1606 struct mpool *local_page_pool)
Andrew Sculle1322792019-07-01 17:46:10 +01001607{
Manish Pandeyd34f8892020-06-19 17:41:07 +01001608 struct ffa_value ret;
1609 paddr_t pa_send_begin;
1610 paddr_t pa_send_end;
1611 paddr_t pa_recv_begin;
1612 paddr_t pa_recv_end;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001613 uint32_t orig_send_mode = 0;
1614 uint32_t orig_recv_mode = 0;
Olivier Deprez96a2a262020-06-11 17:21:38 +02001615 uint32_t extra_attributes;
Manish Pandeyd34f8892020-06-19 17:41:07 +01001616
1617 /* We only allow these to be setup once. */
1618 if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
Karl Meakin738bee12024-01-12 15:19:45 +00001619 dlog_error("%s: Mailboxes have already been setup for VM %#x\n",
1620 __func__, vm_locked.vm->id);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001621 ret = ffa_error(FFA_DENIED);
1622 goto out;
1623 }
1624
1625 /* Hafnium only supports a fixed size of RX/TX buffers. */
1626 if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
Karl Meakine8937d92024-03-19 16:04:25 +00001627 dlog_error("%s: Page count must be %zu, it is %d\n", __func__,
Karl Meakin738bee12024-01-12 15:19:45 +00001628 HF_MAILBOX_SIZE / FFA_PAGE_SIZE, page_count);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001629 ret = ffa_error(FFA_INVALID_PARAMETERS);
1630 goto out;
1631 }
1632
1633 /* Fail if addresses are not page-aligned. */
1634 if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
1635 !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
Karl Meakin738bee12024-01-12 15:19:45 +00001636 dlog_error("%s: Mailbox buffers not page-aligned\n", __func__);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001637 ret = ffa_error(FFA_INVALID_PARAMETERS);
1638 goto out;
1639 }
1640
1641 /* Convert to physical addresses. */
1642 pa_send_begin = pa_from_ipa(send);
1643 pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
1644 pa_recv_begin = pa_from_ipa(recv);
1645 pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);
1646
1647 /* Fail if the same page is used for the send and receive pages. */
1648 if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
Karl Meakin738bee12024-01-12 15:19:45 +00001649 dlog_error("%s: Mailbox buffers overlap\n", __func__);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001650 ret = ffa_error(FFA_INVALID_PARAMETERS);
1651 goto out;
1652 }
Andrew Sculle1322792019-07-01 17:46:10 +01001653
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001654 /* Set stage 2 translation tables only for virtual FF-A instances. */
1655 if (vm_id_is_current_world(vm_locked.vm->id)) {
1656 /*
1657 * Ensure the pages are valid, owned and exclusive to the VM and
1658 * that the VM has the required access to the memory.
1659 */
1660 if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
1661 &orig_send_mode) ||
1662 !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
1663 (orig_send_mode & MM_MODE_R) == 0 ||
1664 (orig_send_mode & MM_MODE_W) == 0) {
1665 dlog_error(
1666 "VM doesn't have required access rights to map "
1667 "TX buffer in stage 2.\n");
1668 ret = ffa_error(FFA_INVALID_PARAMETERS);
1669 goto out;
1670 }
Manish Pandeyd34f8892020-06-19 17:41:07 +01001671
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001672 if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
1673 &orig_recv_mode) ||
1674 !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
1675 (orig_recv_mode & MM_MODE_R) == 0) {
1676 dlog_error(
1677 "VM doesn't have required access rights to map "
1678 "RX buffer in stage 2.\n");
1679 ret = ffa_error(FFA_INVALID_PARAMETERS);
1680 goto out;
1681 }
Andrew Sculle1322792019-07-01 17:46:10 +01001682
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001683 /* Take memory ownership away from the VM and mark as shared. */
1684 uint32_t mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R |
1685 MM_MODE_W;
1686 if (vm_locked.vm->el0_partition) {
1687 mode |= MM_MODE_USER | MM_MODE_NG;
1688 }
Raghu Krishnamurthy95c3a272021-02-26 18:09:41 -08001689
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001690 if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
1691 mode, local_page_pool, NULL)) {
1692 dlog_error(
1693 "Cannot allocate a new entry in stage 2 "
1694 "translation table.\n");
1695 ret = ffa_error(FFA_NO_MEMORY);
1696 goto out;
1697 }
Andrew Sculle1322792019-07-01 17:46:10 +01001698
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001699 mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
1700 if (vm_locked.vm->el0_partition) {
1701 mode |= MM_MODE_USER | MM_MODE_NG;
1702 }
Raghu Krishnamurthy95c3a272021-02-26 18:09:41 -08001703
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001704 if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
1705 mode, local_page_pool, NULL)) {
1706 /* TODO: partial defrag of failed range. */
1707 /* Recover any memory consumed in failed mapping. */
Karl Meakin738bee12024-01-12 15:19:45 +00001708 dlog_error("%s: cannot map recv page\n", __func__);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001709 vm_ptable_defrag(vm_locked, local_page_pool);
Karl Meakin738bee12024-01-12 15:19:45 +00001710 ret = ffa_error(FFA_NO_MEMORY);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001711 goto fail_undo_send;
1712 }
Karl Meakin738bee12024-01-12 15:19:45 +00001713 } else {
1714 ret = arch_other_world_vm_configure_rxtx_map(
1715 vm_locked, local_page_pool, pa_send_begin, pa_send_end,
1716 pa_recv_begin, pa_recv_end);
1717 if (ret.func != FFA_SUCCESS_32) {
1718 goto out;
1719 }
Andrew Sculle1322792019-07-01 17:46:10 +01001720 }
1721
Olivier Deprez96a2a262020-06-11 17:21:38 +02001722 /* Get extra send/recv pages mapping attributes for the given VM ID. */
1723 extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);
1724
Raghu Krishnamurthy95c3a272021-02-26 18:09:41 -08001725 /*
1726 * For EL0 partitions, since both the partition and the hypervisor code
1727 * use the EL2&0 translation regime, it is critical to mark the mappings
1728 * of the send and recv buffers as non-global in the TLB. For one, if we
1729	 * don't mark them as non-global, it would cause TLB conflicts since there
1730	 * would be an identity mapping with the non-global attribute in the
1731	 * partition's page tables, but another identity mapping in the
1732	 * hypervisor page tables with the global attribute. The other issue is
1733	 * one of security: we don't want other partitions to be able to access
1734	 * other partitions' buffers through cached translations.
1735 */
1736 if (vm_locked.vm->el0_partition) {
1737 extra_attributes |= MM_MODE_NG;
1738 }
1739
Karl Meakin738bee12024-01-12 15:19:45 +00001740 ret = api_vm_configure_stage1(
1741 mm_stage1_locked, vm_locked, pa_send_begin, pa_send_end,
1742 pa_recv_begin, pa_recv_end, extra_attributes, local_page_pool);
1743 if (ret.func != FFA_SUCCESS_32) {
Andrew Sculle1322792019-07-01 17:46:10 +01001744 goto fail_undo_send_and_recv;
1745 }
1746
Manish Pandeyd34f8892020-06-19 17:41:07 +01001747 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Sculle1322792019-07-01 17:46:10 +01001748 goto out;
1749
Andrew Sculle1322792019-07-01 17:46:10 +01001750fail_undo_send_and_recv:
Andrew Scull3c257452019-11-26 13:32:50 +00001751 CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001752 orig_recv_mode, local_page_pool, NULL));
Andrew Sculle1322792019-07-01 17:46:10 +01001753
1754fail_undo_send:
Andrew Scull3c257452019-11-26 13:32:50 +00001755 CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
Manish Pandeyd34f8892020-06-19 17:41:07 +01001756 orig_send_mode, local_page_pool, NULL));
Andrew Sculle1322792019-07-01 17:46:10 +01001757
1758out:
Andrew Sculle1322792019-07-01 17:46:10 +01001759 return ret;
1760}
1761
Karl Meakinb9705e22024-04-05 13:58:28 +01001762/**
1763 * Read the buffer addresses and VM ID of an FFA_RXTX_MAP request. Handles
1764 * forwarded messages by reading from the hypervisor's TX buffer.
1765 *
1766 * Returns the VM/SP ID on success.
1767 *
1768 * Returns `HF_INVALID_VM_ID` when the arguments provided via the ABI
1769 * FFA_RXTX_MAP indicate the SPMC should retrieve the RXTX description from the
1770 * Hypervisor RXTX buffers, and the hypervisor hasn't given its own RXTX buffers
1771 * for the SPMC to map.
1772 */
1773static ffa_id_t api_get_rxtx_description(struct vm *current_vm, ipaddr_t *send,
1774 ipaddr_t *recv, uint32_t *page_count)
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001775{
Karl Meakinb9705e22024-04-05 13:58:28 +01001776 bool forwarded;
1777 struct vm_locked vm_locked;
1778 ffa_id_t owner_vm_id;
1779 struct ffa_endpoint_rx_tx_descriptor *endpoint_desc;
1780 struct ffa_composite_memory_region *rx_region;
1781 struct ffa_composite_memory_region *tx_region;
1782
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001783 /*
1784 * If the message has been forwarded the effective addresses are in
1785 * hypervisor's TX buffer.
1786 */
Karl Meakinb9705e22024-04-05 13:58:28 +01001787 forwarded = (current_vm->id == HF_OTHER_WORLD_ID) &&
1788 (ipa_addr(*send) == 0) && (ipa_addr(*recv) == 0) &&
1789 (*page_count == 0);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001790
1791 if (forwarded) {
Karl Meakinb9705e22024-04-05 13:58:28 +01001792 vm_locked = vm_lock(current_vm);
1793 endpoint_desc = (struct ffa_endpoint_rx_tx_descriptor *)
1794 vm_locked.vm->mailbox.send;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001795
Karl Meakinb9705e22024-04-05 13:58:28 +01001796 if (endpoint_desc == NULL) {
1797 dlog_error(
1798 "Trying to access RXTX description, but "
1799 "hypervisor has not provided RXTX buffers\n");
1800 vm_unlock(&vm_locked);
1801 return HF_INVALID_VM_ID;
1802 }
1803
1804 rx_region = ffa_endpoint_get_rx_memory_region(endpoint_desc);
1805 tx_region = ffa_endpoint_get_tx_memory_region(endpoint_desc);
1806
1807 owner_vm_id = endpoint_desc->endpoint_id;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001808 *recv = ipa_init(rx_region->constituents[0].address);
1809 *send = ipa_init(tx_region->constituents[0].address);
1810 *page_count = rx_region->constituents[0].page_count;
J-Alves28ad9b42022-12-08 12:19:43 +00001811
1812 vm_unlock(&vm_locked);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001813 } else {
Karl Meakinb9705e22024-04-05 13:58:28 +01001814 owner_vm_id = current_vm->id;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001815 }
Karl Meakinb9705e22024-04-05 13:58:28 +01001816
1817 return owner_vm_id;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001818}
Karl Meakinb9705e22024-04-05 13:58:28 +01001819
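/*
 * Illustrative hypervisor-side sketch (assumptions noted): to forward a
 * VM's FFA_RXTX_MAP, the hypervisor writes an
 * ffa_endpoint_rx_tx_descriptor into its own TX buffer and invokes the
 * ABI with zeroed parameters, which the function above treats as the
 * forwarded case:
 *
 *	struct ffa_value args = (struct ffa_value){
 *		.func = FFA_RXTX_MAP_64,
 *		.arg1 = 0,	// TX address: zero means "see TX buffer"
 *		.arg2 = 0,	// RX address
 *		.arg3 = 0,	// page count
 *	};
 *
 * The descriptor itself carries the owner's endpoint ID plus the RX/TX
 * composite regions read via ffa_endpoint_get_rx_memory_region() above.
 */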
Andrew Sculle1322792019-07-01 17:46:10 +01001820/**
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001821 * Configures the VM to send/receive data through the specified pages. The pages
Manish Pandeyd34f8892020-06-19 17:41:07 +01001822 * must not be shared. Locking of the page tables combined with a local memory
1823 * pool ensures there will always be enough memory to recover from any errors
1824 * that arise. The stage-1 page tables must be locked so memory cannot be taken
1825 * by another core which could result in this transaction being unable to roll
1826 * back in the case of an error.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001827 *
1828 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001829 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001830 * aligned, are the same or have invalid attributes.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001831 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001832 * due to insuffient page table memory.
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001833 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001834 * - FFA_SUCCESS on success if no further action is needed.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001835 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001836struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
Federico Recanati9f1b6532022-04-14 13:15:28 +02001837 uint32_t page_count, struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001838{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001839 struct ffa_value ret;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001840 struct vm_locked owner_vm_locked;
Manish Pandeyd34f8892020-06-19 17:41:07 +01001841 struct mm_stage1_locked mm_stage1_locked;
1842 struct mpool local_page_pool;
J-Alves19e20cf2023-08-02 12:48:55 +01001843 ffa_id_t owner_vm_id;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001844
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001845 /*
1846 * Get the original buffer addresses and VM ID in case of forwarded
1847 * message.
1848 */
Karl Meakinb9705e22024-04-05 13:58:28 +01001849 owner_vm_id = api_get_rxtx_description(current->vm, &send, &recv,
1850 &page_count);
1851 if (owner_vm_id == HF_INVALID_VM_ID) {
1852 return ffa_error(FFA_INVALID_PARAMETERS);
1853 }
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001854
1855 owner_vm_locked = plat_ffa_vm_find_locked_create(owner_vm_id);
1856 if (owner_vm_locked.vm == NULL) {
1857 dlog_error("Cannot map RX/TX for VM ID %#x, not found.\n",
1858 owner_vm_id);
1859 return ffa_error(FFA_DENIED);
1860 }
Andrew Scull220e6212018-12-21 18:09:00 +00001861
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001862 /*
Manish Pandeyd34f8892020-06-19 17:41:07 +01001863 * Create a local pool so any freed memory can't be used by another
1864 * thread. This is to ensure the original mapping can be restored if any
1865 * stage of the process fails.
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001866 */
Manish Pandeyd34f8892020-06-19 17:41:07 +01001867 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
1868
Manish Pandeyd34f8892020-06-19 17:41:07 +01001869 mm_stage1_locked = mm_lock_stage1();
Andrew Scull220e6212018-12-21 18:09:00 +00001870
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001871 ret = api_vm_configure_pages(mm_stage1_locked, owner_vm_locked, send,
1872 recv, page_count, &local_page_pool);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001873 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001874 goto exit;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001875 }
1876
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001877 /* Forward buffer mapping to SPMC if coming from a VM. */
1878 plat_ffa_rxtx_map_forward(owner_vm_locked);
1879
Federico Recanati9f1b6532022-04-14 13:15:28 +02001880 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Scull220e6212018-12-21 18:09:00 +00001881
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001882exit:
Manish Pandeyd34f8892020-06-19 17:41:07 +01001883 mpool_fini(&local_page_pool);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001884 mm_unlock_stage1(&mm_stage1_locked);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001885 vm_unlock(&owner_vm_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001886
1887 return ret;
1888}
1889
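/*
 * Illustrative partition-side sketch (assuming vmapi's ffa_rxtx_map()
 * wrapper): a VM registers page-aligned, non-overlapping buffers once,
 * typically at boot:
 *
 *	static alignas(PAGE_SIZE) uint8_t tx[HF_MAILBOX_SIZE];
 *	static alignas(PAGE_SIZE) uint8_t rx[HF_MAILBOX_SIZE];
 *
 *	struct ffa_value ret =
 *		ffa_rxtx_map((hf_ipaddr_t)tx, (hf_ipaddr_t)rx);
 *	if (ret.func != FFA_SUCCESS_32) {
 *		// FFA_DENIED here means the pair was already set up.
 *	}
 */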
1890/**
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001891 * Unmaps the RX/TX buffer pair of a partition or partition manager from the
1892 * translation regime of the caller. Unmaps the region for the hypervisor and
1893 * sets the memory region to owned and exclusive for the component. Since the
1894 * memory region was mapped in the page table when the buffers were originally
1895 * created, we can safely remap it.
1896 *
1897 * Returns:
1898 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
1899 * behalf of the caller.
1900 * - FFA_SUCCESS on success if no further action is needed.
1901 */
J-Alves19e20cf2023-08-02 12:48:55 +01001902struct ffa_value api_ffa_rxtx_unmap(ffa_id_t allocator_id, struct vcpu *current)
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001903{
1904 struct vm *vm = current->vm;
1905 struct vm_locked vm_locked;
J-Alves19e20cf2023-08-02 12:48:55 +01001906 ffa_id_t owner_vm_id;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001907 struct mm_stage1_locked mm_stage1_locked;
1908 paddr_t send_pa_begin;
1909 paddr_t send_pa_end;
1910 paddr_t recv_pa_begin;
1911 paddr_t recv_pa_end;
Federico Recanati8da9e332022-02-10 11:00:17 +01001912 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001913
J-Alves661e1b72023-08-02 13:39:40 +01001914 if (vm->id == HF_HYPERVISOR_VM_ID && !ffa_is_vm_id(allocator_id)) {
Olivier Deprez106c8ce2024-02-08 11:49:02 +01001915 dlog_verbose(
J-Alves9bd324a2023-01-12 10:52:52 +00001916 "The Hypervisor must specify a valid VM ID in register "
1917 "W1, if FFA_RXTX_UNMAP call forwarded to SPM.\n");
1918 return ffa_error(FFA_INVALID_PARAMETERS);
1919 }
1920
Federico Recanati8da9e332022-02-10 11:00:17 +01001921 /* Ensure `allocator_id` is set only at Non-Secure Physical instance. */
1922 if (vm_id_is_current_world(vm->id) && (allocator_id != 0)) {
J-Alves9bd324a2023-01-12 10:52:52 +00001923 dlog_error(
1924 "The register W1 (containing ID) must be 0 at virtual "
1925 "instances.\n");
Federico Recanati8da9e332022-02-10 11:00:17 +01001926 return ffa_error(FFA_INVALID_PARAMETERS);
1927 }
1928
1929 /* VM ID of which buffers have to be unmapped. */
1930 owner_vm_id = (allocator_id != 0) ? allocator_id : vm->id;
1931
1932 vm_locked = plat_ffa_vm_find_locked(owner_vm_id);
1933 vm = vm_locked.vm;
1934 if (vm == NULL) {
1935 dlog_error("Cannot unmap RX/TX for VM ID %#x, not found.\n",
1936 owner_vm_id);
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001937 return ffa_error(FFA_INVALID_PARAMETERS);
1938 }
1939
1940 /* Get send and receive buffers. */
1941 if (vm->mailbox.send == NULL || vm->mailbox.recv == NULL) {
Olivier Deprez86d87ae2021-08-19 14:27:46 +02001942 dlog_verbose(
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001943 "No buffer pair registered on behalf of the caller.\n");
Federico Recanati8da9e332022-02-10 11:00:17 +01001944 ret = ffa_error(FFA_INVALID_PARAMETERS);
1945 goto out;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001946 }
1947
1948 /* Currently a mailbox size of 1 page is assumed. */
1949 send_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.send));
1950 send_pa_end = pa_add(send_pa_begin, HF_MAILBOX_SIZE);
1951 recv_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.recv));
1952 recv_pa_end = pa_add(recv_pa_begin, HF_MAILBOX_SIZE);
1953
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001954 mm_stage1_locked = mm_lock_stage1();
1955
Federico Recanati8da9e332022-02-10 11:00:17 +01001956 /* Reset stage 2 mapping only for virtual FF-A instances. */
1957 if (vm_id_is_current_world(owner_vm_id)) {
1958 /*
1959 * Set the memory region of the buffers back to the default mode
1960 * for the VM. Since this memory region was already mapped for
1961 * the RXTX buffers we can safely remap them.
1962 */
1963 CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
1964 MM_MODE_R | MM_MODE_W | MM_MODE_X,
1965 &api_page_pool, NULL));
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001966
Federico Recanati8da9e332022-02-10 11:00:17 +01001967 CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
1968 MM_MODE_R | MM_MODE_W | MM_MODE_X,
1969 &api_page_pool, NULL));
Karl Meakin738bee12024-01-12 15:19:45 +00001970 } else {
1971 ret = arch_other_world_vm_configure_rxtx_unmap(
1972 vm_locked, &api_page_pool, send_pa_begin, send_pa_end,
1973 recv_pa_begin, recv_pa_end);
1974 if (ret.func != FFA_SUCCESS_32) {
1975 goto out;
1976 }
Federico Recanati8da9e332022-02-10 11:00:17 +01001977 }
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001978
1979 /* Unmap the buffers in the partition manager. */
1980 CHECK(mm_unmap(mm_stage1_locked, send_pa_begin, send_pa_end,
1981 &api_page_pool));
1982 CHECK(mm_unmap(mm_stage1_locked, recv_pa_begin, recv_pa_end,
1983 &api_page_pool));
1984
1985 vm->mailbox.send = NULL;
1986 vm->mailbox.recv = NULL;
Federico Recanati10bd06c2022-02-23 17:32:59 +01001987 plat_ffa_vm_destroy(vm_locked);
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001988
Federico Recanati8da9e332022-02-10 11:00:17 +01001989 /* Forward buffer unmapping to SPMC if coming from a VM. */
J-Alves70079932022-12-07 17:32:20 +00001990 plat_ffa_rxtx_unmap_forward(vm_locked);
Federico Recanati8da9e332022-02-10 11:00:17 +01001991
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001992 mm_unlock_stage1(&mm_stage1_locked);
Federico Recanati8da9e332022-02-10 11:00:17 +01001993
1994out:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001995 vm_unlock(&vm_locked);
1996
Federico Recanati8da9e332022-02-10 11:00:17 +01001997 return ret;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001998}
1999
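/*
 * Illustrative sketch (helper names assumed from vmapi): a partition
 * tears down its own buffer pair with allocator ID 0, while the
 * hypervisor instead passes the ID of the VM whose buffers the SPMC
 * should unmap:
 *
 *	ffa_rxtx_unmap();	// at a virtual FF-A instance (w1 == 0)
 */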
2000/**
Federico Recanati25053ee2022-03-14 15:01:53 +01002001 * Copies data from the sender's send buffer to the recipient's receive buffer
2002 * and notifies the receiver.
2003 */
J-Alves19e20cf2023-08-02 12:48:55 +01002004struct ffa_value api_ffa_msg_send2(ffa_id_t sender_vm_id, uint32_t flags,
Federico Recanati25053ee2022-03-14 15:01:53 +01002005 struct vcpu *current)
2006{
2007 struct vm *from = current->vm;
2008 struct vm *to;
2009 struct vm_locked to_locked;
J-Alves19e20cf2023-08-02 12:48:55 +01002010 ffa_id_t msg_sender_id;
Federico Recanati25053ee2022-03-14 15:01:53 +01002011 struct vm_locked sender_locked;
2012 const void *from_msg;
2013 struct ffa_value ret;
J-Alves19e20cf2023-08-02 12:48:55 +01002014 ffa_id_t sender_id;
2015 ffa_id_t receiver_id;
Federico Recanati25053ee2022-03-14 15:01:53 +01002016 uint32_t msg_size;
2017 ffa_notifications_bitmap_t rx_buffer_full;
2018
Karl Meakina5ea9092024-05-28 15:40:33 +01002019 alignas(8) struct ffa_partition_rxtx_header header;
2020
Federico Recanati25053ee2022-03-14 15:01:53 +01002021 /* Only Hypervisor can set `sender_vm_id` when forwarding messages. */
2022 if (from->id != HF_HYPERVISOR_VM_ID && sender_vm_id != 0) {
2023 dlog_error("Sender VM ID must be zero.\n");
2024 return ffa_error(FFA_INVALID_PARAMETERS);
2025 }
2026
Federico Recanati25053ee2022-03-14 15:01:53 +01002027 /*
2028 * Get message sender's mailbox, which can be different to the `from` vm
2029 * when the message is forwarded.
2030 */
2031 msg_sender_id = (sender_vm_id != 0) ? sender_vm_id : from->id;
2032 sender_locked = plat_ffa_vm_find_locked(msg_sender_id);
2033 if (sender_locked.vm == NULL) {
2034 dlog_error("Cannot send message from VM ID %#x, not found.\n",
2035 msg_sender_id);
2036 return ffa_error(FFA_DENIED);
2037 }
2038
2039 from_msg = sender_locked.vm->mailbox.send;
2040 if (from_msg == NULL) {
2041 dlog_error("Cannot retrieve TX buffer for VM ID %#x.\n",
2042 msg_sender_id);
2043 ret = ffa_error(FFA_DENIED);
2044 goto out_unlock_sender;
2045 }
2046
2047 /*
2048	 * Copy the message header as a safety measure to avoid multiple accesses
2049	 * to unsafe memory, which could be 'corrupted' between the safety checks
2050	 * and the final buffer copy.
2051 */
J-Alves8a5fd9d2024-04-09 11:22:49 +01002052 if (!memcpy_trapped(&header, FFA_RXTX_HEADER_SIZE, from_msg,
2053 FFA_RXTX_HEADER_SIZE)) {
2054 dlog_error(
2055 "%s: Failed to copy message from sender's(%x) TX "
2056 "buffer.\n",
2057 __func__, sender_locked.vm->id);
2058 ret = ffa_error(FFA_ABORTED);
2059 goto out_unlock_sender;
2060 }
2061
Federico Recanati25053ee2022-03-14 15:01:53 +01002062 sender_id = ffa_rxtx_header_sender(&header);
2063 receiver_id = ffa_rxtx_header_receiver(&header);
2064
2065 /* Ensure Sender IDs from API and from message header match. */
2066 if (msg_sender_id != sender_id) {
2067 dlog_error(
2068 "Message sender VM ID (%#x) doesn't match header's VM "
2069 "ID (%#x).\n",
2070 msg_sender_id, sender_id);
2071 ret = ffa_error(FFA_INVALID_PARAMETERS);
2072 goto out_unlock_sender;
2073 }
2074
2075 /* Disallow reflexive requests as this suggests an error in the VM. */
2076 if (receiver_id == sender_id) {
2077 dlog_error("Sender and receive VM IDs must be different.\n");
2078 ret = ffa_error(FFA_INVALID_PARAMETERS);
2079 goto out_unlock_sender;
2080 }
2081
Federico Recanati7b39ff22022-03-14 15:01:53 +01002082 /* `flags` can be set only at secure virtual FF-A instances. */
J-Alves661e1b72023-08-02 13:39:40 +01002083 if (ffa_is_vm_id(sender_id) && (flags != 0)) {
Federico Recanati7b39ff22022-03-14 15:01:53 +01002084 dlog_error("flags must be zero.\n");
2085 return ffa_error(FFA_INVALID_PARAMETERS);
2086 }
2087
Olivier Deprez37b75112024-03-04 18:27:30 +01002088 if (header.offset != FFA_RXTX_HEADER_SIZE) {
2089 dlog_error("Indirect msg payload must follow the header.\n");
2090 return ffa_error(FFA_INVALID_PARAMETERS);
2091 }
2092
Federico Recanati25053ee2022-03-14 15:01:53 +01002093 /*
2094 * Check if the message has to be forwarded to the SPMC, in
2095 * this case return, the SPMC will handle the buffer copy.
2096 */
2097 if (plat_ffa_msg_send2_forward(receiver_id, sender_id, &ret)) {
2098 goto out_unlock_sender;
2099 }
2100
2101 /* Ensure the receiver VM exists. */
2102 to_locked = plat_ffa_vm_find_locked(receiver_id);
2103 to = to_locked.vm;
2104
2105 if (to == NULL) {
2106 dlog_error("Cannot deliver message to VM %#x, not found.\n",
2107 receiver_id);
2108 ret = ffa_error(FFA_INVALID_PARAMETERS);
2109 goto out_unlock_sender;
2110 }
2111
2112 /*
2113	 * Check that the sender and receiver can use indirect messages.
2114	 * The sender is the VM/SP that originally sent the message, not the
2115 * hypervisor possibly relaying it.
2116 */
2117 if (!plat_ffa_is_indirect_msg_supported(sender_locked, to_locked)) {
2118 dlog_verbose("VM %#x doesn't support indirect message\n",
2119 sender_id);
2120 ret = ffa_error(FFA_DENIED);
2121 goto out;
2122 }
2123
J-Alvese8c8c2b2022-12-16 15:34:48 +00002124 if (vm_is_mailbox_busy(to_locked)) {
Federico Recanati25053ee2022-03-14 15:01:53 +01002125 dlog_error(
2126 "Cannot deliver message to VM %#x, RX buffer not "
2127 "ready.\n",
2128 receiver_id);
2129 ret = ffa_error(FFA_BUSY);
2130 goto out;
2131 }
2132
Federico Recanati644f0462022-03-17 12:04:00 +01002133 /* Acquire receiver's RX buffer. */
2134 if (!plat_ffa_acquire_receiver_rx(to_locked, &ret)) {
2135 dlog_error("Failed to acquire RX buffer for VM %#x\n", to->id);
2136 goto out;
2137 }
2138
Federico Recanati25053ee2022-03-14 15:01:53 +01002139 /* Check the size of transfer. */
2140 msg_size = FFA_RXTX_HEADER_SIZE + header.size;
Olivier Deprezf1dd1e12024-03-04 18:21:01 +01002141 if ((msg_size > FFA_MSG_PAYLOAD_MAX) ||
Federico Recanati25053ee2022-03-14 15:01:53 +01002142 (header.size > FFA_PARTITION_MSG_PAYLOAD_MAX)) {
2143 dlog_error("Message is too big.\n");
2144 ret = ffa_error(FFA_INVALID_PARAMETERS);
2145 goto out;
2146 }
2147
2148 /* Copy data. */
J-Alves8a5fd9d2024-04-09 11:22:49 +01002149 if (!memcpy_trapped(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg,
2150 msg_size)) {
2151 dlog_error(
2152 "%s: Failed to copy message to receiver's(%x) RX "
2153 "buffer.\n",
2154 __func__, to->id);
2155 ret = ffa_error(FFA_ABORTED);
2156 goto out;
2157 }
2158
Federico Recanati25053ee2022-03-14 15:01:53 +01002159 to->mailbox.recv_size = msg_size;
2160 to->mailbox.recv_sender = sender_id;
2161 to->mailbox.recv_func = FFA_MSG_SEND2_32;
J-Alvese8c8c2b2022-12-16 15:34:48 +00002162 to->mailbox.state = MAILBOX_STATE_FULL;
Federico Recanati25053ee2022-03-14 15:01:53 +01002163
J-Alves661e1b72023-08-02 13:39:40 +01002164 rx_buffer_full = ffa_is_vm_id(sender_id)
Federico Recanati25053ee2022-03-14 15:01:53 +01002165 ? FFA_NOTIFICATION_HYP_BUFFER_FULL_MASK
2166 : FFA_NOTIFICATION_SPM_BUFFER_FULL_MASK;
2167 vm_notifications_framework_set_pending(to_locked, rx_buffer_full);
2168
2169 if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
2170 dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
2171 vcpu_index(current));
2172 plat_ffa_sri_trigger_not_delayed(current->cpu);
2173 } else {
2174 plat_ffa_sri_state_set(DELAYED);
2175 }
2176
2177 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
2178
2179out:
2180 vm_unlock(&to_locked);
2181
2182out_unlock_sender:
2183 vm_unlock(&sender_locked);
2184
2185 return ret;
2186}
2187
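/*
 * Illustrative sender-side sketch (helper names assumed from vmapi and
 * the FF-A headers): a VM builds an indirect message as header followed
 * by payload in its TX buffer, then triggers the copy above:
 *
 *	struct ffa_partition_rxtx_header header;
 *
 *	ffa_rxtx_header_init(own_id, target_id, payload_size, &header);
 *	memcpy_s(tx_buf, FFA_MSG_PAYLOAD_MAX, &header, FFA_RXTX_HEADER_SIZE);
 *	memcpy_s((uint8_t *)tx_buf + header.offset,
 *		 FFA_PARTITION_MSG_PAYLOAD_MAX, payload, payload_size);
 *	ffa_msg_send2(0);	// flags must be 0 for VM senders, as
 *				// checked above
 */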
2188/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00002189 * Releases the caller's mailbox so that a new message can be received. The
2190 * caller must have copied out all data they wish to preserve as new messages
2191 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00002192 *
2193 * Returns:
Federico Recanati7bef0b92022-03-17 14:56:22 +01002194 * - FFA_ERROR FFA_INVALID_PARAMETERS if the message is forwarded to the
2195 * SPMC but there's no buffer pair mapped.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002196 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
2197 * - FFA_SUCCESS on success if no further action is needed.
2198 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00002199 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00002200 * hf_mailbox_waiter_get.
2201 */
J-Alves19e20cf2023-08-02 12:48:55 +01002202struct ffa_value api_ffa_rx_release(ffa_id_t receiver_id, struct vcpu *current)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00002203{
Federico Recanati7bef0b92022-03-17 14:56:22 +01002204 struct vm *current_vm = current->vm;
2205 struct vm *vm;
2206 struct vm_locked vm_locked;
J-Alves19e20cf2023-08-02 12:48:55 +01002207 ffa_id_t current_vm_id = current_vm->id;
2208 ffa_id_t release_vm_id;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002209 struct ffa_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00002210
Federico Recanati7bef0b92022-03-17 14:56:22 +01002211 /* `receiver_id` can be set only at Non-Secure Physical interface. */
2212 if (vm_id_is_current_world(current_vm_id) && (receiver_id != 0)) {
2213 dlog_error("Invalid `receiver_id`, must be zero.\n");
2214 return ffa_error(FFA_INVALID_PARAMETERS);
2215 }
2216
2217 /*
2218 * VM ID to be released: `receiver_id` if message has been forwarded by
2219 * Hypervisor to release a VM's buffer, current VM ID otherwise.
2220 */
2221 if (vm_id_is_current_world(current_vm_id) || (receiver_id == 0)) {
2222 release_vm_id = current_vm_id;
2223 } else {
2224 release_vm_id = receiver_id;
2225 }
2226
2227 vm_locked = plat_ffa_vm_find_locked(release_vm_id);
2228 vm = vm_locked.vm;
2229 if (vm == NULL) {
2230 dlog_error("No buffer registered for VM ID %#x.\n",
2231 release_vm_id);
2232 return ffa_error(FFA_INVALID_PARAMETERS);
2233 }
2234
J-Alvese8c8c2b2022-12-16 15:34:48 +00002235 if (plat_ffa_rx_release_forward(vm_locked, &ret)) {
Federico Recanati7bef0b92022-03-17 14:56:22 +01002236 goto out;
2237 }
2238
Karl Meakin1064a9c2024-06-04 17:20:43 +01002239 ret = api_release_mailbox(vm_locked);
Federico Recanati7bef0b92022-03-17 14:56:22 +01002240
2241out:
2242 vm_unlock(&vm_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01002243
2244 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01002245}
Andrew Walbran318f5732018-11-20 16:23:42 +00002246
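/*
 * Illustrative receiver-side sketch (assumed flow; process_payload() is
 * a hypothetical consumer): once the buffer-full notification is seen,
 * the receiver reads its RX buffer and then releases ownership:
 *
 *	const struct ffa_partition_rxtx_header *hdr = rx_buf;
 *
 *	process_payload((const uint8_t *)rx_buf + hdr->offset, hdr->size);
 *	ffa_rx_release();	// w1 is 0 at a virtual instance
 */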
2247/**
Federico Recanati644f0462022-03-17 12:04:00 +01002248 * Acquire ownership of an RX buffer before writing to it. Both
2249	 * Hypervisor and the SPMC are producers of a VM's RX buffer, and they
2250	 * could contend for the same buffer. The SPMC owns a VM's RX buffer after
2251	 * it's mapped in its translation regime. This ABI should be
2252	 * used by the Hypervisor to get the ownership of a VM's RX buffer
2253	 * from the SPMC, solving the aforementioned possible contention.
2254 *
2255 * Returns:
2256 * - FFA_DENIED: callee cannot relinquish ownership of RX buffer.
2257 * - FFA_INVALID_PARAMETERS: there is no buffer pair registered for the VM.
2258 * - FFA_NOT_SUPPORTED: function not implemented at the FF-A instance.
2259 */
J-Alves19e20cf2023-08-02 12:48:55 +01002260struct ffa_value api_ffa_rx_acquire(ffa_id_t receiver_id, struct vcpu *current)
Federico Recanati644f0462022-03-17 12:04:00 +01002261{
2262 struct vm_locked receiver_locked;
2263 struct vm *receiver;
2264 struct ffa_value ret;
2265
2266 if ((current->vm->id != HF_HYPERVISOR_VM_ID) ||
J-Alves661e1b72023-08-02 13:39:40 +01002267 !ffa_is_vm_id(receiver_id)) {
Federico Recanati644f0462022-03-17 12:04:00 +01002268 dlog_error(
2269 "FFA_RX_ACQUIRE not supported at this FF-A "
2270 "instance.\n");
2271 return ffa_error(FFA_NOT_SUPPORTED);
2272 }
2273
2274 receiver_locked = plat_ffa_vm_find_locked(receiver_id);
2275 receiver = receiver_locked.vm;
2276
2277 if (receiver == NULL || receiver->mailbox.recv == NULL) {
2278 dlog_error("Cannot retrieve RX buffer for VM ID %#x.\n",
2279 receiver_id);
2280 ret = ffa_error(FFA_INVALID_PARAMETERS);
2281 goto out;
2282 }
2283
2284 if (receiver->mailbox.state != MAILBOX_STATE_EMPTY) {
2285 dlog_error("Mailbox busy for VM ID %#x.\n", receiver_id);
2286 ret = ffa_error(FFA_DENIED);
2287 goto out;
2288 }
2289
J-Alvese8c8c2b2022-12-16 15:34:48 +00002290 receiver->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
Federico Recanati644f0462022-03-17 12:04:00 +01002291
2292 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
2293
2294out:
2295 vm_unlock(&receiver_locked);
2296
2297 return ret;
2298}
2299
J-Alvescc542042024-07-24 19:13:22 +01002300/*
2301 * Returns true if intid is one of the following:
2302 * - NPI (notification pending interrupt)
2303 * - ME (managed exit)
2304 * - Virtual Timer.
2305 *
2306 * These are virtual interrupts (VIs) with no expected interrupt descriptor.
2307 */
2308static bool api_is_maintenance_virtual_interrupt(uint32_t intid)
2309{
2310 return intid == HF_NOTIFICATION_PENDING_INTID ||
2311 intid == HF_MANAGED_EXIT_INTID ||
2312 intid == HF_VIRTUAL_TIMER_INTID;
2313}
2314
Federico Recanati644f0462022-03-17 12:04:00 +01002315/**
Andrew Walbran318f5732018-11-20 16:23:42 +00002316 * Enables or disables a given interrupt ID for the calling vCPU.
2317 *
2318 * Returns 0 on success, or -1 if the intid is invalid.
2319 */
Manish Pandey35e452f2021-02-18 21:36:34 +00002320int64_t api_interrupt_enable(uint32_t intid, bool enable,
2321 enum interrupt_type type, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00002322{
Manish Pandey35e452f2021-02-18 21:36:34 +00002323 struct vcpu_locked current_locked;
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002324 struct interrupts *interrupts = &current->interrupts;
J-Alvescc542042024-07-24 19:13:22 +01002325 struct interrupt_descriptor *int_desc = NULL;
2326 struct vm *vm = current->vm;
2327 struct vm_locked vm_locked;
2328
2329 int64_t ret = -1;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002330
Andrew Walbran318f5732018-11-20 16:23:42 +00002331 if (intid >= HF_NUM_INTIDS) {
2332 return -1;
2333 }
2334
J-Alvescc542042024-07-24 19:13:22 +01002335 vm_locked = vm_lock(vm);
Manish Pandey35e452f2021-02-18 21:36:34 +00002336 current_locked = vcpu_lock(current);
J-Alvescc542042024-07-24 19:13:22 +01002337
2338 int_desc = vm_interrupt_set_enable(vm_locked, intid, enable);
2339
2340 if (!api_is_maintenance_virtual_interrupt(intid)) {
2341 if (int_desc == NULL) {
2342 dlog_error("%s: invalid interrupt ID.\n", __func__);
2343 goto out;
2344 }
2345
2346 plat_interrupts_configure_interrupt(*int_desc);
2347 }
2348
Andrew Walbran318f5732018-11-20 16:23:42 +00002349 if (enable) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00002350 /*
2351 * If it is pending and was not enabled before, increment the
2352 * count.
2353 */
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002354 if (vcpu_is_virt_interrupt_pending(interrupts, intid) &&
2355 !vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
2356 vcpu_interrupt_count_increment(current_locked,
2357 interrupts, intid);
Andrew Walbran3d84a262018-12-13 14:41:19 +00002358 }
J-Alvescc542042024-07-24 19:13:22 +01002359
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002360 vcpu_virt_interrupt_set_enabled(interrupts, intid);
2361 vcpu_virt_interrupt_set_type(interrupts, intid, type);
Andrew Walbran318f5732018-11-20 16:23:42 +00002362 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00002363 /*
2364 * If it is pending and was enabled before, decrement the count.
2365 */
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002366 if (vcpu_is_virt_interrupt_pending(interrupts, intid) &&
2367 vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
2368 vcpu_interrupt_count_decrement(current_locked,
2369 interrupts, intid);
Andrew Walbran3d84a262018-12-13 14:41:19 +00002370 }
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002371 vcpu_virt_interrupt_clear_enabled(interrupts, intid);
2372 vcpu_virt_interrupt_set_type(interrupts, intid,
2373 INTERRUPT_TYPE_IRQ);
Andrew Walbran318f5732018-11-20 16:23:42 +00002374 }
2375
J-Alvescc542042024-07-24 19:13:22 +01002376 ret = 0;
2377
2378out:
2379 vm_unlock(&vm_locked);
Manish Pandey35e452f2021-02-18 21:36:34 +00002380 vcpu_unlock(&current_locked);
J-Alvescc542042024-07-24 19:13:22 +01002381
2382 return ret;
Andrew Walbran318f5732018-11-20 16:23:42 +00002383}
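
/*
 * Illustrative sketch (not part of the build): a partition typically reaches
 * api_interrupt_enable() through the paravirtualized vmapi wrapper. The
 * wrapper name and DEVICE_INTID below are assumptions for the example.
 *
 *	if (hf_interrupt_enable(DEVICE_INTID, true, INTERRUPT_TYPE_IRQ) != 0) {
 *		// DEVICE_INTID was >= HF_NUM_INTIDS or had no descriptor.
 *	}
 */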
2384
2385/**
2386 * Returns the ID of the next pending interrupt for the calling vCPU, and
2387 * acknowledges it (i.e. marks it as no longer pending). Returns
2388 * HF_INVALID_INTID if there are no pending interrupts.
2389 */
Madhukar Pappireddyc64d0642024-08-07 16:55:46 -05002390uint32_t api_interrupt_get(struct vcpu_locked current_locked)
Andrew Walbran318f5732018-11-20 16:23:42 +00002391{
Raghu Krishnamurthy61a025b2022-07-20 08:37:04 -07002392 uint32_t i;
Andrew Walbran318f5732018-11-20 16:23:42 +00002393 uint32_t first_interrupt = HF_INVALID_INTID;
Madhukar Pappireddyc64d0642024-08-07 16:55:46 -05002394 struct interrupts *interrupts = &current_locked.vcpu->interrupts;
Andrew Walbran318f5732018-11-20 16:23:42 +00002395
2396 /*
2397 * Find the first enabled and pending interrupt ID, return it, and
2398 * deactivate it.
2399 */
Andrew Walbran318f5732018-11-20 16:23:42 +00002400 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
2401 uint32_t enabled_and_pending =
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002402 interrupts->interrupt_enabled.bitmap[i] &
2403 interrupts->interrupt_pending.bitmap[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002404
Andrew Walbran318f5732018-11-20 16:23:42 +00002405 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00002406 uint8_t bit_index = ctz(enabled_and_pending);
Daniel Boulby4ca50f02022-07-29 18:29:34 +01002407
2408 first_interrupt =
2409 i * INTERRUPT_REGISTER_BITS + bit_index;
Manish Pandey35e452f2021-02-18 21:36:34 +00002410
Andrew Walbran3d84a262018-12-13 14:41:19 +00002411 /*
2412 * Mark it as no longer pending and decrement the count.
2413 */
J-Alvesb8730e92024-08-07 18:28:55 +01002414 vcpu_interrupt_clear_decrement(current_locked,
2415 first_interrupt);
Andrew Walbran318f5732018-11-20 16:23:42 +00002416 break;
2417 }
2418 }
Andrew Walbran318f5732018-11-20 16:23:42 +00002419
Andrew Walbran318f5732018-11-20 16:23:42 +00002420 return first_interrupt;
2421}
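
/*
 * Illustrative sketch (assumes the hf_interrupt_get() vmapi wrapper): since
 * each call acknowledges exactly one ID, a guest interrupt handler can drain
 * all enabled-and-pending virtual interrupts in a loop.
 *
 *	uint32_t intid;
 *
 *	while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
 *		handle_virtual_interrupt(intid); // hypothetical dispatcher
 *	}
 */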
2422
2423/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00002424 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00002425 * given VM and vCPU.
2426 */
2427static inline bool is_injection_allowed(uint32_t target_vm_id,
2428 struct vcpu *current)
2429{
2430 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002431
Andrew Walbran318f5732018-11-20 16:23:42 +00002432 /*
2433 * The primary VM is allowed to inject interrupts into any VM. Secondary
2434 * VMs are only allowed to inject interrupts into their own vCPUs.
2435 */
2436 return current_vm_id == HF_PRIMARY_VM_ID ||
2437 current_vm_id == target_vm_id;
2438}
2439
2440/**
2441 * Injects a virtual interrupt of the given ID into the given target vCPU.
2442 * This doesn't cause the vCPU to actually be run immediately; it will be taken
2443 * when the vCPU is next run, which is up to the scheduler.
2444 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00002445 * Returns:
2446 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
2447 * ID is invalid, or the current VM is not allowed to inject interrupts to
2448 * the target VM.
2449 * - 0 on success if no further action is needed.
2450 * - 1 if it was called by the primary VM and the primary VM now needs to wake
2451 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00002452 */
J-Alves19e20cf2023-08-02 12:48:55 +01002453int64_t api_interrupt_inject(ffa_id_t target_vm_id,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002454 ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01002455 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00002456{
Andrew Walbran318f5732018-11-20 16:23:42 +00002457 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01002458 struct vm *target_vm = vm_find(target_vm_id);
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06002459 struct vcpu_locked current_locked;
2460 struct vcpu_locked target_locked;
2461 struct two_vcpu_locked vcpus_locked;
2462 int64_t ret;
Andrew Walbran318f5732018-11-20 16:23:42 +00002463
2464 if (intid >= HF_NUM_INTIDS) {
2465 return -1;
2466 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002467
Andrew Walbran318f5732018-11-20 16:23:42 +00002468 if (target_vm == NULL) {
2469 return -1;
2470 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002471
Andrew Walbran318f5732018-11-20 16:23:42 +00002472 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00002473 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00002474 return -1;
2475 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002476
Andrew Walbran318f5732018-11-20 16:23:42 +00002477 if (!is_injection_allowed(target_vm_id, current)) {
2478 return -1;
2479 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00002480
Andrew Walbrane1310df2019-04-29 17:28:28 +01002481 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00002482
	/* A VM may inject an interrupt into itself. */
2484 if (target_vcpu != current) {
2485 /* Lock both vCPUs at once to avoid deadlock. */
2486 vcpus_locked = vcpu_lock_both(current, target_vcpu);
2487 current_locked = vcpus_locked.vcpu1;
2488 target_locked = vcpus_locked.vcpu2;
2489 } else {
2490 current_locked = vcpu_lock(current);
2491 target_locked = current_locked;
2492 }
2493
Manish Pandey35e452f2021-02-18 21:36:34 +00002494 dlog_verbose(
2495 "Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
2496 "%u\n",
2497 intid, target_vm_id, target_vcpu_idx, current->vm->id,
2498 vcpu_index(current));
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06002499 ret = api_interrupt_inject_locked(target_locked, intid, current_locked,
2500 next);
2501 if (target_vcpu != current) {
2502 vcpu_unlock(&target_locked);
2503 }
2504
2505 vcpu_unlock(&current_locked);
2506 return ret;
Andrew Walbran318f5732018-11-20 16:23:42 +00002507}
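
/*
 * Illustrative sketch (hypothetical primary VM caller, wrapper name assumed):
 * a return value of 1 tells the primary VM's scheduler that the target vCPU
 * now needs to be woken or kicked.
 *
 *	int64_t r = hf_interrupt_inject(target_vm, target_vcpu, intid);
 *
 *	if (r == 1) {
 *		wake_vcpu(target_vm, target_vcpu); // scheduler-specific
 *	} else if (r != 0) {
 *		// Invalid target, invalid intid, or injection not allowed.
 *	}
 */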
Andrew Scull6386f252018-12-06 13:29:10 +00002508
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002509/**
2510 * Negotiate the FF-A version to be used for this FF-A instance.
2511 * See section 13.2 of the FF-A v1.2 ALP1 spec.
2512 *
2513 * Returns Hafnium's version number (`FFA_VERSION_COMPILED`) on success.
2514 * Returns `FFA_NOT_SUPPORTED` on error:
2515 * - The version is invalid (highest bit set).
2516 * - The requested version is incompatible.
2517 * - The version has already been negotiated and cannot be changed.
2518 */
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00002519struct ffa_value api_ffa_version(struct vcpu *current,
Karl Meakin0e617d92024-04-05 12:55:22 +01002520 enum ffa_version requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01002521{
Karl Meakin0e617d92024-04-05 12:55:22 +01002522 static_assert(sizeof(enum ffa_version) == 4,
2523 "enum ffa_version must be 4 bytes wide");
2524
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002525 const struct ffa_value error = {.func = (uint32_t)FFA_NOT_SUPPORTED};
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00002526 struct vm_locked current_vm_locked;
2527
Karl Meakin0e617d92024-04-05 12:55:22 +01002528 if (!ffa_version_is_valid(requested_version)) {
2529 dlog_error(
2530 "FFA_VERSION: requested version %#x is invalid "
2531 "(highest bit must be zero)\n",
2532 requested_version);
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002533 return error;
Andrew Walbran9fd29072020-04-22 12:12:14 +01002534 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01002535
Karl Meakin0e617d92024-04-05 12:55:22 +01002536 if (!ffa_versions_are_compatible(requested_version,
2537 FFA_VERSION_COMPILED)) {
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002538 dlog_error(
2539 "FFA_VERSION: requested version v%u.%u is not "
2540 "compatible with v%u.%u\n",
2541 ffa_version_get_major(requested_version),
2542 ffa_version_get_minor(requested_version),
2543 ffa_version_get_major(FFA_VERSION_COMPILED),
2544 ffa_version_get_minor(FFA_VERSION_COMPILED));
2545 return error;
J-Alves3cc9d6f2023-07-20 16:46:15 +01002546 }
2547
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00002548 current_vm_locked = vm_lock(current->vm);
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002549
2550 if (current_vm_locked.vm->ffa_version_negotiated &&
2551 requested_version != current_vm_locked.vm->ffa_version) {
2552 vm_unlock(&current_vm_locked);
2553 dlog_error(
2554 "FFA_VERSION: Cannot change FF-A version after other "
2555 "FF-A calls have been made\n");
2556 return error;
2557 }
2558
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00002559 current_vm_locked.vm->ffa_version = requested_version;
2560 vm_unlock(&current_vm_locked);
2561
Karl Meakin6eeec8e2024-03-07 18:07:20 +00002562 return (struct ffa_value){.func = (uint32_t)FFA_VERSION_COMPILED};
Jose Marinhofc0b2b62019-06-06 11:18:45 +01002563}
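
/*
 * Sketch of the version encoding validated above: bit 31 must be zero, the
 * major version lives in bits [30:16] and the minor version in bits [15:0].
 * A caller negotiating v1.1 could do the following (the macro is an
 * assumption for the example):
 *
 *	#define MAKE_FFA_VERSION(major, minor) (((major) << 16) | (minor))
 *
 *	struct ffa_value ret = ffa_version(MAKE_FFA_VERSION(1, 1));
 *
 *	if ((int32_t)ret.func == FFA_NOT_SUPPORTED) {
 *		// Version rejected: invalid, incompatible, or already fixed.
 *	}
 */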
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01002564
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002565/**
 * Helper to build the success return value of FFA_FEATURES.
J-Alves6f72ca82021-11-01 12:34:58 +00002567 */
2568struct ffa_value api_ffa_feature_success(uint32_t arg2)
2569{
2570 return (struct ffa_value){
Karl Meakin49ec1e42024-05-10 13:08:24 +01002571 .func = FFA_SUCCESS_32,
2572 .arg1 = 0U,
2573 .arg2 = arg2,
2574 };
J-Alves6f72ca82021-11-01 12:34:58 +00002575}
2576
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002577static struct ffa_value ffa_features_function(uint32_t func,
2578 uint32_t input_property,
2579 struct vcpu *current)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002580{
Karl Meakin0e617d92024-04-05 12:55:22 +01002581 const enum ffa_version ffa_version = current->vm->ffa_version;
Karl Meakinf1ed5f12024-02-22 15:57:36 +00002582 const bool el0_partition = current->vm->el0_partition;
2583
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002584 switch (func) {
J-Alves6f72ca82021-11-01 12:34:58 +00002585 /* Check support of the given Function ID. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002586 case FFA_ERROR_32:
2587 case FFA_SUCCESS_32:
2588 case FFA_INTERRUPT_32:
2589 case FFA_VERSION_32:
2590 case FFA_FEATURES_32:
2591 case FFA_RX_RELEASE_32:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01002592 case FFA_RXTX_UNMAP_32:
Fuad Tabbae4efcc32020-07-16 15:37:27 +01002593 case FFA_PARTITION_INFO_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002594 case FFA_ID_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002595 case FFA_MSG_WAIT_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002596 case FFA_RUN_32:
J-Alves95fbb312024-03-20 15:19:16 +00002597 case FFA_MEM_DONATE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002598 case FFA_MEM_DONATE_32:
2599 case FFA_MEM_LEND_32:
J-Alves95fbb312024-03-20 15:19:16 +00002600 case FFA_MEM_LEND_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002601 case FFA_MEM_SHARE_32:
J-Alves95fbb312024-03-20 15:19:16 +00002602 case FFA_MEM_SHARE_64:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002603 case FFA_MEM_RETRIEVE_RESP_32:
2604 case FFA_MEM_RELINQUISH_32:
2605 case FFA_MEM_RECLAIM_32:
J-Alvesff676c12022-05-13 17:25:33 +01002606 case FFA_MEM_FRAG_RX_32:
2607 case FFA_MEM_FRAG_TX_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00002608 case FFA_MSG_SEND_DIRECT_RESP_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002609 case FFA_MSG_SEND_DIRECT_RESP_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00002610 case FFA_MSG_SEND_DIRECT_REQ_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002611 case FFA_MSG_SEND_DIRECT_REQ_32:
Karl Meakin0e617d92024-04-05 12:55:22 +01002612
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +00002613 /* FF-A v1.1 features. */
2614 case FFA_SPM_ID_GET_32:
J-Alves6f72ca82021-11-01 12:34:58 +00002615 case FFA_NOTIFICATION_BITMAP_CREATE_32:
2616 case FFA_NOTIFICATION_BITMAP_DESTROY_32:
2617 case FFA_NOTIFICATION_BIND_32:
2618 case FFA_NOTIFICATION_UNBIND_32:
2619 case FFA_NOTIFICATION_SET_32:
2620 case FFA_NOTIFICATION_GET_32:
2621 case FFA_NOTIFICATION_INFO_GET_64:
Federico Recanati25053ee2022-03-14 15:01:53 +01002622 case FFA_MSG_SEND2_32:
Karl Meakin0e617d92024-04-05 12:55:22 +01002623 if (FFA_VERSION_1_1 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002624 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002625 }
2626
Kathleen Capellae4fe2962023-09-01 17:08:47 -04002627 /* FF-A v1.2 features. */
2628 case FFA_CONSOLE_LOG_32:
2629 case FFA_CONSOLE_LOG_64:
Raghu Krishnamurthy7592bcb2022-12-25 13:09:00 -08002630 case FFA_PARTITION_INFO_GET_REGS_64:
Kathleen Capella41fea932023-06-23 17:39:28 -04002631 case FFA_MSG_SEND_DIRECT_REQ2_64:
Kathleen Capella087e5022023-09-07 18:04:15 -04002632 case FFA_MSG_SEND_DIRECT_RESP2_64:
Karl Meakin0e617d92024-04-05 12:55:22 +01002633 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002634 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002635 }
2636
Karl Meakin49ec1e42024-05-10 13:08:24 +01002637 return api_ffa_feature_success(0);
2638
Karl Meakin5a222642024-06-20 10:23:37 +01002639 /* These functions are only supported on S-EL0 partitions. */
2640 case FFA_MEM_PERM_GET_32:
2641 case FFA_MEM_PERM_SET_32:
2642 case FFA_MEM_PERM_GET_64:
2643 case FFA_MEM_PERM_SET_64:
2644 if (!(vm_id_is_current_world(current->vm->id) &&
2645 el0_partition)) {
2646 dlog_verbose(
2647 "FFA_FEATURE: %s is only supported on S-EL0 "
2648 "partitions\n",
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002649 ffa_func_name(func));
Karl Meakin5a222642024-06-20 10:23:37 +01002650 return ffa_error(FFA_NOT_SUPPORTED);
2651 }
2652 return api_ffa_feature_success(0);
2653
Karl Meakin1a8c0cd2024-06-20 10:26:12 +01002654 case FFA_SECONDARY_EP_REGISTER_64:
2655 if (FFA_VERSION_COMPILED < FFA_VERSION_1_1) {
2656 return ffa_error(FFA_NOT_SUPPORTED);
2657 }
2658
2659 if (!(vm_id_is_current_world(current->vm->id) &&
2660 current->vm->vcpu_count > 1)) {
2661 dlog_verbose(
2662 "FFA_FEATURE: %s is only supported on SPs with "
2663 "more than 1 vCPU\n",
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002664 ffa_func_name(func));
Karl Meakin1a8c0cd2024-06-20 10:26:12 +01002665 return ffa_error(FFA_NOT_SUPPORTED);
2666 }
2667 return api_ffa_feature_success(0);
2668
Karl Meakin650cb142024-06-20 10:31:57 +01002669 /*
2670 * This function is restricted to the secure virtual FF-A instance (i.e.
2671 * only report success to SPs).
2672 */
2673 case FFA_YIELD_32:
2674 if (!vm_id_is_current_world(current->vm->id)) {
2675 dlog_verbose(
2676 "FFA_FEATURES: %s is only supported at secure "
2677 "virtual FF-A instance\n",
2678 ffa_func_name(FFA_YIELD_32));
2679 return ffa_error(FFA_NOT_SUPPORTED);
2680 }
2681 return api_ffa_feature_success(0);
2682
Karl Meakin0fd67292024-02-06 17:33:05 +00002683 case FFA_RXTX_MAP_64: {
Karl Meakin0e617d92024-04-05 12:55:22 +01002684 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002685 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002686 }
2687
Karl Meakin0fd67292024-02-06 17:33:05 +00002688 uint32_t arg2 = 0;
2689 struct ffa_features_rxtx_map_params params = {
2690 .min_buf_size = FFA_RXTX_MAP_MIN_BUF_4K,
2691 .mbz = 0,
2692 .max_buf_size =
Karl Meakin0e617d92024-04-05 12:55:22 +01002693 (ffa_version >= FFA_VERSION_1_2)
Karl Meakin0fd67292024-02-06 17:33:05 +00002694 ? FFA_RXTX_MAP_MAX_BUF_PAGE_COUNT
2695 : 0,
2696 };
2697
2698 memcpy_s(&arg2, sizeof(arg2), &params, sizeof(params));
Karl Meakin49ec1e42024-05-10 13:08:24 +01002699 return api_ffa_feature_success(arg2);
Karl Meakin0fd67292024-02-06 17:33:05 +00002700 }
2701
J-Alves95fbb312024-03-20 15:19:16 +00002702 case FFA_MEM_RETRIEVE_REQ_64:
Karl Meakin49ec1e42024-05-10 13:08:24 +01002703 case FFA_MEM_RETRIEVE_REQ_32: {
Karl Meakin0e617d92024-04-05 12:55:22 +01002704 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002705 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002706 }
2707
Karl Meakin49ec1e42024-05-10 13:08:24 +01002708 if (ANY_BITS_SET(input_property,
2709 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_HI_BIT,
2710 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_LO_BIT) ||
2711 IS_BIT_SET(input_property,
2712 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_BIT)) {
2713 dlog_warning(
2714 "FFA_FEATURES: Bits[%u:%u] and Bit[%u] of "
2715 "input_property should be 0 (input_property = "
2716 "%#x)\n",
2717 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_HI_BIT,
2718 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_LO_BIT,
2719 FFA_FEATURES_MEM_RETRIEVE_REQ_MBZ_BIT,
Karl Meakin34b8ae92023-01-13 13:33:07 +00002720 input_property);
Karl Meakin34b8ae92023-01-13 13:33:07 +00002721 }
2722
Karl Meakin0e617d92024-04-05 12:55:22 +01002723 if (ffa_version >= FFA_VERSION_1_1 &&
2724 (input_property &
2725 FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT) == 0U) {
Karl Meakin4e8e4792024-07-05 16:28:42 +01002726 dlog_verbose(
2727 "FFA_FEATURES: NS bit support must be 1\n");
Karl Meakin6fd6c1d2024-05-13 12:09:13 +01002728 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin34b8ae92023-01-13 13:33:07 +00002729 }
2730
2731 return api_ffa_feature_success(
2732 FFA_FEATURES_MEM_RETRIEVE_REQ_BUFFER_SUPPORT |
Karl Meakin49ec1e42024-05-10 13:08:24 +01002733 FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT |
Karl Meakin34b8ae92023-01-13 13:33:07 +00002734 FFA_FEATURES_MEM_RETRIEVE_REQ_HYPERVISOR_SUPPORT);
Karl Meakin49ec1e42024-05-10 13:08:24 +01002735 }
J-Alves6f72ca82021-11-01 12:34:58 +00002736
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002737 default:
2738 return ffa_error(FFA_NOT_SUPPORTED);
2739 }
2740}
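
/*
 * Illustrative sketch (caller side, wrapper name assumed): probing for an
 * optional ABI before relying on it, e.g. the v1.2 extended direct request
 * handled in the switch above.
 *
 *	struct ffa_value ret = ffa_features(FFA_MSG_SEND_DIRECT_REQ2_64, 0);
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		// The extended direct request ABI may be invoked.
 *	}
 */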
2741
2742static struct ffa_value ffa_features_feature(enum ffa_feature_id feature,
2743 uint32_t input_property,
2744 struct vcpu *current)
2745{
2746 const bool el0_partition = current->vm->el0_partition;
2747
2748 if (ANY_BITS_SET(feature, FFA_FEATURES_FEATURE_MBZ_HI_BIT,
2749 FFA_FEATURES_FEATURE_MBZ_LO_BIT)) {
2750 dlog_verbose(
2751 "FFA_FEATURES: feature ID %#x is invalid (bits [%u:%u] "
2752 "must be zero)\n",
2753 feature, FFA_FEATURES_FEATURE_MBZ_HI_BIT,
2754 FFA_FEATURES_FEATURE_MBZ_LO_BIT);
2755 return ffa_error(FFA_NOT_SUPPORTED);
2756 }
2757 if (input_property != 0) {
2758 dlog_verbose(
2759 "FFA_FEATURES: input_property must be 0 "
2760 "(input_property = %#x)\n",
2761 input_property);
2762 return ffa_error(FFA_NOT_SUPPORTED);
2763 }
2764
2765 switch (feature) {
J-Alves6f72ca82021-11-01 12:34:58 +00002766 /* Check support of a feature provided respective feature ID. */
Karl Meakin17c9ad32024-04-12 16:41:00 +01002767
2768 /*
2769 * For NPI and MEI, report the IDs as supported only to partitions at
2770 * the virtual FF-A instances.
2771 */
J-Alves6f72ca82021-11-01 12:34:58 +00002772 case FFA_FEATURE_NPI:
Karl Meakin0e617d92024-04-05 12:55:22 +01002773 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002774 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002775 }
2776
Karl Meakinf1ed5f12024-02-22 15:57:36 +00002777 if (el0_partition) {
2778 return ffa_error(FFA_NOT_SUPPORTED);
2779 }
Karl Meakin17c9ad32024-04-12 16:41:00 +01002780 if (!vm_id_is_current_world(current->vm->id)) {
2781 return ffa_error(FFA_NOT_SUPPORTED);
2782 }
J-Alves6f72ca82021-11-01 12:34:58 +00002783 return api_ffa_feature_success(HF_NOTIFICATION_PENDING_INTID);
Karl Meakin17c9ad32024-04-12 16:41:00 +01002784
2785 case FFA_FEATURE_MEI:
Karl Meakin0e617d92024-04-05 12:55:22 +01002786 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002787 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002788 }
2789
Karl Meakin17c9ad32024-04-12 16:41:00 +01002790 if (!vm_id_is_current_world(current->vm->id)) {
2791 return ffa_error(FFA_NOT_SUPPORTED);
2792 }
2793 return api_ffa_feature_success(HF_MANAGED_EXIT_INTID);
2794
J-Alves6f72ca82021-11-01 12:34:58 +00002795 case FFA_FEATURE_SRI:
Karl Meakin0e617d92024-04-05 12:55:22 +01002796 if (FFA_VERSION_1_2 > FFA_VERSION_COMPILED) {
Karl Meakin4271ff92024-05-13 12:33:15 +01002797 return ffa_error(FFA_NOT_SUPPORTED);
Karl Meakin0e617d92024-04-05 12:55:22 +01002798 }
2799
Karl Meakincb6642c2024-02-27 14:43:45 +00002800 if (!ffa_is_vm_id(current->vm->id)) {
2801 return ffa_error(FFA_NOT_SUPPORTED);
2802 }
J-Alves6f72ca82021-11-01 12:34:58 +00002803 return api_ffa_feature_success(HF_SCHEDULE_RECEIVER_INTID);
Karl Meakin0e617d92024-04-05 12:55:22 +01002804
J-Alves6f72ca82021-11-01 12:34:58 +00002805 /* Platform specific feature support. */
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002806 default:
Karl Meakin4271ff92024-05-13 12:33:15 +01002807 return ffa_error(FFA_NOT_SUPPORTED);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002808 }
2809}
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002810
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002811/**
Karl Meakin9c5b1d32024-06-24 12:10:36 +01002812 * Discovery function returning information about the implementation of optional
2813 * FF-A interfaces. See section 13.3 of the FF-A v1.2 ALP1 spec.
2814 *
2815 * `function_or_feature_id` is interpreted as either a function ID or a feature
2816 * ID, depending on the value of bit 31.
2817 * When it is a feature ID, bits [30:8] MBZ and input_property MBZ.
2818 *
2819 * Returns `FFA_SUCCESS` if the interface is supported.
2820 * Returns `FFA_NOT_SUPPORTED` if the interface is not supported or the
2821 * parameters are invalid.
2822 */
2823struct ffa_value api_ffa_features(uint32_t function_or_feature_id,
2824 uint32_t input_property, struct vcpu *current)
2825{
2826 return IS_BIT_UNSET(function_or_feature_id, FFA_FEATURES_FEATURE_BIT)
2827 ? ffa_features_feature(function_or_feature_id,
2828 input_property, current)
2829 : ffa_features_function(function_or_feature_id,
2830 input_property, current);
2831}
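
/*
 * Sketch of the dispatch above: function IDs always have bit 31 set (they
 * are SMC function identifiers), while feature IDs such as FFA_FEATURE_NPI
 * have it clear. Querying a feature ID returns its interrupt ID in w2, e.g.
 * (wrapper name assumed):
 *
 *	struct ffa_value ret = ffa_features(FFA_FEATURE_NPI, 0);
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		uint32_t npi_intid = (uint32_t)ret.arg2;
 *	}
 */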
2832
2833/**
Kathleen Capella41fea932023-06-23 17:39:28 -04002834 * FF-A specification states that x2/w2 Must Be Zero for FFA_MSG_SEND_DIRECT_REQ
2835 * and FFA_MSG_SEND_DIRECT_RESP interfaces when used for partition messages. See
2836 * FF-A v1.2 Table 16.6: FFA_MSG_SEND_DIRECT_REQ function syntax.
J-Alves645eabe2021-02-22 16:08:27 +00002837 */
2838static inline bool api_ffa_dir_msg_is_arg2_zero(struct ffa_value args)
2839{
2840 return args.arg2 == 0U;
2841}
2842
2843/**
J-Alves76d99af2021-03-10 17:42:11 +00002844 * Limits size of arguments in ffa_value structure to 32-bit.
2845 */
2846static struct ffa_value api_ffa_value_copy32(struct ffa_value args)
2847{
2848 return (struct ffa_value){
2849 .func = (uint32_t)args.func,
2850 .arg1 = (uint32_t)args.arg1,
2851 .arg2 = (uint32_t)0,
2852 .arg3 = (uint32_t)args.arg3,
2853 .arg4 = (uint32_t)args.arg4,
2854 .arg5 = (uint32_t)args.arg5,
2855 .arg6 = (uint32_t)args.arg6,
2856 .arg7 = (uint32_t)args.arg7,
2857 };
2858}
2859
2860/**
 * Helper to copy the direct message payload, depending on the SMC used, the
 * direct messaging interface used, and the expected register size.
J-Alves76d99af2021-03-10 17:42:11 +00002863 */
2864static struct ffa_value api_ffa_dir_msg_value(struct ffa_value args)
2865{
2866 if (args.func == FFA_MSG_SEND_DIRECT_REQ_32 ||
2867 args.func == FFA_MSG_SEND_DIRECT_RESP_32) {
2868 return api_ffa_value_copy32(args);
2869 }
2870
Kathleen Capella41fea932023-06-23 17:39:28 -04002871 if (args.func == FFA_MSG_SEND_DIRECT_REQ_64 ||
2872 args.func == FFA_MSG_SEND_DIRECT_RESP_64) {
2873 args.arg2 = 0;
2874 }
2875
Kathleen Capelladb6a08b2023-10-04 13:42:39 -04002876 if (args.func == FFA_MSG_SEND_DIRECT_REQ2_64) {
2877 args.extended_val.valid = true;
2878 }
2879
Kathleen Capella087e5022023-09-07 18:04:15 -04002880 if (args.func == FFA_MSG_SEND_DIRECT_RESP2_64) {
2881 args.arg2 = 0;
2882 args.arg3 = 0;
2883 }
2884
Kathleen Capella41fea932023-06-23 17:39:28 -04002885 return args;
2886}
2887
2888static bool api_ffa_dir_msg_req2_is_uuid_valid(struct vm *receiver_vm,
2889 struct ffa_value args)
2890{
2891 struct ffa_uuid target_uuid;
Kathleen Capella422b10b2023-06-30 18:28:27 -04002892 uint16_t i;
Kathleen Capella41fea932023-06-23 17:39:28 -04002893
2894 ffa_uuid_unpack_from_uint64(args.arg2, args.arg3, &target_uuid);
2895
Kathleen Capella422b10b2023-06-30 18:28:27 -04002896 /* Allow for use of Nil UUID. */
2897 if (ffa_uuid_is_null(&target_uuid)) {
2898 return true;
2899 }
2900
2901 for (i = 0; i < PARTITION_MAX_UUIDS; i++) {
2902 if (ffa_uuid_is_null(&receiver_vm->uuids[i])) {
2903 break;
2904 }
2905 if (ffa_uuid_equal(&target_uuid, &receiver_vm->uuids[i])) {
2906 return true;
2907 }
2908 }
2909 return false;
J-Alves76d99af2021-03-10 17:42:11 +00002910}
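
/*
 * Sketch of the sender-side convention checked above: for
 * FFA_MSG_SEND_DIRECT_REQ2_64 the target UUID travels packed into x2/x3,
 * which ffa_uuid_unpack_from_uint64() undoes. A sender could set it up as
 * follows (the packing helper is an assumption for the example):
 *
 *	uint64_t uuid_lo;
 *	uint64_t uuid_hi;
 *
 *	ffa_uuid_pack_to_uint64(&target_uuid, &uuid_lo, &uuid_hi);
 *	args.arg2 = uuid_lo;
 *	args.arg3 = uuid_hi;
 */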
2911
2912/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002913 * Send an FF-A direct message request.
Kathleen Capella41fea932023-06-23 17:39:28 -04002914 * This handler covers both FFA_MSG_SEND_DIRECT_REQ_32/64
2915 * and FFA_MSG_SEND_DIRECT_REQ2_64 (introduced in FF-A v1.2) with function-based
 * checks to accommodate the differences between the ABIs.
2917 *
2918 * FFA_MSG_SEND_DIRECT_REQ2_64 works mostly the same as
2919 * FFA_MSG_SEND_DIRECT_REQ_32/64, but adds the ability to send a direct message
2920 * request to a specified UUID within a partition and the usage of an extended
2921 * range of registers (x4-x17, instead of x4-x7) to be used as part of the
2922 * message payload.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002923 */
J-Alves19e20cf2023-08-02 12:48:55 +01002924struct ffa_value api_ffa_msg_send_direct_req(ffa_id_t sender_vm_id,
2925 ffa_id_t receiver_vm_id,
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002926 struct ffa_value args,
2927 struct vcpu *current,
2928 struct vcpu **next)
2929{
J-Alves17228f72021-04-20 17:13:19 +01002930 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002931 struct vm *receiver_vm;
J-Alves6e2abc62021-12-02 14:58:56 +00002932 struct vm_locked receiver_locked;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002933 struct vcpu *receiver_vcpu;
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06002934 struct vcpu_locked current_locked;
2935 struct vcpu_locked receiver_vcpu_locked;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002936 struct two_vcpu_locked vcpus_locked;
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05002937 enum vcpu_state next_state = VCPU_STATE_RUNNING;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002938
Kathleen Capella41fea932023-06-23 17:39:28 -04002939 if ((args.func == FFA_MSG_SEND_DIRECT_REQ_32 ||
2940 args.func == FFA_MSG_SEND_DIRECT_REQ_64) &&
2941 !api_ffa_dir_msg_is_arg2_zero(args)) {
J-Alves645eabe2021-02-22 16:08:27 +00002942 return ffa_error(FFA_INVALID_PARAMETERS);
2943 }
2944
Olivier Deprez55a189e2021-06-09 15:45:27 +02002945 if (!plat_ffa_is_direct_request_valid(current, sender_vm_id,
2946 receiver_vm_id)) {
J-Alvese0caac32022-12-19 14:37:08 +00002947 dlog_verbose("Invalid direct message request.\n");
J-Alvesaa336102021-03-01 13:02:45 +00002948 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002949 }
2950
Olivier Deprez55a189e2021-06-09 15:45:27 +02002951 if (plat_ffa_direct_request_forward(receiver_vm_id, args, &ret)) {
J-Alves17228f72021-04-20 17:13:19 +01002952 return ret;
2953 }
2954
2955 ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
2956
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002957 receiver_vm = vm_find(receiver_vm_id);
2958 if (receiver_vm == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002959 dlog_verbose("Invalid Receiver!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002960 return ffa_error(FFA_INVALID_PARAMETERS);
2961 }
2962
Kathleen Capella41fea932023-06-23 17:39:28 -04002963 if (args.func == FFA_MSG_SEND_DIRECT_REQ2_64 &&
2964 !api_ffa_dir_msg_req2_is_uuid_valid(receiver_vm, args)) {
2965 dlog_verbose("UUID unrecognized for this VM\n");
2966 return ffa_error(FFA_INVALID_PARAMETERS);
2967 }
2968
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002969 /*
J-Alves439ac972021-11-18 17:32:03 +00002970 * Check if sender supports sending direct message req, and if
2971 * receiver supports receipt of direct message requests.
2972 */
Kathleen Capella41fea932023-06-23 17:39:28 -04002973 if (!plat_ffa_is_direct_request_supported(current->vm, receiver_vm,
2974 args.func)) {
J-Alves439ac972021-11-18 17:32:03 +00002975 return ffa_error(FFA_DENIED);
2976 }
2977
2978 /*
Olivier Deprezc13a8692022-04-08 17:47:14 +02002979 * Per FF-A EAC spec section 4.4.1 the firmware framework supports
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002980 * UP (migratable) or MP partitions with a number of vCPUs matching the
2981 * number of PEs in the system. It further states that MP partitions
2982 * accepting direct request messages cannot migrate.
2983 */
J-Alvesad6a0432021-04-09 16:06:21 +01002984 receiver_vcpu = api_ffa_get_vm_vcpu(receiver_vm, current);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002985 if (receiver_vcpu == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002986 dlog_verbose("Invalid vCPU!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002987 return ffa_error(FFA_INVALID_PARAMETERS);
2988 }
2989
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06002990 /*
	 * If the VM is to be locked, this must be done before any of its
	 * vCPUs are locked.
2993 */
J-Alves6e2abc62021-12-02 14:58:56 +00002994 receiver_locked = vm_lock(receiver_vm);
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06002995
2996 /* Lock both vCPUs at once to avoid deadlock. */
2997 vcpus_locked = vcpu_lock_both(current, receiver_vcpu);
2998 current_locked = vcpus_locked.vcpu1;
2999 receiver_vcpu_locked = vcpus_locked.vcpu2;
3000
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003001 /*
	 * If the destination vCPU is executing or has already received an
	 * FFA_MSG_SEND_DIRECT_REQ, return to the caller hinting that the
	 * recipient is busy. There is a brief period of time where the vCPU
	 * state has changed but regs_available is still false, so treat this
	 * case as the vCPU not yet being ready to receive a direct message
	 * request.
3007 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003008 if (is_ffa_direct_msg_request_ongoing(receiver_vcpu_locked) ||
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003009 receiver_vcpu->state == VCPU_STATE_RUNNING ||
3010 !receiver_vcpu->regs_available) {
J-Alves88a13542021-12-14 15:39:52 +00003011 dlog_verbose("Receiver is busy with another request.\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003012 ret = ffa_error(FFA_BUSY);
3013 goto out;
3014 }
3015
J-Alves7ea3f7e2024-03-27 11:05:11 +00003016 if (!plat_ffa_check_runtime_state_transition(
3017 current_locked, sender_vm_id, HF_INVALID_VM_ID,
3018 receiver_vcpu_locked, args.func, &next_state)) {
3019 ret = ffa_error(FFA_DENIED);
3020 goto out;
3021 }
3022
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003023 if (atomic_load_explicit(&receiver_vcpu->vm->aborting,
3024 memory_order_relaxed)) {
3025 if (receiver_vcpu->state != VCPU_STATE_ABORTED) {
Kathleen Capella6e9765b2023-07-11 17:44:58 -04003026 dlog_verbose(
3027 "Receiver VM %#x aborted, cannot run vCPU %u\n",
3028 receiver_vcpu->vm->id,
3029 vcpu_index(receiver_vcpu));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003030 receiver_vcpu->state = VCPU_STATE_ABORTED;
3031 }
3032
3033 ret = ffa_error(FFA_ABORTED);
3034 goto out;
3035 }
3036
3037 switch (receiver_vcpu->state) {
3038 case VCPU_STATE_OFF:
3039 case VCPU_STATE_RUNNING:
3040 case VCPU_STATE_ABORTED:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003041 case VCPU_STATE_BLOCKED_INTERRUPT:
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05003042 case VCPU_STATE_BLOCKED:
3043 case VCPU_STATE_PREEMPTED:
J-Alves88a13542021-12-14 15:39:52 +00003044 dlog_verbose("Receiver's vCPU can't receive request (%u)!\n",
3045 vcpu_index(receiver_vcpu));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003046 ret = ffa_error(FFA_BUSY);
3047 goto out;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05003048 case VCPU_STATE_WAITING:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003049 /*
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05003050 * We expect target vCPU to be in WAITING state after either
3051 * having called ffa_msg_wait or sent a direct message response.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003052 */
3053 break;
3054 }
3055
3056 /* Inject timer interrupt if any pending */
3057 if (arch_timer_pending(&receiver_vcpu->regs)) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003058 api_interrupt_inject_locked(receiver_vcpu_locked,
3059 HF_VIRTUAL_TIMER_INTID,
3060 current_locked, NULL);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003061
3062 arch_timer_mask(&receiver_vcpu->regs);
3063 }
3064
3065 /* The receiver vCPU runs upon direct message invocation */
3066 receiver_vcpu->cpu = current->cpu;
3067 receiver_vcpu->state = VCPU_STATE_RUNNING;
3068 receiver_vcpu->regs_available = false;
Kathleen Capellae468c112023-12-13 17:56:28 -05003069 receiver_vcpu->direct_request_origin.is_ffa_req2 =
3070 (args.func == FFA_MSG_SEND_DIRECT_REQ2_64);
3071 receiver_vcpu->direct_request_origin.vm_id = sender_vm_id;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003072
J-Alves76d99af2021-03-10 17:42:11 +00003073 arch_regs_set_retval(&receiver_vcpu->regs, api_ffa_dir_msg_value(args));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003074
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05003075 assert(!vm_id_is_current_world(current->vm->id) ||
3076 next_state == VCPU_STATE_BLOCKED);
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05003077 current->state = VCPU_STATE_BLOCKED;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003078
Raghu Krishnamurthya9ccf122023-03-27 20:42:01 -07003079 plat_ffa_wind_call_chain_ffa_direct_req(
3080 current_locked, receiver_vcpu_locked, sender_vm_id);
Madhukar Pappireddy49fe6702022-06-21 17:52:23 -05003081
	/* Switch to the receiver vCPU targeted by the direct msg request. */
3083 *next = receiver_vcpu;
3084
J-Alves6e2abc62021-12-02 14:58:56 +00003085 if (!receiver_locked.vm->el0_partition) {
3086 /*
3087 * If the scheduler in the system is giving CPU cycles to the
3088 * receiver, due to pending notifications, inject the NPI
3089 * interrupt. Following call assumes that '*next' has been set
3090 * to receiver_vcpu.
3091 */
3092 plat_ffa_inject_notification_pending_interrupt(
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003093 receiver_vcpu_locked, current_locked, receiver_locked);
J-Alves6e2abc62021-12-02 14:58:56 +00003094 }
3095
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003096 /*
3097 * Since this flow will lead to a VM switch, the return value will not
3098 * be applied to current vCPU.
3099 */
3100
3101out:
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003102 vcpu_unlock(&receiver_vcpu_locked);
J-Alves6e2abc62021-12-02 14:58:56 +00003103 vm_unlock(&receiver_locked);
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003104 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003105
3106 return ret;
3107}
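
/*
 * Illustrative sketch (wrapper signature assumed): a direct request from
 * the caller's point of view, with the payload in w3-w7 and the
 * sender/receiver pair packed into w1.
 *
 *	struct ffa_value ret = ffa_msg_send_direct_req(
 *		own_id, receiver_id, arg3, arg4, arg5, arg6, arg7);
 *
 *	if (ret.func == FFA_MSG_SEND_DIRECT_RESP_32) {
 *		// Response payload is in ret.arg3..arg7.
 *	} else if (ret.func == FFA_ERROR_32) {
 *		// e.g. FFA_BUSY: the receiver vCPU was not in WAITING state.
 *	}
 */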
3108
3109/**
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003110 * Resume the target vCPU after the current vCPU sent a direct response.
 * The current vCPU moves to the waiting state.
3112 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003113void api_ffa_resume_direct_resp_target(struct vcpu_locked current_locked,
3114 struct vcpu **next,
J-Alves19e20cf2023-08-02 12:48:55 +01003115 ffa_id_t receiver_vm_id,
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003116 struct ffa_value to_ret,
3117 bool is_nwd_call_chain)
3118{
Raghu Krishnamurthya9ccf122023-03-27 20:42:01 -07003119 if (plat_ffa_is_spmd_lp_id(receiver_vm_id) ||
3120 !vm_id_is_current_world(receiver_vm_id)) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003121 *next = api_switch_to_other_world(current_locked, to_ret,
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003122 VCPU_STATE_WAITING);
3123
3124 /* End of NWd scheduled call chain. */
3125 assert(!is_nwd_call_chain ||
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003126 (current_locked.vcpu->call_chain.prev_node == NULL));
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003127 } else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003128 *next = api_switch_to_primary(current_locked, to_ret,
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003129 VCPU_STATE_WAITING);
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003130 } else if (vm_id_is_current_world(receiver_vm_id)) {
3131 /*
		 * The receiver_vm_id is expected to belong to an SP; otherwise
		 * 'plat_ffa_is_direct_response_valid' should have made the
		 * function return an error before getting to this point.
3135 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003136 *next = api_switch_to_vm(current_locked, to_ret,
3137 VCPU_STATE_WAITING, receiver_vm_id);
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003138 } else {
3139 panic("Invalid direct message response invocation");
3140 }
3141}
3142
3143/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003144 * Send an FF-A direct message response.
Kathleen Capella087e5022023-09-07 18:04:15 -04003145 * This handler covers both FFA_MSG_SEND_DIRECT_RESP_32/64
3146 * and FFA_MSG_SEND_DIRECT_RESP2_64 (introduced in FF-A v1.2) with
3147 * function-based checks to accomodate for the difference between the ABIs.
3148 *
3149 * FFA_MSG_SEND_DIRECT_RESP2_64 is used to respond to requests sent via
 * FFA_MSG_SEND_DIRECT_REQ2_64 and extends the range of registers carrying
 * the message payload (x4-x17, instead of x4-x7).
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003153 */
J-Alves19e20cf2023-08-02 12:48:55 +01003154struct ffa_value api_ffa_msg_send_direct_resp(ffa_id_t sender_vm_id,
3155 ffa_id_t receiver_vm_id,
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003156 struct ffa_value args,
3157 struct vcpu *current,
3158 struct vcpu **next)
3159{
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02003160 struct vcpu_locked current_locked;
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003161 struct vcpu_locked next_locked = (struct vcpu_locked){
3162 .vcpu = NULL,
3163 };
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05003164 enum vcpu_state next_state = VCPU_STATE_RUNNING;
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003165 struct ffa_value ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003166 struct ffa_value signal_interrupt =
3167 (struct ffa_value){.func = FFA_INTERRUPT_32};
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003168 struct ffa_value to_ret = api_ffa_dir_msg_value(args);
3169 struct two_vcpu_locked vcpus_locked;
Kathleen Capellae468c112023-12-13 17:56:28 -05003170 bool received_req2;
J-Alves645eabe2021-02-22 16:08:27 +00003171
Kathleen Capella35238872024-01-12 15:05:52 -05003172 /*
	 * If using FFA_MSG_SEND_DIRECT_RESP, in the caller's registers:
	 * - x2 MBZ for partition messages
	 * - x8-x17 SBZ if the caller's FF-A version >= FF-A v1.2
3176 */
3177 if (args.func != FFA_MSG_SEND_DIRECT_RESP2_64) {
3178 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
3179 return ffa_error(FFA_INVALID_PARAMETERS);
3180 }
3181
Karl Meakin0e617d92024-04-05 12:55:22 +01003182 if (current->vm->ffa_version >= FFA_VERSION_1_2 &&
Kathleen Capella35238872024-01-12 15:05:52 -05003183 !api_extended_args_are_zero(&args)) {
3184 return ffa_error(FFA_INVALID_PARAMETERS);
3185 }
J-Alves645eabe2021-02-22 16:08:27 +00003186 }
3187
Olivier Deprez55a189e2021-06-09 15:45:27 +02003188 if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
3189 receiver_vm_id)) {
J-Alvese0caac32022-12-19 14:37:08 +00003190 dlog_verbose("Invalid direct response call.\n");
J-Alvesaa336102021-03-01 13:02:45 +00003191 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003192 }
3193
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003194 current_locked = vcpu_lock(current);
3195
3196 if (!plat_ffa_check_runtime_state_transition(
3197 current_locked, sender_vm_id, receiver_vm_id, next_locked,
3198 args.func, &next_state)) {
3199 ret = ffa_error(FFA_DENIED);
3200 goto out;
Madhukar Pappireddyc857ac22022-06-21 17:37:53 -05003201 }
3202
Madhukar Pappireddyd456ac22023-06-26 15:29:34 -05003203 assert(!vm_id_is_current_world(current->vm->id) ||
3204 next_state == VCPU_STATE_WAITING);
Madhukar Pappireddy469fa712022-06-21 18:53:33 -05003205
3206 /*
3207 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
3208 * defined originator.
3209 */
3210 if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
3211 /*
3212 * Sending direct response but direct request origin
3213 * vCPU is not set.
3214 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003215 ret = ffa_error(FFA_DENIED);
3216 goto out;
Madhukar Pappireddy469fa712022-06-21 18:53:33 -05003217 }
3218
Kathleen Capellae468c112023-12-13 17:56:28 -05003219 received_req2 = current->direct_request_origin.is_ffa_req2;
3220
3221 if (args.func != FFA_MSG_SEND_DIRECT_RESP2_64 && received_req2) {
3222 dlog_verbose(
3223 "%s: FFA_MSG_SEND_DIRECT_RESP must be used with "
3224 "FFA_MSG_SEND_DIRECT_REQ.\n",
3225 __func__);
3226 ret = ffa_error(FFA_DENIED);
3227 goto out;
3228 } else if (args.func == FFA_MSG_SEND_DIRECT_RESP2_64 &&
3229 !received_req2) {
3230 dlog_verbose(
3231 "%s: FFA_MSG_SEND_DIRECT_RESP2 must be used with "
3232 "FFA_MSG_SEND_DIRECT_REQ2.\n",
3233 __func__);
3234 ret = ffa_error(FFA_DENIED);
3235 goto out;
3236 }
3237
Manish Pandeya5f39fb2020-09-11 09:47:11 +01003238 if (api_ffa_is_managed_exit_ongoing(current_locked)) {
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003239 /*
		 * Per FF-A v1.1 EAC0 section 8.3.1.2.1 rule 6, the SPMC can
		 * signal a secure interrupt to an SP that is performing a
		 * managed exit. We have made an implementation-defined choice
		 * not to allow managed exit while an SP is processing a
		 * secure interrupt.
3244 */
Madhukar Pappireddy7945bb52024-08-20 15:22:41 -05003245 CHECK(current->scheduling_mode != SPMC_MODE);
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05003246
Madhukar Pappireddydd6fdfb2021-12-14 12:30:36 -06003247 plat_interrupts_set_priority_mask(current->priority_mask);
Madhukar Pappireddya1019112022-06-21 18:57:44 -05003248 /*
		 * An SP may be signaled a managed exit but never trap the
		 * virtual interrupt (for instance because it has virtual
		 * interrupts masked) and emit a direct response instead. In
		 * this case the managed exit operation is considered complete,
		 * and the pending managed exit flag must also be cleared for
		 * the SP's vCPU.
3255 */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01003256 current->processing_managed_exit = false;
Madhukar Pappireddya1019112022-06-21 18:57:44 -05003257 struct interrupts *interrupts = &current->interrupts;
3258
3259 if (vcpu_is_virt_interrupt_pending(interrupts,
3260 HF_MANAGED_EXIT_INTID)) {
J-Alvesb8730e92024-08-07 18:28:55 +01003261 vcpu_interrupt_clear_decrement(current_locked,
3262 HF_MANAGED_EXIT_INTID);
Madhukar Pappireddya1019112022-06-21 18:57:44 -05003263 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003264 }
3265
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003266 if (plat_ffa_intercept_direct_response(current_locked, next, to_ret,
3267 &signal_interrupt)) {
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003268 ret = signal_interrupt;
3269 goto out;
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -05003270 }
3271
Kathleen Capellae468c112023-12-13 17:56:28 -05003272 /* Clear direct request origin vm_id and request type for the caller. */
3273 current->direct_request_origin.is_ffa_req2 = false;
3274 current->direct_request_origin.vm_id = HF_INVALID_VM_ID;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003275
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003276 api_ffa_resume_direct_resp_target(current_locked, next, receiver_vm_id,
3277 to_ret, false);
3278
3279 /*
3280 * Unlock current vCPU to allow it to be locked together with next
3281 * vcpu.
3282 */
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02003283 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003284
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003285 /* Lock both vCPUs at once to avoid deadlock. */
3286 vcpus_locked = vcpu_lock_both(current, *next);
3287 current_locked = vcpus_locked.vcpu1;
3288 next_locked = vcpus_locked.vcpu2;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003289
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003290 plat_ffa_unwind_call_chain_ffa_direct_resp(current_locked, next_locked);
3291 vcpu_unlock(&next_locked);
Madhukar Pappireddyc0fb87e2022-06-21 17:59:15 -05003292
Madhukar Pappireddybd10e572023-03-06 16:39:49 -06003293out:
3294 vcpu_unlock(&current_locked);
3295 return ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00003296}
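
/*
 * Illustrative sketch (SP message loop, wrapper names assumed): an SP
 * servicing direct requests alternates FFA_MSG_WAIT and a direct response,
 * which is exactly the WAITING-state round trip the checks above enforce.
 *
 *	struct ffa_value req = ffa_msg_wait();
 *
 *	for (;;) {
 *		ffa_id_t sender = ffa_sender(req);
 *		uint64_t reply = handle_request(req); // hypothetical
 *
 *		req = ffa_msg_send_direct_resp(own_id, sender, reply,
 *					       0, 0, 0, 0);
 *	}
 */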
3297
J-Alves84658fc2021-06-17 14:37:32 +01003298static bool api_memory_region_check_flags(
3299 struct ffa_memory_region *memory_region, uint32_t share_func)
3300{
3301 switch (share_func) {
J-Alves95fbb312024-03-20 15:19:16 +00003302 case FFA_MEM_SHARE_64:
J-Alves84658fc2021-06-17 14:37:32 +01003303 case FFA_MEM_SHARE_32:
3304 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
3305 0U) {
3306 return false;
3307 }
3308 /* Intentional fall-through */
J-Alves95fbb312024-03-20 15:19:16 +00003309 case FFA_MEM_LEND_64:
J-Alves84658fc2021-06-17 14:37:32 +01003310 case FFA_MEM_LEND_32:
J-Alves95fbb312024-03-20 15:19:16 +00003311 case FFA_MEM_DONATE_64:
J-Alves84658fc2021-06-17 14:37:32 +01003312 case FFA_MEM_DONATE_32: {
3313 /* Bits 31:2 Must Be Zero. */
3314 ffa_memory_receiver_flags_t to_mask =
3315 ~(FFA_MEMORY_REGION_FLAG_CLEAR |
3316 FFA_MEMORY_REGION_FLAG_TIME_SLICE);
3317
3318 if ((memory_region->flags & to_mask) != 0U) {
3319 return false;
3320 }
3321 break;
3322 }
3323 default:
3324 panic("Check for mem send calls only.\n");
3325 }
3326
	/* At this point all reserved flag bits have been checked to be 0. */
3328 return true;
3329}
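
/*
 * Sketch of the flag rule enforced above: FFA_MEM_SHARE must not ask the
 * relayer to clear memory, while lend and donate may. For example:
 *
 *	memory_region->flags = FFA_MEMORY_REGION_FLAG_CLEAR;
 *
 * passes this check for FFA_MEM_LEND_32/64 but fails it for
 * FFA_MEM_SHARE_32/64.
 */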
3330
J-Alves33c47bf2022-09-29 11:36:20 +01003331/*
3332 * Convert memory transaction descriptor from FF-A v1.0 to FF-A v1.1 EAC0.
3333 */
3334static void api_ffa_memory_region_v1_1_from_v1_0(
3335 struct ffa_memory_region_v1_0 *memory_region_v1_0,
3336 struct ffa_memory_region *memory_region_v1_1)
3337{
3338 memory_region_v1_1->sender = memory_region_v1_0->sender;
J-Alves00847062022-10-03 17:08:44 +01003339 memory_region_v1_1->handle = memory_region_v1_0->handle;
Karl Meakin84710f32023-10-12 15:14:49 +01003340 memory_region_v1_1->attributes =
3341 ffa_memory_attributes_extend(memory_region_v1_0->attributes);
J-Alves33c47bf2022-09-29 11:36:20 +01003342 memory_region_v1_1->flags = memory_region_v1_0->flags;
3343 memory_region_v1_1->tag = memory_region_v1_0->tag;
3344 memory_region_v1_1->memory_access_desc_size =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003345 sizeof(struct ffa_memory_access_v1_0);
J-Alves33c47bf2022-09-29 11:36:20 +01003346 memory_region_v1_1->receiver_count = memory_region_v1_0->receiver_count;
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01003347 memory_region_v1_1->receivers_offset = sizeof(struct ffa_memory_region);
J-Alves33c47bf2022-09-29 11:36:20 +01003348
3349 /* Zero reserved fields. */
3350 for (uint32_t i = 0; i < 3U; i++) {
3351 memory_region_v1_1->reserved[i] = 0U;
3352 }
3353}
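
/*
 * Layout sketch of the translation above (illustrative): the v1.0 header is
 * directly followed by an inline receivers[] array, while the v1.1 header
 * carries {memory_access_desc_size, receiver_count, receivers_offset} and
 * the receivers are placed at receivers_offset (here, right after the
 * header):
 *
 *	v1.0: [header | receivers[] | composite | constituents...]
 *	v1.1: [header | receivers @ receivers_offset | composite | ...]
 */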
3354
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003355/**
3356 * Updates a v1.0 transaction descriptor to v1.1. This gives us the
 * memory_access_desc_size field we need for forward compatibility.
3358 * Copy the receivers and composite descriptors to the new struct.
3359 * We also check the fields in the v1.0 transaction descriptor and return:
 * - FFA_ERROR FFA_INVALID_PARAMETERS: If any of the fields do not hold valid
 * values, e.g. the reserved fields are not 0, receiver_count is too large,
 * or composite offsets are non-zero for retrieve requests or out of bounds
 * for send requests.
 * - FFA_ERROR FFA_NOT_SUPPORTED: If an invalid ffa_version is supplied to
 * the function, or the fragment length is more than a single page.
3366 * - FFA_ERROR FFA_NO_MEMORY: If we do not have enough memory for a scratch
3367 * memory transaction descriptor.
 * - FFA_SUCCESS: If a successful update has occurred.
J-Alves33c47bf2022-09-29 11:36:20 +01003369 */
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003370static struct ffa_value api_ffa_memory_transaction_descriptor_v1_1_from_v1_0(
3371 void *allocated, uint32_t *fragment_length, uint32_t *total_length,
Karl Meakin0e617d92024-04-05 12:55:22 +01003372 enum ffa_version ffa_version, bool send_transaction)
J-Alves33c47bf2022-09-29 11:36:20 +01003373{
3374 struct ffa_memory_region_v1_0 *memory_region_v1_0;
3375 struct ffa_memory_region *memory_region_v1_1 = NULL;
3376 struct ffa_composite_memory_region *composite_v1_0;
3377 struct ffa_composite_memory_region *composite_v1_1;
3378 size_t receivers_length;
3379 size_t space_left;
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003380 size_t receivers_end;
J-Alves33c47bf2022-09-29 11:36:20 +01003381 size_t composite_offset_v1_1;
3382 size_t composite_offset_v1_0;
3383 size_t fragment_constituents_size;
3384 size_t fragment_length_v1_1;
3385
J-Alves33c47bf2022-09-29 11:36:20 +01003386 assert(fragment_length != NULL);
3387 assert(total_length != NULL);
3388
Karl Meakin0e617d92024-04-05 12:55:22 +01003389 if (ffa_version >= FFA_VERSION_1_1) {
J-Alves33c47bf2022-09-29 11:36:20 +01003390 return (struct ffa_value){.func = FFA_SUCCESS_32};
3391 }
3392
Karl Meakin0e617d92024-04-05 12:55:22 +01003393 if (ffa_version != FFA_VERSION_1_0) {
J-Alves33c47bf2022-09-29 11:36:20 +01003394 dlog_verbose("%s: Unsupported FF-A version %x\n", __func__,
3395 ffa_version);
3396 return ffa_error(FFA_NOT_SUPPORTED);
3397 }
3398
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003399 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003400 "Updating memory transaction descriptor from v1.0 to v1.1.\n");
J-Alves33c47bf2022-09-29 11:36:20 +01003401
3402 memory_region_v1_0 = (struct ffa_memory_region_v1_0 *)allocated;
3403
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003404 receivers_length = sizeof(struct ffa_memory_access_v1_0) *
J-Alves33c47bf2022-09-29 11:36:20 +01003405 memory_region_v1_0->receiver_count;
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003406 receivers_end = sizeof(struct ffa_memory_region) + receivers_length;
J-Alves33c47bf2022-09-29 11:36:20 +01003407
3408 /*
3409 * Check the specified composite offset of v1.0 descriptor, and that all
3410 * receivers were configured with the same offset.
3411 */
3412 composite_offset_v1_0 =
3413 memory_region_v1_0->receivers[0].composite_memory_region_offset;
3414
J-Alves33c47bf2022-09-29 11:36:20 +01003415 /* Determine the composite offset for v1.1 descriptor. */
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003416 if (send_transaction) {
3417 fragment_constituents_size =
3418 *fragment_length - composite_offset_v1_0 -
3419 sizeof(struct ffa_composite_memory_region);
3420 fragment_length_v1_1 =
3421 receivers_end +
3422 sizeof(struct ffa_composite_memory_region) +
3423 fragment_constituents_size;
3424 composite_offset_v1_1 = receivers_end;
3425 } else {
3426 fragment_constituents_size = 0;
3427 fragment_length_v1_1 = receivers_end;
3428 composite_offset_v1_1 = 0;
3429 }
J-Alves33c47bf2022-09-29 11:36:20 +01003430
3431 /*
3432 * Currently only support the simpler cases: memory transaction
3433 * in a single fragment that fits in a MM_PPOOL_ENTRY_SIZE.
3434 * TODO: allocate the entries needed for this fragment_length_v1_1.
3435 * - Check corner when v1.1 descriptor converted size surpasses
3436 * the size of the entry.
3437 */
3438 if (fragment_length_v1_1 > MM_PPOOL_ENTRY_SIZE) {
3439 dlog_verbose(
			"Translation of FF-A v1.0 descriptors larger than "
			"%lu bytes is unsupported.",
3442 MM_PPOOL_ENTRY_SIZE);
3443 return ffa_error(FFA_NOT_SUPPORTED);
3444 }
3445
3446 space_left = fragment_length_v1_1;
3447
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003448 /*
3449 * Allocate a page of memory to construct the v1.1 memory descriptor.
3450 * Earlier we checked that the fragment_length_v1_1 would not be larger
3451 * than a page.
3452 */
J-Alves33c47bf2022-09-29 11:36:20 +01003453 memory_region_v1_1 =
3454 (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
3455 if (memory_region_v1_1 == NULL) {
3456 return ffa_error(FFA_NO_MEMORY);
3457 }
3458
3459 /* Translate header from v1.0 to v1.1. */
3460 api_ffa_memory_region_v1_1_from_v1_0(memory_region_v1_0,
3461 memory_region_v1_1);
3462
3463 space_left -= sizeof(struct ffa_memory_region);
3464
3465 /* Copy memory access information. */
Daniel Boulby41ef8ba2023-10-13 17:01:22 +01003466 memcpy_s((uint8_t *)memory_region_v1_1 +
3467 memory_region_v1_1->receivers_offset,
3468 space_left, memory_region_v1_0->receivers, receivers_length);
J-Alves33c47bf2022-09-29 11:36:20 +01003469
3470 /* Initialize the memory access descriptors with composite offset. */
3471 for (uint32_t i = 0; i < memory_region_v1_1->receiver_count; i++) {
3472 struct ffa_memory_access *receiver =
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003473 ffa_memory_region_get_receiver(memory_region_v1_1, i);
3474 assert(receiver != NULL);
J-Alves33c47bf2022-09-29 11:36:20 +01003475 receiver->composite_memory_region_offset =
3476 composite_offset_v1_1;
3477 }
3478
3479 space_left -= receivers_length;
3480
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003481 /* Composite memory descriptors to copy. */
3482 if (send_transaction) {
3483 /* Init v1.1 composite. */
3484 composite_v1_1 = (struct ffa_composite_memory_region
3485 *)((uint8_t *)memory_region_v1_1 +
3486 composite_offset_v1_1);
J-Alves33c47bf2022-09-29 11:36:20 +01003487
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003488 composite_v1_0 = ffa_memory_region_get_composite_v1_0(
3489 memory_region_v1_0, 0);
3490 composite_v1_1->constituent_count =
3491 composite_v1_0->constituent_count;
3492 composite_v1_1->page_count = composite_v1_0->page_count;
J-Alves33c47bf2022-09-29 11:36:20 +01003493
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003494 space_left -= sizeof(struct ffa_composite_memory_region);
J-Alves33c47bf2022-09-29 11:36:20 +01003495
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003496 /* Initialize v1.1 constituents. */
3497 memcpy_s(composite_v1_1->constituents, space_left,
3498 composite_v1_0->constituents,
3499 fragment_constituents_size);
J-Alves33c47bf2022-09-29 11:36:20 +01003500
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003501 space_left -= fragment_constituents_size;
3502 }
3503
J-Alves33c47bf2022-09-29 11:36:20 +01003504 assert(space_left == 0U);
3505
J-Alves33c47bf2022-09-29 11:36:20 +01003506 /*
3507 * Remove the v1.0 fragment size, and resultant size of v1.1 fragment.
3508 */
3509 *total_length = *total_length - *fragment_length + fragment_length_v1_1;
3510 *fragment_length = fragment_length_v1_1;
3511
3512 /*
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003513 * After successfully updating to v1.1 copy the descriptor to the
3514 * internal buffer given as a parameter (used to prevent TOCTOU attacks)
3515 * and free the scratch memory used to construct it.
J-Alves33c47bf2022-09-29 11:36:20 +01003516 */
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003517 memcpy_s(allocated, MM_PPOOL_ENTRY_SIZE, memory_region_v1_1,
3518 *fragment_length);
3519 mpool_free(&api_page_pool, memory_region_v1_1);
J-Alves33c47bf2022-09-29 11:36:20 +01003520
3521 return (struct ffa_value){.func = FFA_SUCCESS_32};
3522}
3523
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003524struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
3525 uint32_t fragment_length, ipaddr_t address,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01003526 uint32_t page_count, struct vcpu *current)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003527{
3528 struct vm *from = current->vm;
3529 struct vm *to;
3530 const void *from_msg;
J-Alves33c47bf2022-09-29 11:36:20 +01003531 void *allocated_entry;
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003532 struct ffa_memory_region *memory_region = NULL;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003533 struct ffa_value ret;
J-Alves363f5722022-04-25 17:37:37 +01003534 bool targets_other_world = false;
Karl Meakin0e617d92024-04-05 12:55:22 +01003535 enum ffa_version ffa_version;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003536
3537 if (ipa_addr(address) != 0 || page_count != 0) {
3538 /*
3539 * Hafnium only supports passing the descriptor in the TX
3540 * mailbox.
3541 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003542 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003543 }
3544
Andrew Walbranca808b12020-05-15 17:22:28 +01003545 if (fragment_length > length) {
3546 dlog_verbose(
3547 "Fragment length %d greater than total length %d.\n",
3548 fragment_length, length);
3549 return ffa_error(FFA_INVALID_PARAMETERS);
3550 }
J-Alves33c47bf2022-09-29 11:36:20 +01003551
3552 if (fragment_length > HF_MAILBOX_SIZE ||
3553 fragment_length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003554 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003555 }
3556
3557 /*
3558 * Check that the sender has configured its send buffer. If the TX
3559 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
3560 * be safely accessed after releasing the lock since the TX mailbox
3561 * address can only be configured once.
3562 */
3563 sl_lock(&from->lock);
3564 from_msg = from->mailbox.send;
J-Alves33c47bf2022-09-29 11:36:20 +01003565 ffa_version = from->ffa_version;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003566 sl_unlock(&from->lock);
3567
3568 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003569 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003570 }
3571
3572 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003573 * Copy the memory region descriptor to a fresh page from the memory
3574 * pool. This prevents the sender from changing it underneath us, and
3575 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003576 */
J-Alves33c47bf2022-09-29 11:36:20 +01003577 allocated_entry = mpool_alloc(&api_page_pool);
3578 if (allocated_entry == NULL) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003579 dlog_verbose("Failed to allocate memory region copy.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003580 return ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003581 }
J-Alves33c47bf2022-09-29 11:36:20 +01003582
	if (!memcpy_trapped(allocated_entry, MM_PPOOL_ENTRY_SIZE, from_msg,
			    fragment_length)) {
		dlog_error(
			"%s: Failed to copy FF-A memory region descriptor.\n",
			__func__);
		/* Don't leak the page allocated for the copy. */
		mpool_free(&api_page_pool, allocated_entry);
		return ffa_error(FFA_ABORTED);
	}
J-Alves33c47bf2022-09-29 11:36:20 +01003590
	/*
	 * Track the copy as the memory region from here on, so the error
	 * paths below free it at the 'out' label.
	 */
	memory_region = allocated_entry;

	if (!ffa_memory_region_sanity_check(allocated_entry, ffa_version,
					    fragment_length, true)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	ret = api_ffa_memory_transaction_descriptor_v1_1_from_v1_0(
		allocated_entry, &fragment_length, &length, ffa_version, true);
	if (ret.func != FFA_SUCCESS_32) {
		goto out;
	}
3604
J-Alves33c47bf2022-09-29 11:36:20 +01003605 if (fragment_length < sizeof(struct ffa_memory_region) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003606 memory_region->memory_access_desc_size) {
J-Alves33c47bf2022-09-29 11:36:20 +01003607 dlog_verbose(
3608 "Initial fragment length %d smaller than header size "
Karl Meakine8937d92024-03-19 16:04:25 +00003609 "%lu.\n",
J-Alves33c47bf2022-09-29 11:36:20 +01003610 fragment_length,
3611 sizeof(struct ffa_memory_region) +
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003612 memory_region->memory_access_desc_size);
3613 ret = ffa_error(FFA_INVALID_PARAMETERS);
3614 goto out;
J-Alves33c47bf2022-09-29 11:36:20 +01003615 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003616
J-Alves84658fc2021-06-17 14:37:32 +01003617 if (!api_memory_region_check_flags(memory_region, share_func)) {
3618 dlog_verbose(
3619 "Memory region reserved arguments must be zero.\n");
3620 ret = ffa_error(FFA_INVALID_PARAMETERS);
3621 goto out;
3622 }
3623
J-Alves363f5722022-04-25 17:37:37 +01003624 if (memory_region->receiver_count == 0U) {
3625 dlog_verbose("Receiver count can't be 0.\n");
3626 ret = ffa_error(FFA_INVALID_PARAMETERS);
3627 goto out;
3628 }
3629
J-Alves95fbb312024-03-20 15:19:16 +00003630 if ((share_func == FFA_MEM_DONATE_32 ||
3631 share_func == FFA_MEM_DONATE_64) &&
J-Alves363f5722022-04-25 17:37:37 +01003632 memory_region->receiver_count != 1U) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003633 dlog_verbose(
J-Alves0a824e92024-04-26 16:20:12 +01003634 "FFA_MEM_DONATE only supports one recipient. Specified "
3635 "%u\n",
J-Alves363f5722022-04-25 17:37:37 +01003636 memory_region->receiver_count);
3637 ret = ffa_error(FFA_INVALID_PARAMETERS);
3638 goto out;
3639 }
3640
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003641 /*
3642 * Ensure that the receiver VM exists and isn't the same as the sender.
J-Alves363f5722022-04-25 17:37:37 +01003643 * If there is a receiver from the other world, track it for later
3644 * forwarding if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003645 */
J-Alves363f5722022-04-25 17:37:37 +01003646 for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003647 struct ffa_memory_access *receiver =
3648 ffa_memory_region_get_receiver(memory_region, i);
J-Alvesc9227c82024-04-24 21:00:58 +01003649 ffa_id_t receiver_id;
3650
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003651 assert(receiver != NULL);
J-Alvesc9227c82024-04-24 21:00:58 +01003652
3653 receiver_id = receiver->receiver_permissions.receiver;
Daniel Boulbyd5ae44b2023-12-12 12:18:11 +00003654
J-Alves363f5722022-04-25 17:37:37 +01003655 to = vm_find(receiver_id);
3656
J-Alves7de61af2024-03-27 10:29:46 +00003657 if ((vm_id_is_current_world(receiver_id) && to == NULL) ||
3658 to == from) {
J-Alves363f5722022-04-25 17:37:37 +01003659 dlog_verbose("%s: invalid receiver.\n", __func__);
3660 ret = ffa_error(FFA_INVALID_PARAMETERS);
3661 goto out;
3662 }
3663
J-Alvesc9227c82024-04-24 21:00:58 +01003664 if (!plat_ffa_is_memory_send_valid(
3665 receiver_id, from->id, share_func,
3666 memory_region->receiver_count > 1)) {
J-Alves363f5722022-04-25 17:37:37 +01003667 ret = ffa_error(FFA_DENIED);
3668 goto out;
3669 }
3670
3671 /* Capture if any of the receivers is from the other world. */
3672 if (!targets_other_world) {
3673 targets_other_world =
3674 !vm_id_is_current_world(receiver_id);
3675 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003676 }
3677
J-Alves363f5722022-04-25 17:37:37 +01003678 if (targets_other_world) {
J-Alves66652252022-07-06 09:49:51 +01003679 ret = plat_ffa_other_world_mem_send(
3680 from, share_func, &memory_region, length,
3681 fragment_length, &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003682 } else {
3683 struct vm_locked from_locked = vm_lock(from);
3684
Andrew Walbran1a86aa92020-05-15 17:22:28 +01003685 ret = ffa_memory_send(from_locked, memory_region, length,
3686 fragment_length, share_func,
3687 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003688 /*
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003689 * ffa_memory_send takes ownership of the memory_region, so
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003690 * make sure we don't free it.
3691 */
3692 memory_region = NULL;
3693
3694 vm_unlock(&from_locked);
3695 }
3696
3697out:
3698 if (memory_region != NULL) {
3699 mpool_free(&api_page_pool, memory_region);
3700 }
3701
3702 return ret;
3703}
3704
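/**
 * Handles FFA_MEM_RETRIEVE_REQ: copies the retrieve request from the
 * caller's TX buffer into a CPU-local buffer, normalizes it to the v1.1
 * descriptor layout if needed, and completes the retrieve for handles
 * allocated by this world. Fragmented retrieve requests are not supported.
 */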
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003705struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
3706 uint32_t fragment_length,
3707 ipaddr_t address, uint32_t page_count,
3708 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003709{
3710 struct vm *to = current->vm;
3711 struct vm_locked to_locked;
3712 const void *to_msg;
J-Alves00847062022-10-03 17:08:44 +01003713 void *retrieve_msg;
3714 struct ffa_memory_region *retrieve_request = NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003715 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003716 struct ffa_value ret;
Karl Meakin0e617d92024-04-05 12:55:22 +01003717 enum ffa_version ffa_version;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003718
3719 if (ipa_addr(address) != 0 || page_count != 0) {
3720 /*
3721 * Hafnium only supports passing the descriptor in the TX
3722 * mailbox.
3723 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003724 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003725 }
3726
Andrew Walbrana65a1322020-04-06 19:32:32 +01003727 if (fragment_length != length) {
J-Alves33c47bf2022-09-29 11:36:20 +01003728 dlog_verbose("Fragmentation not supported.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003729 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003730 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003731
J-Alves00847062022-10-03 17:08:44 +01003732 retrieve_msg = cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003733 message_buffer_size = cpu_get_buffer_size(current->cpu);
3734 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
3735 dlog_verbose("Retrieve request too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003736 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003737 }
3738
3739 to_locked = vm_lock(to);
3740 to_msg = to->mailbox.send;
J-Alves00847062022-10-03 17:08:44 +01003741 ffa_version = to->ffa_version;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003742
3743 if (to_msg == NULL) {
3744 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003745 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003746 goto out;
3747 }
3748
3749 /*
3750 * Copy the retrieve request descriptor to an internal buffer, so that
3751 * the caller can't change it underneath us.
3752 */
J-Alves8f2150d2024-03-28 16:15:56 +00003753 if (!memcpy_trapped(retrieve_msg, message_buffer_size, to_msg,
3754 length)) {
3755 dlog_error(
3756 "%s: Failed to copy FF-A retrieve request "
3757 "descriptor.\n",
3758 __func__);
3759 ret = ffa_error(FFA_ABORTED);
3760 goto out;
3761 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003762
J-Alvese8c8c2b2022-12-16 15:34:48 +00003763 if ((vm_is_mailbox_other_world_owned(to_locked) &&
3764 !plat_ffa_acquire_receiver_rx(to_locked, &ret)) ||
3765 vm_is_mailbox_busy(to_locked)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003766 /*
J-Alvese8c8c2b2022-12-16 15:34:48 +00003767 * Can't retrieve memory information if the mailbox is
3768 * not available.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003769 */
J-Alves59ed0042022-07-28 18:26:41 +01003770 dlog_verbose("%s: RX buffer not ready.\n", __func__);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003771 ret = ffa_error(FFA_BUSY);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003772 goto out;
3773 }
3774
Daniel Boulby296ee702023-11-28 13:36:55 +00003775 if (!is_ffa_hypervisor_retrieve_request(retrieve_msg)) {
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003776 if (!ffa_memory_region_sanity_check(retrieve_msg, ffa_version,
3777 fragment_length, false)) {
3778 ret = ffa_error(FFA_INVALID_PARAMETERS);
3779 goto out;
3780 }
3781 /*
3782 * If required, transform the retrieve request to FF-A v1.1.
3783 */
3784 ret = api_ffa_memory_transaction_descriptor_v1_1_from_v1_0(
3785 retrieve_msg, &fragment_length, &length, ffa_version,
3786 false);
Daniel Boulbyc7dc9322023-10-27 15:12:07 +01003787
Daniel Boulby44e9b3b2024-01-17 12:21:44 +00003788 if (ret.func != FFA_SUCCESS_32) {
3789 goto out;
3790 }
J-Alves00847062022-10-03 17:08:44 +01003791 }
3792
Daniel Boulby36e0bdb2023-10-09 12:00:41 +01003793 retrieve_request = retrieve_msg;
Kathleen Capella578784f2023-09-01 18:03:27 -04003794
J-Alvesb5084cf2022-07-06 14:20:12 +01003795 if (plat_ffa_memory_handle_allocated_by_current_world(
3796 retrieve_request->handle)) {
3797 ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
3798 &api_page_pool);
3799 } else {
Daniel Boulbyfdd59f72023-10-19 16:43:19 +01003800 dlog_error("Invalid FF-A memory handle.\n");
3801 ret = ffa_error(FFA_INVALID_PARAMETERS);
J-Alvesb5084cf2022-07-06 14:20:12 +01003802 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00003803out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003804 vm_unlock(&to_locked);
3805 return ret;
3806}
3807
J-Alvesa39a8442024-04-26 15:41:47 +01003808/**
 * Copies the memory relinquish descriptor from the partition's TX buffer to
 * the local CPU's buffer. The copy is done safely, returning an error if:
 * - FFA_ABORTED: the `memcpy_trapped` fails.
 * - FFA_INVALID_PARAMETERS: the full memory relinquish descriptor doesn't
 *   fit in the local CPU buffer.
 *
 * Returns FFA_SUCCESS if the copy succeeds, and sets 'out_relinquish' to the
 * address of the CPU buffer holding the relinquish descriptor.
3817 */
3818static struct ffa_value api_get_ffa_mem_relinquish_descriptor(
3819 struct vcpu *current, const void *from_msg,
3820 struct ffa_mem_relinquish **out_relinquish)
3821{
3822 struct ffa_mem_relinquish *relinquish_request;
3823 uint32_t from_msg_size;
3824 uint32_t total_from_msg_size;
3825 uint32_t dst_size;
3826 vaddr_t dst;
3827 vaddr_t src;
3828
3829 assert(from_msg != NULL);
3830 assert(out_relinquish != NULL);
3831
3832 /*
3833 * Copy the relinquish descriptor to an internal buffer, so that the
3834 * caller can't change it underneath us.
3835 */
3836 relinquish_request =
3837 (struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
3838
3839 /* Set the destination for the copy. */
3840 dst = va_from_ptr(relinquish_request);
3841 src = va_from_ptr(from_msg);
3842
3843 dst_size = cpu_get_buffer_size(current->cpu);
3844
	/* Only copy the fixed-size header to start with. */
3846 from_msg_size = sizeof(struct ffa_mem_relinquish);
3847 total_from_msg_size = from_msg_size;
3848
3849 if (!memcpy_trapped(ptr_from_va(dst), dst_size, ptr_from_va(src),
3850 from_msg_size)) {
3851 dlog_error(
3852 "%s: Failed to copy FF-A memory relinquish "
3853 "descriptor.\n",
3854 __func__);
3855 return ffa_error(FFA_ABORTED);
3856 }
3857
3858 if (relinquish_request->endpoint_count != 1) {
3859 dlog_error("%s: relinquish descriptor must have 1 endpoint\n",
3860 __func__);
3861 return ffa_error(FFA_INVALID_PARAMETERS);
3862 }
3863
3864 /*
3865 * Increment the `dst` to the position right after the copied header.
3866 * Increment the `src` to point at the list of endpoints.
3867 *
3868 * Calculate the new `dst_size` which is the size of the allocated cpu
3869 * buffer, minus the size of the copied memory relinquish header.
3870 *
3871 * Only after the above: determine the new `from_msg_size` in accordance
3872 * to the endpoint count.
3873 */
3874 dst = va_add(dst, from_msg_size);
3875 src = va_add(src, from_msg_size);
3876
3877 /*
3878 * Check if it is safe to copy the rest of the message.
3879 * This also serves as a santiy check to 'endpoint_count'.
3880 * The size of what is left in the descriptor, based on endpoint_count,
3881 * shall not be bigger than the size of the mailbox minus the size of
3882 * the header which was previously copied in this function.
3883 */
3884 dst_size -= from_msg_size;
3885 from_msg_size = relinquish_request->endpoint_count * sizeof(ffa_id_t);
3886 total_from_msg_size += from_msg_size;
3887
3888 if (total_from_msg_size > HF_MAILBOX_SIZE ||
3889 total_from_msg_size > dst_size) {
3890 dlog_verbose(
3891 "Relinquish message too long. Endpoint count: %u\n",
3892 relinquish_request->endpoint_count);
3893 return ffa_error(FFA_INVALID_PARAMETERS);
3894 }
3895
	/* Copy the remainder of the descriptor: the endpoint list. */
3897 if (!memcpy_trapped(ptr_from_va(dst), dst_size, ptr_from_va(src),
3898 from_msg_size)) {
3899 dlog_error("%s: Failed to copy FF-A relinquish request.\n",
3900 __func__);
3901 return ffa_error(FFA_ABORTED);
3902 }
3903
3904 /*
3905 * Set the output address for the relinquish descriptor to the current
3906 * cpu's buffer.
3907 */
3908 *out_relinquish = relinquish_request;
3909
3910 return (struct ffa_value){.func = FFA_SUCCESS_32};
3911}
3912
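/**
 * Handles FFA_MEM_RELINQUISH: safely copies the relinquish descriptor from
 * the caller's TX buffer and, if the copy succeeds, relinquishes the
 * caller's access to the memory region identified by the descriptor's
 * handle.
 */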
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003913struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003914{
3915 struct vm *from = current->vm;
3916 struct vm_locked from_locked;
3917 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003918 struct ffa_value ret;
J-Alvesa39a8442024-04-26 15:41:47 +01003919 struct ffa_mem_relinquish *relinquish_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003920
3921 from_locked = vm_lock(from);
3922 from_msg = from->mailbox.send;
3923
3924 if (from_msg == NULL) {
3925 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003926 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003927 goto out;
3928 }
3929
J-Alvesa39a8442024-04-26 15:41:47 +01003930 ret = api_get_ffa_mem_relinquish_descriptor(current, from_msg,
3931 &relinquish_request);
3932
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003933 /*
	 * If the descriptor was safely copied, continue with the handling of
	 * the relinquish request.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003936 */
J-Alvesa39a8442024-04-26 15:41:47 +01003937 if (ret.func == FFA_SUCCESS_32) {
3938 ret = ffa_memory_relinquish(from_locked, relinquish_request,
3939 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003940 }
J-Alves8f2150d2024-03-28 16:15:56 +00003941
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003942out:
3943 vm_unlock(&from_locked);
3944 return ret;
3945}
3946
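/**
 * Handles FFA_MEM_RECLAIM: returns a memory region to the owner's exclusive
 * access. Handles allocated by this world are reclaimed locally; otherwise
 * the request is forwarded to the other world.
 */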
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003947struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
3948 ffa_memory_region_flags_t flags,
3949 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003950{
3951 struct vm *to = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003952 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003953
Olivier Deprez55a189e2021-06-09 15:45:27 +02003954 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00003955 struct vm_locked to_locked = vm_lock(to);
3956
Andrew Walbranca808b12020-05-15 17:22:28 +01003957 ret = ffa_memory_reclaim(to_locked, handle, flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01003958 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00003959
Andrew Walbran290b0c92020-02-03 16:37:14 +00003960 vm_unlock(&to_locked);
3961 } else {
J-Alvesfc19b372022-07-06 12:17:35 +01003962 ret = plat_ffa_other_world_mem_reclaim(to, handle, flags,
3963 &api_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01003964 }
3965
3966 return ret;
3967}
3968
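/**
 * Handles FFA_MEM_FRAG_RX: delivers the next fragment of a memory region
 * description, identified by its handle and fragment offset, through the
 * caller's RX buffer.
 */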
3969struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
3970 uint32_t fragment_offset,
J-Alves19e20cf2023-08-02 12:48:55 +01003971 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01003972 struct vcpu *current)
3973{
3974 struct vm *to = current->vm;
3975 struct vm_locked to_locked;
3976 struct ffa_value ret;
3977
3978 /* Sender ID MBZ at virtual instance. */
J-Alves59ed0042022-07-28 18:26:41 +01003979 if (vm_id_is_current_world(to->id)) {
3980 if (sender_vm_id != 0) {
3981 dlog_verbose("%s: Invalid sender.\n", __func__);
3982 return ffa_error(FFA_INVALID_PARAMETERS);
3983 }
Andrew Walbranca808b12020-05-15 17:22:28 +01003984 }
3985
3986 to_locked = vm_lock(to);
3987
J-Alves122f1a12022-12-12 15:55:42 +00003988 if (vm_is_mailbox_busy(to_locked)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01003989 /*
3990 * Can't retrieve memory information if the mailbox is not
3991 * available.
3992 */
J-Alves59ed0042022-07-28 18:26:41 +01003993 dlog_verbose("%s: RX buffer not ready partition %x.\n",
3994 __func__, to_locked.vm->id);
Andrew Walbranca808b12020-05-15 17:22:28 +01003995 ret = ffa_error(FFA_BUSY);
3996 goto out;
3997 }
3998
3999 ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
J-Alves59ed0042022-07-28 18:26:41 +01004000 sender_vm_id, &api_page_pool);
Andrew Walbranca808b12020-05-15 17:22:28 +01004001out:
4002 vm_unlock(&to_locked);
4003 return ret;
4004}
4005
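/**
 * Handles FFA_MEM_FRAG_TX: accepts the next fragment of a fragmented memory
 * send transaction from the caller's TX buffer, copying it to a fresh page
 * so the sender can't modify it afterwards.
 */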
4006struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
4007 uint32_t fragment_length,
J-Alves19e20cf2023-08-02 12:48:55 +01004008 ffa_id_t sender_vm_id,
Andrew Walbranca808b12020-05-15 17:22:28 +01004009 struct vcpu *current)
4010{
4011 struct vm *from = current->vm;
4012 const void *from_msg;
4013 void *fragment_copy;
4014 struct ffa_value ret;
4015
4016 /* Sender ID MBZ at virtual instance. */
J-Alvesfdd29272022-07-19 13:16:31 +01004017 if (vm_id_is_current_world(from->id) && sender_vm_id != 0) {
		dlog_verbose("Invalid sender.\n");
Andrew Walbranca808b12020-05-15 17:22:28 +01004019 return ffa_error(FFA_INVALID_PARAMETERS);
4020 }
4021
4022 /*
4023 * Check that the sender has configured its send buffer. If the TX
4024 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
4025 * be safely accessed after releasing the lock since the TX mailbox
4026 * address can only be configured once.
4027 */
4028 sl_lock(&from->lock);
4029 from_msg = from->mailbox.send;
4030 sl_unlock(&from->lock);
4031
4032 if (from_msg == NULL) {
J-Alves59ed0042022-07-28 18:26:41 +01004033 dlog_verbose("Mailbox from %x is not set.\n", from->id);
Andrew Walbranca808b12020-05-15 17:22:28 +01004034 return ffa_error(FFA_INVALID_PARAMETERS);
4035 }
4036
4037 /*
4038 * Copy the fragment to a fresh page from the memory pool. This prevents
4039 * the sender from changing it underneath us, and also lets us keep it
4040 * around in the share state table if needed.
4041 */
4042 if (fragment_length > HF_MAILBOX_SIZE ||
4043 fragment_length > MM_PPOOL_ENTRY_SIZE) {
4044 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00004045 "Fragment length %d larger than mailbox size %zu.\n",
Andrew Walbranca808b12020-05-15 17:22:28 +01004046 fragment_length, HF_MAILBOX_SIZE);
4047 return ffa_error(FFA_INVALID_PARAMETERS);
4048 }
4049 if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
4050 fragment_length % sizeof(struct ffa_memory_region_constituent) !=
4051 0) {
4052 dlog_verbose("Invalid fragment length %d.\n", fragment_length);
4053 return ffa_error(FFA_INVALID_PARAMETERS);
4054 }
4055 fragment_copy = mpool_alloc(&api_page_pool);
4056 if (fragment_copy == NULL) {
4057 dlog_verbose("Failed to allocate fragment copy.\n");
4058 return ffa_error(FFA_NO_MEMORY);
4059 }
J-Alves8f2150d2024-03-28 16:15:56 +00004060
	if (!memcpy_trapped(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg,
			    fragment_length)) {
		dlog_error("%s: Failed to copy fragment.\n", __func__);
		/* Don't leak the page allocated for the copy. */
		mpool_free(&api_page_pool, fragment_copy);
		return ffa_error(FFA_ABORTED);
	}
Andrew Walbranca808b12020-05-15 17:22:28 +01004066
4067 /*
4068 * Hafnium doesn't support fragmentation of memory retrieve requests
4069 * (because it doesn't support caller-specified mappings, so a request
4070 * will never be larger than a single page), so this must be part of a
4071 * memory send (i.e. donate, lend or share) request.
4072 *
4073 * We can tell from the handle whether the memory transaction is for the
J-Alvesfdd29272022-07-19 13:16:31 +01004074 * other world or not.
Andrew Walbranca808b12020-05-15 17:22:28 +01004075 */
J-Alvesfdd29272022-07-19 13:16:31 +01004076 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbranca808b12020-05-15 17:22:28 +01004077 struct vm_locked from_locked = vm_lock(from);
4078
4079 ret = ffa_memory_send_continue(from_locked, fragment_copy,
4080 fragment_length, handle,
4081 &api_page_pool);
4082 /*
4083 * `ffa_memory_send_continue` takes ownership of the
4084 * fragment_copy, so we don't need to free it here.
4085 */
4086 vm_unlock(&from_locked);
4087 } else {
J-Alvesfdd29272022-07-19 13:16:31 +01004088 ret = plat_ffa_other_world_mem_send_continue(
4089 from, fragment_copy, fragment_length, handle,
4090 &api_page_pool);
Andrew Walbran290b0c92020-02-03 16:37:14 +00004091 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00004092
4093 return ret;
4094}
Max Shvetsov40108e72020-08-27 12:39:50 +01004095
Olivier Deprezd614d322021-06-18 15:21:00 +02004096/**
4097 * Register an entry point for a vCPU in warm boot cases.
4098 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1 FFA_SECONDARY_EP_REGISTER.
4099 */
Max Shvetsov40108e72020-08-27 12:39:50 +01004100struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
4101 struct vcpu *current)
4102{
4103 struct vm_locked vm_locked;
Olivier Deprezb2808332023-02-02 15:25:40 +01004104 struct vcpu_locked current_locked;
Max Shvetsov40108e72020-08-27 12:39:50 +01004105
Olivier Deprezd614d322021-06-18 15:21:00 +02004106 /*
4107 * Reject if interface is not supported at this FF-A instance
4108 * (DEN0077A FF-A v1.1 Beta0 Table 18.29) or the VM is UP.
4109 */
4110 if (!plat_ffa_is_secondary_ep_register_supported() ||
Karl Meakin82041822024-05-20 11:14:34 +01004111 vm_is_up(current->vm)) {
Olivier Deprezd614d322021-06-18 15:21:00 +02004112 return ffa_error(FFA_NOT_SUPPORTED);
4113 }
4114
4115 /*
4116 * No further check is made on the address validity
4117 * (FF-A v1.1 Beta0 Table 18.29) as the VM boundaries are not known
4118 * from the VM or vCPU structure.
4119 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1:
4120 * For each SP [...] the Framework assumes that the same entry point
4121 * address is used for initializing any execution context during a
4122 * secondary cold boot.
4123 * If this function is invoked multiple times, then the entry point
4124 * address specified in the last valid invocation must be used by the
4125 * callee.
4126 */
Olivier Deprezb2808332023-02-02 15:25:40 +01004127 current_locked = vcpu_lock(current);
4128 if (current->rt_model != RTM_SP_INIT) {
4129 dlog_error(
4130 "FFA_SECONDARY_EP_REGISTER can only be called while "
4131 "vCPU in run-time state for initialization.\n");
4132 vcpu_unlock(&current_locked);
4133 return ffa_error(FFA_DENIED);
Olivier Deprezd614d322021-06-18 15:21:00 +02004134 }
Olivier Deprezb2808332023-02-02 15:25:40 +01004135 vcpu_unlock(&current_locked);
Olivier Deprezd614d322021-06-18 15:21:00 +02004136
Olivier Deprezb2808332023-02-02 15:25:40 +01004137 vm_locked = vm_lock(current->vm);
Max Shvetsov40108e72020-08-27 12:39:50 +01004138 vm_locked.vm->secondary_ep = entry_point;
4139 vm_unlock(&vm_locked);
4140
Olivier Deprezb2808332023-02-02 15:25:40 +01004141 return (struct ffa_value){.func = FFA_SUCCESS_32};
Max Shvetsov40108e72020-08-27 12:39:50 +01004142}
J-Alvesa0f317d2021-06-09 13:31:59 +01004143
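/**
 * Handles FFA_NOTIFICATION_BITMAP_CREATE: creates the notifications bitmap
 * for the given VM ID. To be used by the hypervisor on behalf of valid NWd
 * VM IDs only.
 */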
J-Alves19e20cf2023-08-02 12:48:55 +01004144struct ffa_value api_ffa_notification_bitmap_create(ffa_id_t vm_id,
J-Alvesa0f317d2021-06-09 13:31:59 +01004145 ffa_vcpu_count_t vcpu_count,
4146 struct vcpu *current)
4147{
J-Alves7ccaccf2024-02-01 14:25:23 +00004148 const struct ffa_value ret =
4149 plat_ffa_is_notifications_bitmap_access_valid(current, vm_id);
4150
4151 if (ffa_func_id(ret) != FFA_SUCCESS_32) {
4152 dlog_verbose(
4153 "FFA_NOTIFICATION_BITMAP_CREATE to be used by "
4154 "hypervisor for valid NWd VM IDs only (%x).\n",
4155 vm_id);
4156 return ret;
J-Alvesa0f317d2021-06-09 13:31:59 +01004157 }
4158
4159 return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
4160}
4161
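/**
 * Handles FFA_NOTIFICATION_BITMAP_DESTROY: destroys the notifications bitmap
 * previously created for the given VM ID.
 */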
J-Alves19e20cf2023-08-02 12:48:55 +01004162struct ffa_value api_ffa_notification_bitmap_destroy(ffa_id_t vm_id,
J-Alvesa0f317d2021-06-09 13:31:59 +01004163 struct vcpu *current)
4164{
J-Alves7ccaccf2024-02-01 14:25:23 +00004165 const struct ffa_value ret =
4166 plat_ffa_is_notifications_bitmap_access_valid(current, vm_id);
4167
4168 if (ffa_func_id(ret) != FFA_SUCCESS_32) {
4169 dlog_verbose(
4170 "FFA_NOTIFICATION_BITMAP_DESTROY to be used by "
4171 "hypervisor for valid NWd VM IDs only (%x).\n",
4172 vm_id);
4173 return ret;
J-Alvesa0f317d2021-06-09 13:31:59 +01004174 }
4175
4176 return plat_ffa_notifications_bitmap_destroy(vm_id);
4177}
J-Alvesc003a7a2021-03-18 13:06:53 +00004178
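/**
 * Handles FFA_NOTIFICATION_BIND/UNBIND: binds the given set of the
 * receiver's notifications to the sender, or removes an existing binding.
 * Bindings can't be changed while any of the targeted notifications is
 * pending.
 */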
4179struct ffa_value api_ffa_notification_update_bindings(
J-Alves19e20cf2023-08-02 12:48:55 +01004180 ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
J-Alvesc003a7a2021-03-18 13:06:53 +00004181 ffa_notifications_bitmap_t notifications, bool is_bind,
4182 struct vcpu *current)
4183{
4184 struct ffa_value ret = {.func = FFA_SUCCESS_32};
4185 struct vm_locked receiver_locked;
4186 const bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
J-Alves19e20cf2023-08-02 12:48:55 +01004187 const ffa_id_t id_to_update = is_bind ? sender_vm_id : HF_INVALID_VM_ID;
4188 const ffa_id_t id_to_validate =
J-Alvesc003a7a2021-03-18 13:06:53 +00004189 is_bind ? HF_INVALID_VM_ID : sender_vm_id;
J-Alves1daeaea2022-09-06 17:41:24 +01004190 const uint32_t flags_mbz =
4191 is_bind ? ~FFA_NOTIFICATIONS_FLAG_PER_VCPU : ~0U;
4192
4193 if ((flags_mbz & flags) != 0U) {
4194 return ffa_error(FFA_INVALID_PARAMETERS);
4195 }
J-Alvesc003a7a2021-03-18 13:06:53 +00004196
4197 if (!plat_ffa_is_notifications_bind_valid(current, sender_vm_id,
4198 receiver_vm_id)) {
4199 dlog_verbose("Invalid use of notifications bind interface.\n");
4200 return ffa_error(FFA_INVALID_PARAMETERS);
4201 }
4202
J-Alvesb15e9402021-09-08 11:44:42 +01004203 if (plat_ffa_notifications_update_bindings_forward(
4204 receiver_vm_id, sender_vm_id, flags, notifications, is_bind,
4205 &ret)) {
J-Alvesb15e9402021-09-08 11:44:42 +01004206 return ret;
4207 }
4208
J-Alvesc003a7a2021-03-18 13:06:53 +00004209 if (notifications == 0U) {
Karl Meakine8937d92024-03-19 16:04:25 +00004210 dlog_verbose("No notifications have been specified %lx.\n",
J-Alves1512acc2024-02-01 16:57:10 +00004211 notifications);
J-Alvesc003a7a2021-03-18 13:06:53 +00004212 return ffa_error(FFA_INVALID_PARAMETERS);
4213 }
4214
	/*
	 * This check assumes the receiver is the current VM, as enforced by
	 * 'plat_ffa_is_notifications_bind_valid'.
	 */
4219 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
4220
4221 if (receiver_locked.vm == NULL) {
4222 dlog_verbose("Receiver doesn't exist!\n");
J-Alves7333dcf2024-02-01 17:25:10 +00004223 return ffa_error(FFA_INVALID_PARAMETERS);
J-Alvesc003a7a2021-03-18 13:06:53 +00004224 }
4225
J-Alves09ff9d82021-11-02 11:55:20 +00004226 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesc003a7a2021-03-18 13:06:53 +00004227 dlog_verbose("Notifications are not enabled.\n");
4228 ret = ffa_error(FFA_NOT_SUPPORTED);
4229 goto out;
4230 }
4231
4232 if (is_bind && vm_id_is_current_world(sender_vm_id) &&
4233 vm_find(sender_vm_id) == NULL) {
4234 dlog_verbose("Sender VM does not exist!\n");
4235 ret = ffa_error(FFA_INVALID_PARAMETERS);
4236 goto out;
4237 }
4238
4239 /*
4240 * Can't bind/unbind notifications if at least one is bound to a
4241 * different sender.
4242 */
4243 if (!vm_notifications_validate_bound_sender(
J-Alves661e1b72023-08-02 13:39:40 +01004244 receiver_locked, ffa_is_vm_id(sender_vm_id), id_to_validate,
4245 notifications)) {
J-Alvesf0208142024-02-01 17:28:58 +00004246 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00004247 "Sender %x not permitted to set notifications %lx to "
J-Alvesf0208142024-02-01 17:28:58 +00004248 "%x.\n",
4249 sender_vm_id, notifications, receiver_vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +00004250 ret = ffa_error(FFA_DENIED);
4251 goto out;
4252 }
4253
	/*
	 * Check if there is a pending notification within those specified in
	 * the bitmap.
	 */
4258 if (vm_are_notifications_pending(receiver_locked,
J-Alves661e1b72023-08-02 13:39:40 +01004259 ffa_is_vm_id(sender_vm_id),
J-Alvesc003a7a2021-03-18 13:06:53 +00004260 notifications)) {
Karl Meakine8937d92024-03-19 16:04:25 +00004261 dlog_verbose("Notifications within '%lx' pending.\n",
J-Alvesc003a7a2021-03-18 13:06:53 +00004262 notifications);
4263 ret = ffa_error(FFA_DENIED);
4264 goto out;
4265 }
4266
4267 vm_notifications_update_bindings(
J-Alves661e1b72023-08-02 13:39:40 +01004268 receiver_locked, ffa_is_vm_id(sender_vm_id), id_to_update,
J-Alvesc003a7a2021-03-18 13:06:53 +00004269 notifications, is_per_vcpu && is_bind);
4270
4271out:
4272 vm_unlock(&receiver_locked);
4273 return ret;
4274}
J-Alvesaa79c012021-07-09 14:29:45 +01004275
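/**
 * Handles FFA_NOTIFICATION_SET: pends the given set of notifications for the
 * receiver, either globally or for a specific vCPU, and triggers or delays
 * the schedule receiver interrupt according to the delay SRI flag.
 */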
4276struct ffa_value api_ffa_notification_set(
J-Alves19e20cf2023-08-02 12:48:55 +01004277 ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
J-Alvesaa79c012021-07-09 14:29:45 +01004278 ffa_notifications_bitmap_t notifications, struct vcpu *current)
4279{
4280 struct ffa_value ret;
4281 struct vm_locked receiver_locked;
J-Alvesaa79c012021-07-09 14:29:45 +01004282 /*
	 * Check whether the notification is per-vCPU or global, and extract
	 * the vCPU ID according to table 17.19 of the FF-A v1.1 Beta 0 spec.
4285 */
4286 bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
4287 ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
J-Alves6b754a12024-02-14 14:26:30 +00004288 const uint32_t flags_mbz =
4289 ~(FFA_NOTIFICATIONS_FLAG_PER_VCPU |
4290 FFA_NOTIFICATIONS_FLAG_DELAY_SRI | (0xFFFFU << 16));
4291
4292 if ((flags_mbz & flags) != 0U) {
4293 dlog_verbose("%s: caller shouldn't set bits that MBZ.\n",
4294 __func__);
4295 return ffa_error(FFA_INVALID_PARAMETERS);
4296 }
J-Alvesaa79c012021-07-09 14:29:45 +01004297
	/* For global notifications the vCPU ID field must be zero. */
4299 if (!is_per_vcpu && vcpu_id != 0U) {
4300 dlog_verbose(
4301 "For global notifications vCPU ID MBZ in call to set "
4302 "notifications.\n");
4303 return ffa_error(FFA_INVALID_PARAMETERS);
4304 }
4305
J-Alvesaa79c012021-07-09 14:29:45 +01004306 if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
4307 receiver_vm_id)) {
4308 dlog_verbose("Invalid use of notifications set interface.\n");
4309 return ffa_error(FFA_INVALID_PARAMETERS);
4310 }
4311
4312 if (notifications == 0U) {
4313 dlog_verbose("No notifications have been specified.\n");
4314 return ffa_error(FFA_INVALID_PARAMETERS);
4315 }
4316
J-Alvesde7bd2f2021-09-09 19:54:35 +01004317 if (plat_ffa_notification_set_forward(sender_vm_id, receiver_vm_id,
4318 flags, notifications, &ret)) {
4319 return ret;
4320 }
4321
J-Alvesaa79c012021-07-09 14:29:45 +01004322 /*
4323 * This check assumes receiver is the current VM, and has been enforced
4324 * by 'plat_ffa_is_notification_set_valid'.
4325 */
4326 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
4327
4328 if (receiver_locked.vm == NULL) {
4329 dlog_verbose("Receiver ID is not valid.\n");
4330 return ffa_error(FFA_INVALID_PARAMETERS);
4331 }
4332
J-Alves09ff9d82021-11-02 11:55:20 +00004333 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesaa79c012021-07-09 14:29:45 +01004334 dlog_verbose("Receiver's notifications not enabled.\n");
4335 ret = ffa_error(FFA_DENIED);
4336 goto out;
4337 }
4338
4339 /*
	 * Check if the notifications are bound as global while the per-vCPU
	 * flag is set, or bound as per-vCPU while the caller sets them as
	 * global. In either case, return FFA_INVALID_PARAMETERS.
4343 */
4344 if (vm_notifications_validate_binding(
4345 receiver_locked, ffa_is_vm_id(sender_vm_id), sender_vm_id,
4346 notifications, !is_per_vcpu)) {
Karl Meakine8937d92024-03-19 16:04:25 +00004347 dlog_verbose("Notifications in %lx are %s\n", notifications,
J-Alves89c22132024-02-14 15:44:41 +00004348 !is_per_vcpu ? "global" : "per-vCPU");
4349 ret = ffa_error(FFA_INVALID_PARAMETERS);
4350 goto out;
4351 }
4352
4353 /*
J-Alvesaa79c012021-07-09 14:29:45 +01004354 * If notifications are not bound to the sender, they wouldn't be
4355 * enabled either for the receiver.
4356 */
4357 if (!vm_notifications_validate_binding(
J-Alves661e1b72023-08-02 13:39:40 +01004358 receiver_locked, ffa_is_vm_id(sender_vm_id), sender_vm_id,
4359 notifications, is_per_vcpu)) {
J-Alvesaa79c012021-07-09 14:29:45 +01004360 dlog_verbose("Notifications bindings not valid.\n");
4361 ret = ffa_error(FFA_DENIED);
4362 goto out;
4363 }
4364
4365 if (is_per_vcpu && vcpu_id >= receiver_locked.vm->vcpu_count) {
4366 dlog_verbose("Invalid VCPU ID!\n");
4367 ret = ffa_error(FFA_INVALID_PARAMETERS);
4368 goto out;
4369 }
4370
J-Alves7461ef22021-10-18 17:21:33 +01004371 /* Set notifications pending. */
J-Alves5a16c962022-03-25 12:32:51 +00004372 vm_notifications_partition_set_pending(
J-Alves661e1b72023-08-02 13:39:40 +01004373 receiver_locked, ffa_is_vm_id(sender_vm_id), notifications,
J-Alves5a16c962022-03-25 12:32:51 +00004374 vcpu_id, is_per_vcpu);
4375
Karl Meakine8937d92024-03-19 16:04:25 +00004376 dlog_verbose("Set the notifications: %lx.\n", notifications);
J-Alvesaa79c012021-07-09 14:29:45 +01004377
J-Alves13394022021-06-30 13:48:49 +01004378 if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
4379 dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
4380 vcpu_index(current));
4381 plat_ffa_sri_trigger_not_delayed(current->cpu);
4382 } else {
4383 plat_ffa_sri_state_set(DELAYED);
4384 }
J-Alvesaa79c012021-07-09 14:29:45 +01004385
J-Alves13394022021-06-30 13:48:49 +01004386 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alvesaa79c012021-07-09 14:29:45 +01004387out:
4388 vm_unlock(&receiver_locked);
4389
4390 return ret;
4391}
4392
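/**
 * Packs the pending notification bitmaps into the return registers of
 * FFA_NOTIFICATION_GET: SP notifications in w2/w3, VM notifications in
 * w4/w5 and framework notifications in w6/w7.
 */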
4393static struct ffa_value api_ffa_notification_get_success_return(
4394 ffa_notifications_bitmap_t from_sp, ffa_notifications_bitmap_t from_vm,
4395 ffa_notifications_bitmap_t from_framework)
4396{
4397 return (struct ffa_value){
4398 .func = FFA_SUCCESS_32,
4399 .arg1 = 0U,
4400 .arg2 = (uint32_t)from_sp,
4401 .arg3 = (uint32_t)(from_sp >> 32),
4402 .arg4 = (uint32_t)from_vm,
4403 .arg5 = (uint32_t)(from_vm >> 32),
4404 .arg6 = (uint32_t)from_framework,
4405 .arg7 = (uint32_t)(from_framework >> 32),
4406 };
4407}
4408
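/**
 * Handles FFA_NOTIFICATION_GET: gets the pending notifications of the given
 * receiver vCPU, from SPs, VMs and/or the framework, as selected by the
 * flags.
 */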
J-Alves19e20cf2023-08-02 12:48:55 +01004409struct ffa_value api_ffa_notification_get(ffa_id_t receiver_vm_id,
J-Alvesaa79c012021-07-09 14:29:45 +01004410 ffa_vcpu_index_t vcpu_id,
4411 uint32_t flags, struct vcpu *current)
4412{
J-Alves663682a2022-03-25 13:56:51 +00004413 ffa_notifications_bitmap_t framework_notifications = 0;
J-Alvesaa79c012021-07-09 14:29:45 +01004414 ffa_notifications_bitmap_t sp_notifications = 0;
4415 ffa_notifications_bitmap_t vm_notifications = 0;
4416 struct vm_locked receiver_locked;
4417 struct ffa_value ret;
J-Alvesfc95a302022-04-22 14:18:23 +01004418 const uint32_t flags_mbz = ~(FFA_NOTIFICATION_FLAG_BITMAP_HYP |
4419 FFA_NOTIFICATION_FLAG_BITMAP_SPM |
4420 FFA_NOTIFICATION_FLAG_BITMAP_SP |
4421 FFA_NOTIFICATION_FLAG_BITMAP_VM);
4422
4423 /* The FF-A v1.1 EAC0 specification states bits [31:4] Must Be Zero. */
4424 if ((flags & flags_mbz) != 0U) {
4425 dlog_verbose(
4426 "Invalid flags bit(s) set in notifications get. [31:4] "
4427 "MBZ(%x)\n",
4428 flags);
4429 return ffa_error(FFA_INVALID_PARAMETERS);
4430 }
J-Alvesaa79c012021-07-09 14:29:45 +01004431
4432 /*
	 * The following check should capture wrong uses of the interface,
	 * depending on whether Hafnium is the SPMC or the hypervisor. In the
	 * rest of the function it is assumed this condition is met.
J-Alvesaa79c012021-07-09 14:29:45 +01004436 */
J-Alvesfc95a302022-04-22 14:18:23 +01004437 if (!plat_ffa_is_notification_get_valid(current, receiver_vm_id,
4438 flags)) {
J-Alvesaa79c012021-07-09 14:29:45 +01004439 dlog_verbose("Invalid use of notifications get interface.\n");
4440 return ffa_error(FFA_INVALID_PARAMETERS);
4441 }
4442
4443 /*
	 * This check assumes the receiver is the current VM, as enforced by
	 * `plat_ffa_is_notification_get_valid`.
4446 */
4447 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
4448
4449 /*
	 * `plat_ffa_is_notification_get_valid` ensures the following is
	 * never true.
4452 */
4453 CHECK(receiver_locked.vm != NULL);
4454
J-Alves60458812024-03-22 11:57:10 +00004455 if (receiver_locked.vm->vcpu_count <= vcpu_id) {
J-Alves1abb3342022-01-05 11:59:10 +00004456 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00004457 "Invalid VCPU ID %u. vcpu count %u current core: "
4458 "%zu!\n",
J-Alves1abb3342022-01-05 11:59:10 +00004459 vcpu_id, receiver_locked.vm->vcpu_count,
4460 cpu_index(current->cpu));
J-Alvesaa79c012021-07-09 14:29:45 +01004461 ret = ffa_error(FFA_INVALID_PARAMETERS);
4462 goto out;
4463 }
4464
4465 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_SP) != 0U) {
J-Alves98ff9562021-09-09 14:39:41 +01004466 if (!plat_ffa_notifications_get_from_sp(
4467 receiver_locked, vcpu_id, &sp_notifications,
4468 &ret)) {
			dlog_verbose(
				"Failed to get notifications from SPs.\n");
4470 goto out;
4471 }
J-Alvesaa79c012021-07-09 14:29:45 +01004472 }
4473
4474 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U) {
J-Alves5136dda2022-03-25 12:26:38 +00004475 vm_notifications = vm_notifications_partition_get_pending(
J-Alvesaa79c012021-07-09 14:29:45 +01004476 receiver_locked, true, vcpu_id);
4477 }
4478
J-Alvesd605a092022-03-28 14:20:48 +01004479 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U ||
4480 (flags & FFA_NOTIFICATION_FLAG_BITMAP_SPM) != 0U) {
4481 if (!plat_ffa_notifications_get_framework_notifications(
4482 receiver_locked, &framework_notifications, flags,
4483 vcpu_id, &ret)) {
4484 dlog_verbose(
4485 "Failed to get notifications from "
4486 "framework.\n");
4487 goto out;
4488 }
4489 }
4490
J-Alves663682a2022-03-25 13:56:51 +00004491 ret = api_ffa_notification_get_success_return(
4492 sp_notifications, vm_notifications, framework_notifications);
J-Alvesaa79c012021-07-09 14:29:45 +01004493
J-Alvesfe23ebe2021-10-13 16:07:07 +01004494 /*
4495 * If there are no more pending notifications, change `sri_state` to
4496 * handled.
4497 */
4498 if (vm_is_notifications_pending_count_zero()) {
4499 plat_ffa_sri_state_set(HANDLED);
4500 }
4501
J-Alves6e2abc62021-12-02 14:58:56 +00004502 if (!receiver_locked.vm->el0_partition &&
4503 !vm_are_global_notifications_pending(receiver_locked)) {
4504 vm_notifications_set_npi_injected(receiver_locked, false);
4505 }
4506
J-Alvesaa79c012021-07-09 14:29:45 +01004507out:
4508 vm_unlock(&receiver_locked);
4509
4510 return ret;
4511}
J-Alvesc8e8a222021-06-08 17:33:52 +01004512
4513/**
 * Prepares the successful return for FFA_NOTIFICATION_INFO_GET, as described
 * in section 17.7.1 of the FF-A v1.1 Beta0 specification.
4516 */
4517static struct ffa_value api_ffa_notification_info_get_success_return(
4518 const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
J-Alvesfe23ebe2021-10-13 16:07:07 +01004519 uint32_t lists_count)
J-Alvesc8e8a222021-06-08 17:33:52 +01004520{
4521 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
4522
4523 /*
4524 * Copying content of ids into ret structure. Use 5 registers (x3-x7) to
4525 * hold the list of ids.
4526 */
4527 memcpy_s(&ret.arg3,
4528 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET, ids,
4529 sizeof(ids[0]) * ids_count);
4530
4531 /*
4532 * According to the spec x2 should have:
4533 * - Bit flagging if there are more notifications pending;
	 * - The count of lists;
	 * - The number of vCPU IDs within each VM-specific list.
4536 */
J-Alvesfe23ebe2021-10-13 16:07:07 +01004537 ret.arg2 = vm_notifications_pending_not_retrieved_by_scheduler()
4538 ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING
4539 : 0;
J-Alvesc8e8a222021-06-08 17:33:52 +01004540
4541 ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
4542 << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
4543
4544 for (unsigned int i = 0; i < lists_count; i++) {
4545 ret.arg2 |= (lists_sizes[i] & FFA_NOTIFICATIONS_LIST_SIZE_MASK)
4546 << FFA_NOTIFICATIONS_LIST_SHIFT(i + 1);
4547 }
4548
4549 return ret;
4550}
4551
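/**
 * Handles FFA_NOTIFICATION_INFO_GET: returns the list of endpoints (and, for
 * per-vCPU notifications, their vCPUs) that have pending notifications, for
 * use by the receiver's scheduler.
 */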
4552struct ffa_value api_ffa_notification_info_get(struct vcpu *current)
4553{
4554 /*
	 * The following set of variables should be populated with the return
	 * info. On successful handling of this interface, they should be
	 * used to populate the 'ret' structure in accordance with table
	 * 17.29 of the FF-A v1.1 Beta0 specification.
4559 */
4560 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
4561 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
4562 uint32_t lists_count = 0;
4563 uint32_t ids_count = 0;
4564 bool list_is_full = false;
J-Alves13394022021-06-30 13:48:49 +01004565 struct ffa_value result;
J-Alvesc8e8a222021-06-08 17:33:52 +01004566
4567 /*
4568 * This interface can only be called at NS virtual/physical FF-A
4569 * instance by the endpoint implementing the primary scheduler and the
4570 * Hypervisor/OS kernel.
4571 * In the SPM, following check passes if call has been forwarded from
4572 * the hypervisor.
4573 */
Karl Meakin5e996992024-05-20 11:27:07 +01004574
4575 if (!vm_is_primary(current->vm)) {
J-Alvesc8e8a222021-06-08 17:33:52 +01004576 dlog_verbose(
4577 "Only the receiver's scheduler can use this "
4578 "interface\n");
4579 return ffa_error(FFA_NOT_SUPPORTED);
4580 }
4581
J-Alvesca058c22021-09-10 14:02:07 +01004582 /*
	 * Forward the call to the other world, and fill the arrays used to
	 * assemble the return.
4585 */
4586 plat_ffa_notification_info_get_forward(
4587 ids, &ids_count, lists_sizes, &lists_count,
4588 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
4589
4590 list_is_full = ids_count == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
4591
J-Alvesc8e8a222021-06-08 17:33:52 +01004592 /* Get notifications' info from this world */
4593 for (ffa_vm_count_t index = 0; index < vm_get_count() && !list_is_full;
4594 ++index) {
4595 struct vm_locked vm_locked = vm_lock(vm_find_index(index));
4596
4597 list_is_full = vm_notifications_info_get(
4598 vm_locked, ids, &ids_count, lists_sizes, &lists_count,
4599 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
4600
4601 vm_unlock(&vm_locked);
4602 }
4603
4604 if (!list_is_full) {
4605 /* Grab notifications info from other world */
J-Alvesfe23ebe2021-10-13 16:07:07 +01004606 plat_ffa_vm_notifications_info_get(
J-Alvesc8e8a222021-06-08 17:33:52 +01004607 ids, &ids_count, lists_sizes, &lists_count,
4608 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
4609 }
4610
4611 if (ids_count == 0) {
J-Alvesca058c22021-09-10 14:02:07 +01004612 dlog_verbose(
4613 "Notification info get has no data to retrieve.\n");
J-Alves13394022021-06-30 13:48:49 +01004614 result = ffa_error(FFA_NO_DATA);
4615 } else {
4616 result = api_ffa_notification_info_get_success_return(
J-Alvesfe23ebe2021-10-13 16:07:07 +01004617 ids, ids_count, lists_sizes, lists_count);
J-Alvesc8e8a222021-06-08 17:33:52 +01004618 }
4619
J-Alvesfe23ebe2021-10-13 16:07:07 +01004620 plat_ffa_sri_state_set(HANDLED);
4621
J-Alves13394022021-06-30 13:48:49 +01004622 return result;
J-Alvesc8e8a222021-06-08 17:33:52 +01004623}
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -07004624
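/**
 * Handles FFA_MEM_PERM_GET: returns the data and instruction access
 * permissions of the page containing `base_addr`, provided it is mapped in
 * the calling EL0 partition's page tables.
 */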
4625struct ffa_value api_ffa_mem_perm_get(vaddr_t base_addr, struct vcpu *current)
4626{
4627 struct vm_locked vm_locked;
4628 struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
4629 bool mode_ret = false;
4630 uint32_t mode = 0;
4631
4632 if (!plat_ffa_is_mem_perm_get_valid(current)) {
J-Alves5a099172024-02-22 14:34:51 +00004633 return ffa_error(FFA_DENIED);
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -07004634 }
4635
4636 if (!(current->vm->el0_partition)) {
4637 return ffa_error(FFA_DENIED);
4638 }
4639
4640 vm_locked = vm_lock(current->vm);
4641
4642 /*
4643 * mm_get_mode is used to check if the given base_addr page is already
4644 * mapped. If the page is unmapped, return error. If the page is mapped
4645 * appropriate attributes are returned to the caller. Note that
4646 * mm_get_mode returns true if the address is in the valid VA range as
4647 * supported by the architecture and MMU configurations, as opposed to
4648 * whether a page is mapped or not. For a page to be known as mapped,
4649 * the API must return true AND the returned mode must not have
4650 * MM_MODE_INVALID set.
4651 */
4652 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
4653 va_add(base_addr, PAGE_SIZE), &mode);
4654 if (!mode_ret || (mode & MM_MODE_INVALID)) {
4655 ret = ffa_error(FFA_INVALID_PARAMETERS);
4656 goto out;
4657 }
4658
4659 /* No memory should be marked RWX */
4660 CHECK((mode & (MM_MODE_R | MM_MODE_W | MM_MODE_X)) !=
4661 (MM_MODE_R | MM_MODE_W | MM_MODE_X));
4662
4663 /*
4664 * S-EL0 partitions are expected to have all their pages marked as
4665 * non-global.
4666 */
4667 CHECK((mode & (MM_MODE_NG | MM_MODE_USER)) ==
4668 (MM_MODE_NG | MM_MODE_USER));
4669
4670 if (mode & MM_MODE_W) {
4671 /* No memory should be writeable but not readable. */
4672 CHECK(mode & MM_MODE_R);
4673 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
4674 .arg2 = (uint32_t)(FFA_MEM_PERM_RW)};
4675 } else if (mode & MM_MODE_R) {
4676 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
4677 .arg2 = (uint32_t)(FFA_MEM_PERM_RX)};
4678 if (!(mode & MM_MODE_X)) {
4679 ret.arg2 = (uint32_t)(FFA_MEM_PERM_RO);
4680 }
4681 }
4682out:
4683 vm_unlock(&vm_locked);
4684 return ret;
4685}
4686
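/**
 * Handles FFA_MEM_PERM_SET: updates the permissions of `page_count` pages
 * starting at `base_addr` in the calling EL0 partition's page tables,
 * restoring the original mapping if any stage of the update fails.
 */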
4687struct ffa_value api_ffa_mem_perm_set(vaddr_t base_addr, uint32_t page_count,
4688 uint32_t mem_perm, struct vcpu *current)
4689{
4690 struct vm_locked vm_locked;
4691 struct ffa_value ret;
4692 bool mode_ret = false;
4693 uint32_t original_mode;
4694 uint32_t new_mode;
4695 struct mpool local_page_pool;
4696
4697 if (!plat_ffa_is_mem_perm_set_valid(current)) {
J-Alves5a099172024-02-22 14:34:51 +00004698 return ffa_error(FFA_DENIED);
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -07004699 }
4700
4701 if (!(current->vm->el0_partition)) {
4702 return ffa_error(FFA_DENIED);
4703 }
4704
4705 if (!is_aligned(va_addr(base_addr), PAGE_SIZE)) {
4706 return ffa_error(FFA_INVALID_PARAMETERS);
4707 }
4708
4709 if ((mem_perm != FFA_MEM_PERM_RW) && (mem_perm != FFA_MEM_PERM_RO) &&
4710 (mem_perm != FFA_MEM_PERM_RX)) {
4711 return ffa_error(FFA_INVALID_PARAMETERS);
4712 }
4713
4714 /*
4715 * Create a local pool so any freed memory can't be used by another
4716 * thread. This is to ensure the original mapping can be restored if any
4717 * stage of the process fails.
4718 */
4719 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
4720
4721 vm_locked = vm_lock(current->vm);
4722
4723 /*
4724 * All regions accessible by the partition are mapped during boot. If we
4725 * cannot get a successful translation for the page range, the request
4726 * to change permissions is rejected.
4727 * mm_get_mode is used to check if the given address range is already
4728 * mapped. If the range is unmapped, return error. If the range is
4729 * mapped appropriate attributes are returned to the caller. Note that
4730 * mm_get_mode returns true if the address is in the valid VA range as
4731 * supported by the architecture and MMU configurations, as opposed to
4732 * whether a page is mapped or not. For a page to be known as mapped,
4733 * the API must return true AND the returned mode must not have
4734 * MM_MODE_INVALID set.
4735 */
4736
4737 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
4738 va_add(base_addr, page_count * PAGE_SIZE),
4739 &original_mode);
4740 if (!mode_ret || (original_mode & MM_MODE_INVALID)) {
4741 ret = ffa_error(FFA_INVALID_PARAMETERS);
4742 goto out;
4743 }
4744
4745 /* Device memory cannot be marked as executable */
4746 if ((original_mode & MM_MODE_D) && (mem_perm == FFA_MEM_PERM_RX)) {
4747 ret = ffa_error(FFA_INVALID_PARAMETERS);
4748 goto out;
4749 }
4750
4751 new_mode = MM_MODE_USER | MM_MODE_NG;
4752
4753 if (mem_perm == FFA_MEM_PERM_RW) {
4754 new_mode |= MM_MODE_R | MM_MODE_W;
4755 } else if (mem_perm == FFA_MEM_PERM_RX) {
4756 new_mode |= MM_MODE_R | MM_MODE_X;
4757 } else if (mem_perm == FFA_MEM_PERM_RO) {
4758 new_mode |= MM_MODE_R;
4759 }
4760
4761 /*
4762 * Safe to re-map memory, since we know the requested permissions are
4763 * valid, and the memory requested to be re-mapped is also valid.
4764 */
4765 if (!mm_identity_prepare(
4766 &vm_locked.vm->ptable, pa_from_va(base_addr),
4767 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
4768 new_mode, &local_page_pool)) {
4769 /*
4770 * Defrag the table into the local page pool.
4771 * mm_identity_prepare could have allocated or freed pages to
4772 * split blocks or tables etc.
4773 */
4774 mm_stage1_defrag(&vm_locked.vm->ptable, &local_page_pool);
4775
4776 /*
4777 * Guaranteed to succeed mapping with old mode since the mapping
4778 * with old mode already existed and we have a local page pool
4779 * that should have sufficient memory to go back to the original
4780 * state.
4781 */
4782 CHECK(mm_identity_prepare(
4783 &vm_locked.vm->ptable, pa_from_va(base_addr),
4784 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
4785 original_mode, &local_page_pool));
4786 mm_identity_commit(
4787 &vm_locked.vm->ptable, pa_from_va(base_addr),
4788 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
4789 original_mode, &local_page_pool);
4790
4791 mm_stage1_defrag(&vm_locked.vm->ptable, &api_page_pool);
4792 ret = ffa_error(FFA_NO_MEMORY);
4793 goto out;
4794 }
4795
4796 mm_identity_commit(
4797 &vm_locked.vm->ptable, pa_from_va(base_addr),
4798 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)), new_mode,
4799 &local_page_pool);
4800
4801 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
4802
4803out:
4804 mpool_fini(&local_page_pool);
4805 vm_unlock(&vm_locked);
4806
4807 return ret;
4808}
Maksims Svecovs71b76702022-05-20 15:32:58 +01004809
4810/**
 * Sends the contents of the given vCPU's log buffer to the log, preceded by
 * the VM ID and vCPU index and followed by a newline.
4813 */
Karl Meakin6f1f1212024-07-16 10:18:16 +01004814void api_flush_log_buffer(struct vcpu_locked *vcpu_locked)
Karl Meakin705b56e2024-05-22 10:04:32 +01004815{
	struct vcpu *vcpu = vcpu_locked->vcpu;
	struct log_buffer *buffer = &vcpu->log_buffer;
	ffa_id_t vm_id = vcpu->vm->id;
	ffa_id_t vcpu_id = vcpu_index(vcpu);

	buffer->chars[buffer->len] = '\0';

	/*
	 * NOTE: The following line is parsed by `hftest.py`.
	 * If you change the format, make sure to update
	 * `HFTEST_CTRL_JSON_REGEX` as well.
	 */
	dlog("[%x %u] %s\n", vm_id, vcpu_id, buffer->chars);
Karl Meakin7de26952024-06-14 14:50:19 +01004828 buffer->len = 0;
Karl Meakin705b56e2024-05-22 10:04:32 +01004829}
4830
4831/**
Karl Meakin740f74f2024-02-14 18:04:48 +00004832 * Implements FF-A v1.2 FFA_CONSOLE_LOG ABI for buffered logging.
Maksims Svecovs71b76702022-05-20 15:32:58 +01004833 */
4834struct ffa_value api_ffa_console_log(const struct ffa_value args,
4835 struct vcpu *current)
4836{
Karl Meakin740f74f2024-02-14 18:04:48 +00004837 /* Maximum number of characters is 128: 16 registers of 8 bytes each. */
4838 char chars[128] = {0};
Karl Meakin0e617d92024-04-05 12:55:22 +01004839 const bool v1_2 = current->vm->ffa_version >= FFA_VERSION_1_2;
Karl Meakin740f74f2024-02-14 18:04:48 +00004840 const bool log32 = args.func == FFA_CONSOLE_LOG_32;
Maksims Svecovs71b76702022-05-20 15:32:58 +01004841
Karl Meakin740f74f2024-02-14 18:04:48 +00004842 /*
4843 * 32bit: always 6 registers
4844 * 64bit and less than v1.2: 6 registers
4845 * 64bit and v1.2 or greater: 16 registers
4846 */
Karl Meakin66a38bd2024-05-28 16:00:56 +01004847 /* NOLINTNEXTLINE(readability-avoid-nested-conditional-operator) */
Karl Meakin740f74f2024-02-14 18:04:48 +00004848 const size_t registers_max = log32 ? 6 : (v1_2 ? 16 : 6);
4849 const size_t chars_max =
4850 registers_max * (log32 ? sizeof(uint32_t) : sizeof(uint64_t));
4851 const size_t chars_count = args.arg1;
Karl Meakinc5cebbc2024-06-17 11:30:27 +01004852 struct vcpu_locked vcpu_locked;
4853 struct log_buffer *log_buffer;
Karl Meakin740f74f2024-02-14 18:04:48 +00004854
4855 assert(args.func == FFA_CONSOLE_LOG_32 ||
4856 args.func == FFA_CONSOLE_LOG_64);
4857
4858 if (chars_count == 0 || chars_count > chars_max) {
Maksims Svecovs71b76702022-05-20 15:32:58 +01004859 return ffa_error(FFA_INVALID_PARAMETERS);
4860 }
4861
Karl Meakin740f74f2024-02-14 18:04:48 +00004862 if (log32) {
4863 uint32_t *registers = (uint32_t *)chars;
Maksims Svecovs71b76702022-05-20 15:32:58 +01004864
Karl Meakin740f74f2024-02-14 18:04:48 +00004865 registers[0] = args.arg2 & 0xffffffff;
4866 registers[1] = args.arg3 & 0xffffffff;
4867 registers[2] = args.arg4 & 0xffffffff;
4868 registers[3] = args.arg5 & 0xffffffff;
4869 registers[4] = args.arg6 & 0xffffffff;
4870 registers[5] = args.arg7 & 0xffffffff;
4871 } else {
4872 uint64_t *registers = (uint64_t *)chars;
4873
4874 registers[0] = args.arg2;
4875 registers[1] = args.arg3;
4876 registers[2] = args.arg4;
4877 registers[3] = args.arg5;
4878 registers[4] = args.arg6;
4879 registers[5] = args.arg7;
4880 if (v1_2) {
4881 registers[6] = args.extended_val.arg8;
4882 registers[7] = args.extended_val.arg9;
4883 registers[8] = args.extended_val.arg10;
4884 registers[9] = args.extended_val.arg11;
4885 registers[10] = args.extended_val.arg12;
4886 registers[11] = args.extended_val.arg13;
4887 registers[12] = args.extended_val.arg14;
4888 registers[13] = args.extended_val.arg15;
4889 registers[14] = args.extended_val.arg16;
4890 registers[15] = args.extended_val.arg17;
4891 }
4892 }
4893
Karl Meakinc5cebbc2024-06-17 11:30:27 +01004894 vcpu_locked = vcpu_lock(current);
4895 log_buffer = &current->log_buffer;
Karl Meakin7de26952024-06-14 14:50:19 +01004896
Karl Meakin740f74f2024-02-14 18:04:48 +00004897 for (size_t i = 0; i < chars_count; i++) {
4898 bool flush = false;
4899 const char c = chars[i];
4900
4901 if (c == '\n' || c == '\0') {
4902 flush = true;
4903 } else {
Karl Meakin7de26952024-06-14 14:50:19 +01004904 log_buffer->chars[log_buffer->len++] = c;
4905 flush = log_buffer->len == LOG_BUFFER_SIZE;
Karl Meakin740f74f2024-02-14 18:04:48 +00004906 }
4907
4908 if (flush) {
Karl Meakin6f1f1212024-07-16 10:18:16 +01004909 api_flush_log_buffer(&vcpu_locked);
Karl Meakin740f74f2024-02-14 18:04:48 +00004910 }
4911 }
Maksims Svecovs71b76702022-05-20 15:32:58 +01004912
Karl Meakinc5cebbc2024-06-17 11:30:27 +01004913 vcpu_unlock(&vcpu_locked);
Maksims Svecovs71b76702022-05-20 15:32:58 +01004914 return (struct ffa_value){.func = FFA_SUCCESS_32};
4915}