/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/gicv3.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/interrupts.h"
#include "hf/ffa/vm.h"
#include "hf/ffa_internal.h"
#include "hf/plat/interrupts.h"
#include "hf/vm.h"

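/**
 * FFA_RUN is not forwarded here: the arguments are ignored and false is
 * returned, so the caller handles the FFA_RUN invocation locally.
 */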
bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
				struct ffa_value *ret)
{
	(void)vm_id;
	(void)vcpu_idx;
	(void)ret;

	return false;
}

/**
 * Check if the current VM can resume the target VM using the FFA_RUN ABI.
 */
bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
			       struct ffa_value *run_ret, struct vcpu **next)
{
	/*
	 * Under the partition runtime model specified in the FF-A v1.1-Beta0
	 * spec, an SP can invoke FFA_RUN to resume a target SP.
	 */
	struct vcpu *target_vcpu;
	struct vcpu *current = current_locked.vcpu;
	bool ret = true;
	struct vm *vm;
	struct vcpu_locked target_locked;
	struct two_vcpu_locked vcpus_locked;

	vm = vm_find(target_vm_id);
	if (vm == NULL) {
		return false;
	}

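	/*
	 * vCPUs of an MP partition are pinned: a vCPU may only be resumed on
	 * the physical CPU whose index matches its own.
	 */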
	if (vm_is_mp(vm) && vm_is_mp(current->vm) &&
	    vcpu_idx != cpu_index(current->cpu)) {
		dlog_verbose("vcpu_idx (%d) != pcpu index (%zu)\n", vcpu_idx,
			     cpu_index(current->cpu));
		return false;
	}

	target_vcpu = api_ffa_get_vm_vcpu(vm, current);

	vcpu_unlock(&current_locked);

	/* Lock both vCPUs at once to avoid deadlock. */
	vcpus_locked = vcpu_lock_both(current, target_vcpu);
	current_locked = vcpus_locked.vcpu1;
	target_locked = vcpus_locked.vcpu2;

	/* Only the primary VM can turn ON a vCPU that is currently OFF. */
	if (!vm_is_primary(current->vm) &&
	    target_vcpu->state == VCPU_STATE_OFF) {
		run_ret->arg2 = FFA_DENIED;
		ret = false;
		goto out;
	}

	/*
	 * An SPx can resume another SPy only when SPy is in PREEMPTED or
	 * BLOCKED state.
	 */
	if (vm_id_is_current_world(current->vm->id) &&
	    vm_id_is_current_world(target_vm_id)) {
		/* Target SP must be in preempted or blocked state. */
		if (target_vcpu->state != VCPU_STATE_PREEMPTED &&
		    target_vcpu->state != VCPU_STATE_BLOCKED) {
			run_ret->arg2 = FFA_DENIED;
			ret = false;
			goto out;
		}
	}

	/* An SP cannot invoke FFA_RUN to resume a normal world VM. */
	if (!vm_id_is_current_world(target_vm_id)) {
		run_ret->arg2 = FFA_DENIED;
		ret = false;
		goto out;
	}

	if (vm_id_is_current_world(current->vm->id)) {
		/*
		 * Refer to FF-A v1.1 EAC0 spec, section 8.3.2.2.1:
		 * Signaling an Other S-Int in blocked state.
		 */
		if (current->preempted_vcpu != NULL) {
			/*
			 * After the target SP execution context has handled
			 * the interrupt, it uses the FFA_RUN ABI to resume
			 * the request due to which it had entered the blocked
			 * state earlier.
			 * Deny the state transition if the SP didn't perform
			 * the deactivation of the secure virtual interrupt.
			 */
			if (vcpu_virt_interrupt_count_get(current_locked) > 0) {
				run_ret->arg2 = FFA_DENIED;
				ret = false;
				goto out;
			}

			/*
			 * Refer to Figure 8.13, Scenario 1. Implementation
			 * choice: the SPMC left all intermediate SP execution
			 * contexts in blocked state. Hence, the SPMC now
			 * bypasses these intermediate execution contexts and
			 * resumes the SP execution context that was originally
			 * preempted.
			 */
			*next = current->preempted_vcpu;
			if (target_vcpu != current->preempted_vcpu) {
				dlog_verbose("Skipping intermediate vCPUs\n");
			}

			/*
			 * Clear fields corresponding to secure interrupt
			 * handling.
			 */
			vcpu_secure_interrupt_complete(current_locked);
		}
	}

	/* Check if a vCPU of an SP is being resumed. */
	if (vm_id_is_current_world(target_vm_id)) {
		/*
		 * A call chain cannot span CPUs. The target vCPU can only be
		 * resumed by FFA_RUN on the present CPU.
		 */
		if ((target_vcpu->call_chain.prev_node != NULL ||
		     target_vcpu->call_chain.next_node != NULL) &&
		    (target_vcpu->cpu != current->cpu)) {
			run_ret->arg2 = FFA_DENIED;
			ret = false;
			goto out;
		}
	}

out:
	vcpu_unlock(&target_locked);
	return ret;
}

/**
 * SPMC scheduled call chain is completely unwound.
 */
static void ffa_cpu_cycles_exit_spmc_schedule_mode(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;

	current = current_locked.vcpu;
	assert(current->call_chain.next_node == NULL);
	CHECK(current->scheduling_mode == SPMC_MODE);

	current->scheduling_mode = NONE;
	current->rt_model = RTM_NONE;
}

/**
 * An SP in RUNNING state could have been pre-empted by a secure interrupt. The
 * SPM would switch the execution to the vCPU of the target SP responsible for
 * interrupt handling. Upon completion of interrupt handling, the vCPU performs
 * interrupt signal completion through the FFA_MSG_WAIT ABI (provided it was in
 * WAITING state when the interrupt was signaled).
 *
 * The SPM then resumes the original SP that was initially pre-empted.
 */
static struct ffa_value ffa_cpu_cycles_preempted_vcpu_resume(
	struct vcpu_locked current_locked, struct vcpu **next)
{
	struct ffa_value ffa_ret = (struct ffa_value){.func = FFA_MSG_WAIT_32};
	struct vcpu *target_vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu_locked target_locked;
	struct two_vcpu_locked vcpus_locked;

	CHECK(current->preempted_vcpu != NULL);
	CHECK(current->preempted_vcpu->state == VCPU_STATE_PREEMPTED);

	target_vcpu = current->preempted_vcpu;
	vcpu_unlock(&current_locked);

	/* Lock both vCPUs at once to avoid deadlock. */
	vcpus_locked = vcpu_lock_both(current, target_vcpu);
	current_locked = vcpus_locked.vcpu1;
	target_locked = vcpus_locked.vcpu2;

	/* Reset the fields tracking secure interrupt processing. */
	vcpu_secure_interrupt_complete(current_locked);

	/* SPMC scheduled call chain is completely unwound. */
	ffa_cpu_cycles_exit_spmc_schedule_mode(current_locked);
	assert(current->call_chain.prev_node == NULL);

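	/*
	 * Per FFA_MSG_WAIT semantics, the current vCPU transitions to the
	 * WAITING state, while the originally pre-empted vCPU is set running
	 * and resumed below.
	 */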
	current->state = VCPU_STATE_WAITING;

	vcpu_set_running(target_locked, NULL);

	vcpu_unlock(&target_locked);

	/* Restore interrupt priority mask. */
	ffa_interrupts_unmask(current);

	/* The pre-empted vCPU should be run. */
	*next = target_vcpu;

	return ffa_ret;
}

static struct ffa_value ffa_msg_wait_complete(struct vcpu_locked current_locked,
					      struct vcpu **next)
{
	struct vcpu *current = current_locked.vcpu;

	current->scheduling_mode = NONE;
	current->rt_model = RTM_NONE;

	/*
	 * We no longer need to do a managed exit, so clear the interrupt if
	 * needed.
	 */
	vcpu_virt_interrupt_clear(current_locked, HF_MANAGED_EXIT_INTID);

	/* Relinquish control back to the NWd. */
	*next = api_switch_to_other_world(
		current_locked, (struct ffa_value){.func = FFA_MSG_WAIT_32},
		VCPU_STATE_WAITING);

	return api_ffa_interrupt_return(0);
}

/**
 * Deals with the common case of intercepting an FFA_MSG_WAIT call.
 */
static bool ffa_cpu_cycles_msg_wait_intercept(struct vcpu_locked current_locked,
					      struct vcpu **next,
					      struct ffa_value *ffa_ret)
{
	struct two_vcpu_locked both_vcpu_locks;
	struct vcpu *current = current_locked.vcpu;
	bool ret = false;

	assert(next != NULL);
	assert(*next != NULL);

	vcpu_unlock(&current_locked);

	both_vcpu_locks = vcpu_lock_both(current, *next);

	/*
	 * Check if there is a pending interrupt, and whether the partition
	 * expects to notify the scheduler or to resume straight away. Either
	 * trigger the SRI for later donation of CPU cycles, or ERET
	 * FFA_INTERRUPT back to the caller.
	 */
	if (ffa_interrupts_intercept_call(both_vcpu_locks.vcpu1,
					  both_vcpu_locks.vcpu2, ffa_ret)) {
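		/* Resume the current vCPU with the updated return value. */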
		*next = NULL;
		ret = true;
	}

	vcpu_unlock(&both_vcpu_locks.vcpu2);

	return ret;
}

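/**
 * Picks the next SP execution context to bootstrap on this CPU, following the
 * boot order. Returns true with *next set when one is found, and false once
 * all SPs on this CPU have been initialized.
 */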
static bool sp_boot_next(struct vcpu_locked current_locked, struct vcpu **next)
{
	struct vcpu *vcpu_next = NULL;
	struct vcpu *current = current_locked.vcpu;
	struct vm *next_vm;
	size_t cpu_indx = cpu_index(current->cpu);

	if (current->cpu->last_sp_initialized) {
		return false;
	}

	if (!atomic_load_explicit(&current->vm->aborting,
				  memory_order_relaxed)) {
		/* vCPU has just returned from successful initialization. */
		dlog_verbose(
			"Initialized execution context of VM: %#x on CPU: %zu, "
			"boot_order: %u\n",
			current->vm->id, cpu_index(current->cpu),
			current->vm->boot_order);
	}

	if (cpu_index(current_locked.vcpu->cpu) == PRIMARY_CPU_IDX) {
		next_vm = vm_get_next_boot(current->vm);
	} else {
		/* SP boot chain on secondary CPU. */
		next_vm = vm_get_next_boot_secondary_core(current->vm);
	}

	current->state = VCPU_STATE_WAITING;
	current->rt_model = RTM_NONE;
	current->scheduling_mode = NONE;

	/*
	 * Pick the next SP's vCPU to be booted. Once all SPs have booted
	 * (next_vm is NULL), return execution to the NWd.
	 */
	if (next_vm == NULL) {
		current->cpu->last_sp_initialized = true;
		goto out;
	}

	vcpu_next = vm_get_vcpu(next_vm, cpu_indx);

	/*
	 * An SP's execution context needs to be bootstrapped if:
	 * - It has never been initialized before.
	 * - Or it was turned off when the CPU on which it was pinned was
	 *   powered down.
	 */
	if (vcpu_next->rt_model == RTM_SP_INIT ||
	    vcpu_next->state == VCPU_STATE_OFF) {
		vcpu_next->rt_model = RTM_SP_INIT;
		arch_regs_reset(vcpu_next);
		vcpu_next->cpu = current->cpu;
		vcpu_next->state = VCPU_STATE_RUNNING;
		vcpu_next->regs_available = false;
		vcpu_set_phys_core_idx(vcpu_next);
		arch_regs_set_pc_arg(&vcpu_next->regs,
				     vcpu_next->vm->secondary_ep, 0ULL);

		if (cpu_index(current_locked.vcpu->cpu) == PRIMARY_CPU_IDX) {
			/*
			 * Boot information is passed by the SPMC to the SP's
			 * execution context only on the primary CPU.
			 */
			vcpu_set_boot_info_gp_reg(vcpu_next);
		}

		*next = vcpu_next;

		return true;
	}
out:
	dlog_notice("Finished bootstrapping all SPs on CPU%zx\n", cpu_indx);
	return false;
}

/**
 * The invocation of FFA_MSG_WAIT at the secure virtual FF-A instance is
 * compliant with the FF-A v1.1 EAC0 specification. It only performs the state
 * transition from RUNNING to WAITING for the following Partition runtime
 * models: RTM_FFA_RUN, RTM_SEC_INTERRUPT, RTM_SP_INIT.
 */
struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
	struct vcpu_locked current_locked, struct vcpu **next)
{
	struct ffa_value ret = api_ffa_interrupt_return(0);
	struct vcpu *current = current_locked.vcpu;

	switch (current->rt_model) {
	case RTM_SP_INIT:
		if (!sp_boot_next(current_locked, next)) {
			ret = ffa_msg_wait_complete(current_locked, next);

			if (ffa_cpu_cycles_msg_wait_intercept(current_locked,
							      next, &ret)) {
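				/*
				 * `ret` was already updated by the intercept
				 * call; nothing more to do.
				 */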
			}
		}
		break;
	case RTM_SEC_INTERRUPT:
		/*
		 * Either resume the preempted SP or complete the FFA_MSG_WAIT.
		 */
		assert(current->preempted_vcpu != NULL);
		ffa_cpu_cycles_preempted_vcpu_resume(current_locked, next);

		if (ffa_cpu_cycles_msg_wait_intercept(current_locked, next,
						      &ret)) {
			break;
		}

		/*
		 * If CPU cycles were allocated through the FFA_RUN interface,
		 * allow the interrupts (if they were masked earlier) before
		 * returning control to the NWd.
		 */
		ffa_interrupts_unmask(current);
		break;
	case RTM_FFA_RUN:
		ret = ffa_msg_wait_complete(current_locked, next);

		if (ffa_cpu_cycles_msg_wait_intercept(current_locked, next,
						      &ret)) {
			break;
		}

		/*
		 * If CPU cycles were allocated through the FFA_RUN interface,
		 * allow the interrupts (if they were masked earlier) before
		 * returning control to the NWd.
		 */
		ffa_interrupts_unmask(current);

		break;
	default:
		panic("%s: unexpected runtime model %x for [%x %x]", __func__,
		      current->rt_model, current->vm->id,
		      cpu_index(current->cpu));
	}

	vcpu_unlock(&current_locked);

	return ret;
}

/*
 * Initialize the scheduling mode and/or Partition runtime model of the target
 * SP upon being resumed by an FFA_RUN ABI.
 */
void ffa_cpu_cycles_init_schedule_mode_ffa_run(
	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
	struct vcpu *vcpu = target_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;

	/*
	 * Scenario 1 in Table 8.4; therefore, the SPMC could be resuming a
	 * vCPU that was part of NWd scheduled mode.
	 */
	CHECK(vcpu->scheduling_mode != SPMC_MODE);

	/* Section 8.2.3, bullet 4.2 of the FF-A v1.1 EAC0 spec. */
	if (vcpu->state == VCPU_STATE_WAITING) {
		assert(vcpu->rt_model == RTM_SP_INIT ||
		       vcpu->rt_model == RTM_NONE);
		vcpu->rt_model = RTM_FFA_RUN;

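		/*
		 * The vCPU joins the NWd scheduled mode when the CPU cycles
		 * were allocated, directly or transitively, by the normal
		 * world.
		 */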
		if (!vm_id_is_current_world(current->vm->id) ||
		    (current->scheduling_mode == NWD_MODE)) {
			vcpu->scheduling_mode = NWD_MODE;
		}
	} else {
		/* The SP vCPU would have been pre-empted or blocked earlier. */
		CHECK(vcpu->state == VCPU_STATE_PREEMPTED ||
		      vcpu->state == VCPU_STATE_BLOCKED);
	}

	ffa_interrupts_mask(target_locked);
}

/*
 * Prepare to yield execution back to the VM/SP that allocated CPU cycles and
 * move to BLOCKED state. If the CPU cycles were allocated to the current
 * execution context by the SPMC to handle a secure virtual interrupt, then
 * the FFA_YIELD invocation is essentially a no-op.
 */
struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
					      struct vcpu **next,
					      uint32_t timeout_low,
					      uint32_t timeout_high)
{
	struct ffa_value ret_args = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu *current = current_locked.vcpu;
	struct ffa_value ret = {
		.func = FFA_YIELD_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
		.arg2 = timeout_low,
		.arg3 = timeout_high,
	};

	switch (current->rt_model) {
	case RTM_FFA_DIR_REQ:
		assert(current->direct_request_origin.vm_id !=
		       HF_INVALID_VM_ID);
		if (current->call_chain.prev_node == NULL) {
			/*
			 * Relinquish cycles to the NWd VM that sent the direct
			 * request message to the current SP.
			 */
			*next = api_switch_to_other_world(current_locked, ret,
							  VCPU_STATE_BLOCKED);
		} else {
			/*
			 * Relinquish cycles to the SP that sent the direct
			 * request message to the current SP.
			 */
			*next = api_switch_to_vm(
				current_locked, ret, VCPU_STATE_BLOCKED,
				current->direct_request_origin.vm_id);
		}
		break;
	case RTM_SEC_INTERRUPT: {
		/*
		 * The SPMC does not implement a scheduler needed to resume the
		 * current vCPU upon timeout expiration. Hence, the SPMC makes
		 * the implementation-defined choice to treat an FFA_YIELD
		 * invocation as a no-op if the SP execution context is in the
		 * secure interrupt runtime model. This does not violate the
		 * FF-A spec, as the spec does not mandate the timeout to be
		 * honored. Moreover, the timeout specified by an endpoint is
		 * just a hint to the partition manager which allocated the
		 * CPU cycles. Resume the current vCPU.
		 */
		*next = NULL;
		break;
	}
	default:
		CHECK(current->rt_model == RTM_FFA_RUN);
		*next = api_switch_to_primary(current_locked, ret,
					      VCPU_STATE_BLOCKED);
		break;
	}

	/*
	 * Before yielding CPU cycles, allow the interrupts (if they were
	 * masked earlier).
	 */
	if (*next != NULL) {
		ffa_interrupts_unmask(current);
	}

	return ret_args;
}

/**
 * Validates the Runtime model for FFA_RUN. Refer to section 7.2 of the FF-A
 * v1.1 EAC0 spec.
 */
static bool ffa_cpu_cycles_check_rtm_ffa_run(struct vcpu_locked current_locked,
					     struct vcpu_locked locked_vcpu,
					     uint32_t func,
					     enum vcpu_state *next_state)
{
	switch (func) {
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_MSG_SEND_DIRECT_REQ2_64:
		/* Fall through. */
	case FFA_RUN_32: {
		/* Rules 1,2 section 7.2 EAC0 spec. */
		if (ffa_direct_msg_precedes_in_call_chain(current_locked,
							  locked_vcpu)) {
			return false;
		}
		*next_state = VCPU_STATE_BLOCKED;
		return true;
	}
	case FFA_MSG_WAIT_32:
		/* Rule 4 section 7.2 EAC0 spec. */
		*next_state = VCPU_STATE_WAITING;
		return true;
	case FFA_YIELD_32:
		/* Rule 5 section 7.2 EAC0 spec. */
		*next_state = VCPU_STATE_BLOCKED;
		return true;
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64:
		/* Rule 3 section 7.2 EAC0 spec. Fall through. */
	default:
		/* Deny state transitions by default. */
		return false;
	}
}

/**
 * Validates the Runtime model for FFA_MSG_SEND_DIRECT_REQ and
 * FFA_MSG_SEND_DIRECT_REQ2. Refer to section 8.3 of the FF-A
 * v1.2 spec.
 */
static bool ffa_cpu_cycles_check_rtm_ffa_dir_req(
	struct vcpu_locked current_locked, struct vcpu_locked locked_vcpu,
	ffa_id_t receiver_vm_id, uint32_t func, enum vcpu_state *next_state)
{
	switch (func) {
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_MSG_SEND_DIRECT_REQ2_64:
		/* Fall through. */
	case FFA_RUN_32: {
		/* Rules 1,2. */
		if (ffa_direct_msg_precedes_in_call_chain(current_locked,
							  locked_vcpu)) {
			return false;
		}

		*next_state = VCPU_STATE_BLOCKED;
		return true;
	}
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64: {
		/* Rule 3. */
		if (current_locked.vcpu->direct_request_origin.vm_id ==
		    receiver_vm_id) {
			*next_state = VCPU_STATE_WAITING;
			return true;
		}

		return false;
	}
	case FFA_YIELD_32:
		/* Rule 3, section 8.3 of the FF-A v1.2 spec. */
		*next_state = VCPU_STATE_BLOCKED;
		return true;
	case FFA_MSG_WAIT_32:
		/* Rule 4. Fall through. */
	default:
		/* Deny state transitions by default. */
		return false;
	}
}

/**
 * Validates the Runtime model for Secure interrupt handling. Refer to section
 * 8.4 of the FF-A v1.2 ALP0 spec.
 */
static bool ffa_cpu_cycles_check_rtm_sec_interrupt(
	struct vcpu_locked current_locked, struct vcpu_locked locked_vcpu,
	uint32_t func, enum vcpu_state *next_state)
{
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *vcpu = locked_vcpu.vcpu;

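	/* Secure interrupt handling is always SPMC scheduled. */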
	CHECK(current->scheduling_mode == SPMC_MODE);

	switch (func) {
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_MSG_SEND_DIRECT_REQ2_64:
		/* Rule 3. */
		*next_state = VCPU_STATE_BLOCKED;
		return true;
	case FFA_RUN_32: {
		/* Rule 6. */
		if (vcpu->state == VCPU_STATE_PREEMPTED) {
			*next_state = VCPU_STATE_BLOCKED;
			return true;
		}

		return false;
	}
	case FFA_MSG_WAIT_32:
		/* Rule 2. */
		*next_state = VCPU_STATE_WAITING;
		return true;
	case FFA_YIELD_32:
		/* Rule 3, section 8.4 of the FF-A v1.2 spec. */
		*next_state = VCPU_STATE_BLOCKED;
		return true;
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64:
		/* Rule 5. Fall through. */
	default:
		/* Deny state transitions by default. */
		return false;
	}
}

/**
 * Validates the Runtime model for SP initialization. Refer to section
 * 8.3 of the FF-A v1.2 ALP0 spec.
 */
static bool ffa_cpu_cycles_check_rtm_sp_init(struct vcpu_locked locked_vcpu,
					     uint32_t func,
					     enum vcpu_state *next_state)
{
	switch (func) {
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_MSG_SEND_DIRECT_REQ2_64: {
		struct vcpu *vcpu = locked_vcpu.vcpu;

		assert(vcpu != NULL);
		/* Rule 1. */
		if (vcpu->rt_model != RTM_SP_INIT) {
			*next_state = VCPU_STATE_BLOCKED;
			return true;
		}

		return false;
	}
	case FFA_MSG_WAIT_32:
		/* Rule 2. Fall through. */
	case FFA_ERROR_32:
		/* Rule 3. */
		*next_state = VCPU_STATE_WAITING;
		return true;
	case FFA_YIELD_32:
		/* Rule 4. Fall through. */
	case FFA_RUN_32:
		/* Rule 6. Fall through. */
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64:
		/* Rule 5. Fall through. */
	default:
		/* Deny state transitions by default. */
		return false;
	}
}

/**
 * Check if the runtime model (state machine) of the current SP supports the
 * given FF-A ABI invocation. If yes, next_state represents the state to which
 * the current vCPU would transition upon the FF-A ABI invocation as determined
 * by the Partition runtime model.
 */
bool ffa_cpu_cycles_check_runtime_state_transition(
	struct vcpu_locked current_locked, ffa_id_t vm_id,
	ffa_id_t receiver_vm_id, struct vcpu_locked locked_vcpu, uint32_t func,
	enum vcpu_state *next_state)
{
	bool allowed = false;
	struct vcpu *current = current_locked.vcpu;

	assert(current != NULL);

	/* Perform state transition checks only for Secure Partitions. */
	if (!vm_id_is_current_world(vm_id)) {
		return true;
	}

	switch (current->rt_model) {
	case RTM_FFA_RUN:
		allowed = ffa_cpu_cycles_check_rtm_ffa_run(
			current_locked, locked_vcpu, func, next_state);
		break;
	case RTM_FFA_DIR_REQ:
		allowed = ffa_cpu_cycles_check_rtm_ffa_dir_req(
			current_locked, locked_vcpu, receiver_vm_id, func,
			next_state);
		break;
	case RTM_SEC_INTERRUPT:
		allowed = ffa_cpu_cycles_check_rtm_sec_interrupt(
			current_locked, locked_vcpu, func, next_state);
		break;
	case RTM_SP_INIT:
		allowed = ffa_cpu_cycles_check_rtm_sp_init(locked_vcpu, func,
							   next_state);
		break;
	default:
		dlog_error(
			"Illegal Runtime Model specified by SP%x on CPU%zx\n",
			current->vm->id, cpu_index(current->cpu));
		allowed = false;
		break;
	}

	if (!allowed) {
		dlog_verbose("State transition denied\n");
	}

	return allowed;
}

/*
 * Handle the FFA_ERROR_32 call according to the given error code.
 *
 * Error codes other than FFA_ABORTED, and cases of FFA_ABORTED outside the
 * RTM_SP_INIT runtime model, are not implemented. Refer to section 8.5
 * of the FF-A v1.2 spec.
 */
struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
					 struct vcpu **next,
					 enum ffa_error error_code)
{
	struct vcpu_locked current_locked;
	struct vm_locked vm_locked;
	enum partition_runtime_model rt_model;
	struct ffa_value ret = api_ffa_interrupt_return(0);

	vm_locked = vm_lock(current->vm);
	current_locked = vcpu_lock(current);
	rt_model = current_locked.vcpu->rt_model;

	if (error_code == FFA_ABORTED && rt_model == RTM_SP_INIT) {
		dlog_error("Aborting SP %#x from vCPU %u\n", current->vm->id,
			   vcpu_index(current));

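		/*
		 * Mark the VM as aborting so that other paths, such as
		 * sp_boot_next, can account for the failed initialization.
		 */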
		atomic_store_explicit(&current->vm->aborting, true,
				      memory_order_relaxed);

		ffa_vm_free_resources(vm_locked);

		if (sp_boot_next(current_locked, next)) {
			goto out;
		}

		/*
		 * Relinquish control back to the NWd. Return FFA_MSG_WAIT_32
		 * to indicate to the SPMD that the SPMC has successfully
		 * finished initialization.
		 */
		*next = api_switch_to_other_world(
			current_locked,
			(struct ffa_value){.func = FFA_MSG_WAIT_32},
			VCPU_STATE_ABORTED);

		goto out;
	}
	ret = ffa_error(FFA_NOT_SUPPORTED);
out:
	vcpu_unlock(&current_locked);
	vm_unlock(&vm_locked);
	return ret;
}