/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * Drops the current interrupt priority and deactivates the given interrupt ID
 * for the calling vCPU.
 *
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	struct vcpu_locked current_locked;
	uint32_t int_id;
	int ret = 0;

	current_locked = vcpu_lock(current);
	if (vint_id >= HF_NUM_INTIDS) {
		ret = -1;
		goto out;
	}

	/*
	 * The current implementation maps each virtual interrupt to the
	 * physical interrupt with the same ID.
	 */
	if (pint_id != vint_id) {
		ret = -1;
		goto out;
	}

	/*
	 * A malicious SP could deactivate an interrupt that does not belong to
	 * it. Return an error to indicate failure.
	 */
	if (!vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		dlog_error("No virtual interrupt to be deactivated\n");
		ret = -1;
		goto out;
	}

	if (int_id != vint_id) {
		dlog_error("Unknown interrupt being deactivated %u\n", vint_id);
		ret = -1;
		goto out;
	}

	if (current->requires_deactivate_call) {
		/* There is no preempted vCPU to resume. */
		assert(current->preempted_vcpu == NULL);

		vcpu_secure_interrupt_complete(current_locked);
	}

	/*
	 * Now that the virtual interrupt has been serviced and deactivated,
	 * remove it from the queue, if it was pending.
	 */
	vcpu_interrupt_queue_pop(current_locked, &int_id);
	assert(vint_id == int_id);
out:
	vcpu_unlock(&current_locked);
	return ret;
}
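
/*
 * Illustrative SP-side sketch (an assumption for clarity, not part of this
 * file): after retrieving and handling a virtual interrupt, a partition is
 * expected to deactivate it, which ends up in ffa_interrupts_deactivate()
 * above. The hypercall wrapper names below are assumed for illustration.
 *
 *	uint32_t vint_id = hf_interrupt_get();	// fetch pending vINT ID
 *	handle_device_irq(vint_id);		// SP-specific handling
 *	if (hf_interrupt_deactivate(vint_id) != 0) {
 *		dlog_error("Failed to deactivate %u\n", vint_id);
 *	}
 */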

static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id)
{
	struct vcpu *target_vcpu;

	switch (interrupt_id) {
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * Queues the pending virtual interrupt for the target vCPU, and sets the
 * fields that track secure interrupt processing accordingly.
 */
static void ffa_interrupts_queue_vint(struct vcpu_locked target_vcpu_locked,
				      uint32_t vint_id,
				      struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}

	/* Queue the pending virtual interrupt for the target vCPU. */
	if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
		panic("Exhausted interrupt queue for vcpu of SP: %x\n",
		      target_vcpu->vm->id);
	}
}

/**
 * If interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing interrupts to be
 * delivered again.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing them.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save the current value of the priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD Logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}
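
/*
 * Note on pairing: ffa_interrupts_mask() saves the current mask in the
 * receiver vCPU's prev_interrupt_priority before the SP is resumed, and
 * ffa_interrupts_unmask() restores it once the SP relinquishes cycles. A
 * minimal sketch of the expected call pattern (a simplified assumption;
 * run_vcpu() stands in for the actual resume path):
 *
 *	ffa_interrupts_mask(target_vcpu_locked);  // queue NS/other-S ints
 *	run_vcpu(target_vcpu);                    // SP executes
 *	ffa_interrupts_unmask(target_vcpu);       // delivery resumes
 */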

/**
 * Handles the secure interrupt according to the target vCPU's state,
 * in the case where the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for an S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * If execution was in the NWd, the current vCPU is also
		 * set to the preempted state.
		 */
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. The SPMC migrates it to the current physical CPU and
		 * resumes it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to the target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, so the SPMC resumes the
		 * current vCPU.
		 */
		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state,
 * in the case where the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/* Secure interrupt signaling and queuing for an S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Ideally, we would have to mask non-secure interrupts here,
		 * since the spec mandates that the SPMC ensure an SPMC
		 * scheduled call chain cannot be preempted by a non-secure
		 * interrupt. However, our current design takes care of this
		 * implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU could have migrated to a different
			 * physical CPU. The SPMC migrates it to the current
			 * physical CPU and resumes it.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			next = NULL;
			ffa_interrupts_queue_vint(
				target_vcpu_locked, v_intid,
				(struct vcpu_locked){.vcpu = NULL});
		} else if (ffa_direct_msg_precedes_in_call_chain(
				   current_locked, target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in the blocked state
			 * and resumes the target vCPU to handle the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set to the preempted state.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
						  current_locked);

			next = target_vcpu;
		} else {
			/*
			 * The target vCPU cannot be resumed now because it is
			 * in the BLOCKED state (it yielded CPU cycles using
			 * FFA_YIELD). The SPMC queues the virtual interrupt
			 * and resumes the current vCPU, which could belong to
			 * either a VM or an SP.
			 */
			next = NULL;
			ffa_interrupts_queue_vint(
				target_vcpu_locked, v_intid,
				(struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu) {
			/*
			 * We do not resume a target vCPU that has already been
			 * preempted by an interrupt. Make the vIRQ pending for
			 * the target SP (i.e., queue the interrupt) and
			 * continue to resume the current vCPU. Refer to
			 * section 8.3.2.1 bullet 3 in the FF-A v1.1 EAC0 spec.
			 */

			if (current->vm->id == HF_OTHER_WORLD_ID) {
				/*
				 * The target vCPU must have been preempted by
				 * a non-secure interrupt. It could not have
				 * been preempted by a secure interrupt, as the
				 * current SPMC implementation does not allow
				 * secure interrupt prioritization. Moreover,
				 * the target vCPU should have been in Normal
				 * World scheduled mode, as an SPMC scheduled
				 * mode call chain cannot be preempted by a
				 * non-secure interrupt.
				 */
				CHECK(target_vcpu->scheduling_mode == NWD_MODE);
			}
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}

		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		if (current == target_vcpu) {
			/*
			 * This is the special scenario where the currently
			 * running execution context also happens to be the
			 * target of the secure interrupt. In this case, it
			 * needs to signal completion of the secure interrupt
			 * implicitly. Refer to the embedded comment in the
			 * vcpu.h file for the description of this variable.
			 */

			current->requires_deactivate_call = true;
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}
		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in the OFF/ABORTED state
		 * if it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously, using the ERET conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	switch (intid) {
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			v_intid = HF_VIRTUAL_TIMER_INTID;
			break;
		}
		/*
		 * It is possible for target_vcpu to be NULL in the case of a
		 * spurious timer interrupt. Fall through.
		 */
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * A spurious interrupt ID indicates that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		*next = NULL;
		return;
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu(current, intid);
		break;
	}

	/*
	 * End the interrupt to drop the running priority. This also
	 * deactivates the physical interrupt. If not, the interrupt could
	 * trigger again after resuming the current vCPU.
	 */
	plat_interrupts_end_of_interrupt(intid);

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;
		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/*
		 * The SPMC has started handling a secure interrupt with a
		 * clean slate. This flag should be false unless there is a
		 * bug in the source code. Hence, use assert rather than CHECK.
		 */
		assert(!target_vcpu->requires_deactivate_call);

		/* Set the interrupt pending in the target vCPU. */
		vcpu_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in case handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either S-EL0
			 * or S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? ffa_interrupts_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: ffa_interrupts_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
	struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		api_interrupt_inject_locked(target_locked,
					    HF_NOTIFICATION_PENDING_INTID,
					    current_locked, NULL);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been part of an NWd scheduled call chain. Therefore, it is
	 * preempted and execution is either handed back to the normal world or
	 * to the previous SP vCPU in the call chain through the FFA_INTERRUPT
	 * ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU, passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of the NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, this may not be necessary, since
	 * next is guaranteed to be in the BLOCKED state as it is the
	 * predecessor of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}
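
/*
 * Worked example of the unwinding above: for an NWd scheduled call chain
 * NWd -> SP1 -> SP2, a non-secure interrupt arriving while SP2 runs leaves
 * SP2 in the PREEMPTED state and resumes SP1 with FFA_INTERRUPT_32, whose
 * arg1 carries SP2's VM and vCPU IDs. Had SP2 been the first node in the
 * chain (no predecessor), api_preempt() would instead hand execution back
 * to the normal world.
 */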

static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable the relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, since an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to target any
		 * physical CPU other than the one it is currently running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}
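
/*
 * Illustrative SP-side sketch (an assumption, not part of this file): an SP
 * could use the paravirtualized interface above, e.g. to retarget one of its
 * interrupts to another PE. The wrapper name hf_interrupt_reconfigure and
 * the interrupt ID below are hypothetical.
 *
 *	// Route physical interrupt 0x20 to the PE with index 1.
 *	if (hf_interrupt_reconfigure(0x20, INT_RECONFIGURE_TARGET_PE, 1) != 0) {
 *		// Rejected: bad value, or the interrupt is not owned by
 *		// this SP.
 *	}
 */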

/* Returns the virtual interrupt ID to be handled by the SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	uint32_t int_id;

	/*
	 * If there are any virtual interrupts in the queue, return the first
	 * entry. Otherwise, return the pending interrupt from the bitmap.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		struct interrupts *interrupts;

		/*
		 * Mark the virtual interrupt as no longer pending and
		 * decrement the count.
		 */
		interrupts = &current_locked.vcpu->interrupts;
		vcpu_virt_interrupt_clear_pending(interrupts, int_id);
		vcpu_interrupt_count_decrement(current_locked, interrupts,
					       int_id);

		return int_id;
	}

	return api_interrupt_get(current_locked);
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->requires_deactivate_call = false;
}

bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	uint32_t intid;

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &intid)) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in the WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC schedule
		 * mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose("%s: Pending interrupt, intercepting FF-A call.\n",
			     __func__);

		return true;
	}

	return false;
}