/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * Drops the current interrupt priority and deactivates the given interrupt ID
 * for the calling vCPU.
 *
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	struct vcpu_locked current_locked;
	uint32_t int_id;
	int ret = 0;

	current_locked = vcpu_lock(current);
	if (vint_id >= HF_NUM_INTIDS) {
		ret = -1;
		goto out;
	}

	/*
	 * The current implementation maps each virtual interrupt to the
	 * physical interrupt with the same ID.
	 */
	if (pint_id != vint_id) {
		ret = -1;
		goto out;
	}

	/*
	 * A malicious SP could deactivate an interrupt that does not belong to
	 * it. Return an error to indicate failure.
	 */
	if (!vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		dlog_error("No virtual interrupt to be deactivated\n");
		ret = -1;
		goto out;
	}

	if (int_id != vint_id) {
		dlog_error("Unknown interrupt being deactivated %u\n", vint_id);
		ret = -1;
		goto out;
	}

	if (current->requires_deactivate_call) {
		/* There is no preempted vCPU to resume. */
		assert(current->preempted_vcpu == NULL);

		vcpu_secure_interrupt_complete(current_locked);
	}

	/*
	 * Now that the virtual interrupt has been serviced and deactivated,
	 * remove it from the queue, if it was pending.
	 */
	vcpu_interrupt_queue_pop(current_locked, &int_id);
	assert(vint_id == int_id);
out:
	vcpu_unlock(&current_locked);
	return ret;
}

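/**
 * Finds which VM/SP owns the given physical interrupt and returns the
 * corresponding vCPU context for the current physical CPU, or NULL if no
 * partition has registered the interrupt.
 */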
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

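/**
 * Maps the acknowledged physical interrupt ID to a virtual interrupt ID
 * (returned through `v_intid`) and finds the vCPU that should handle it.
 * Returns NULL if the interrupt turns out to be spurious or has no pending
 * target, in which case the current vCPU is resumed.
 */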
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled, this returns NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * target_vcpu may be NULL in case of a spurious timer
		 * interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * Queues the pending virtual interrupt for the target vCPU. The fields
 * tracking secure interrupt processing are updated accordingly.
 */
static void ffa_interrupts_queue_vint(struct vcpu_locked target_vcpu_locked,
				      uint32_t vint_id,
				      struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}

	/* Queue the pending virtual interrupt for the target vCPU. */
	if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
		panic("Exhausted interrupt queue for vcpu of SP: %x\n",
		      target_vcpu->vm->id);
	}
}

/**
 * If interrupts were masked by the SPMC before an SP's vCPU was resumed,
 * restore the priority mask, thereby allowing the interrupts to be delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * will not trigger, which has the same effect as queueing them.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save the current value of the priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

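/*
 * Background note on the masking above (GIC semantics, stated here as an
 * assumption about the underlying interrupt controller): numerically lower
 * priority values are more significant, and the PE is only signaled
 * interrupts whose priority value is lower than the current mask. Lowering
 * the mask to SWD_MASK_ALL_INT or SWD_MASK_NS_INT therefore prevents the
 * corresponding classes of physical interrupts from firing until
 * ffa_interrupts_unmask() restores the saved value.
 */
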
/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for an S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * If execution came from the NWd, the preempted vCPU is also
		 * put in the PREEMPTED state.
		 */
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. The SPMC migrates it to the current physical CPU and
		 * resumes it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to the target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, so the SPMC resumes the
		 * current vCPU.
		 */
		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * If the target vCPU lives on a different physical CPU, it must be
	 * the sole vCPU of a UP SP that has migrated.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for an S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Ideally, non-secure interrupts would have to be masked here,
		 * since the spec mandates that the SPMC ensure an SPMC
		 * scheduled call chain cannot be preempted by a non-secure
		 * interrupt. However, the current design takes care of this
		 * implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU could have migrated to a different
			 * physical CPU. The SPMC migrates it to the current
			 * physical CPU and resumes it.
			 */
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in blocked state and
			 * resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * Execution preempted the call chain that involves
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
						  current_locked);
			next = target_vcpu;
		} else {
			/*
			 * Either:
			 * - The target vCPU has migrated to a different
			 *   physical CPU. Hence, it cannot be resumed on this
			 *   CPU, and the SPMC resumes the current vCPU.
			 * - The target vCPU cannot be resumed now because it
			 *   is in BLOCKED state (it yielded CPU cycles using
			 *   FFA_YIELD). The SPMC queues the virtual interrupt
			 *   and resumes the current vCPU, which could belong
			 *   to either a VM or an SP.
			 */
			next = NULL;
			ffa_interrupts_queue_vint(
				target_vcpu_locked, v_intid,
				(struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt, as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the
			 * target vCPU should have been in Normal World
			 * scheduled mode, as an SPMC scheduled mode call
			 * chain cannot be preempted by a non-secure
			 * interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}

		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		if (current == target_vcpu) {
			/*
			 * This is the special scenario where the currently
			 * running execution context also happens to be the
			 * target of the secure interrupt. In this case, it
			 * needs to signal completion of the secure interrupt
			 * implicitly. Refer to the comment in vcpu.h for the
			 * description of this field.
			 */
			current->requires_deactivate_call = true;
		}

		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state if
		 * it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously, using the eret conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so there is no need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. Otherwise, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;

		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/*
		 * The SPMC has started handling a secure interrupt with a
		 * clean slate. This flag should be false unless there is a
		 * bug in the source code. Hence, use assert rather than
		 * CHECK.
		 */
		assert(!target_vcpu->requires_deactivate_call);

		/* Set the interrupt pending in the target vCPU. */
		vcpu_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through if handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? ffa_interrupts_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: ffa_interrupts_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

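/**
 * Injects the Notification Pending Interrupt (NPI) into the target vCPU if
 * the receiver partition has pending notifications that warrant it. Returns
 * true if the NPI was injected.
 */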
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		vcpu_interrupt_inject(target_locked,
				      HF_NOTIFICATION_PENDING_INTID);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

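/**
 * Unwinds the NWd scheduled call chain when a non-secure interrupt must be
 * signaled: the current vCPU is preempted and execution is handed back either
 * to the normal world or to the previous SP vCPU in the call chain.
 */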
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and
	 * guidelines, bullet 4. Hence, the call chain starts unwinding. The
	 * current vCPU must have been part of a NWd scheduled call chain.
	 * Therefore, it is preempted and execution is either handed back to
	 * the normal world or to the previous SP vCPU in the call chain
	 * through the FFA_INTERRUPT ABI. The api_preempt() call is equivalent
	 * to calling api_switch_to_other_world for the current vCPU, passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of the NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, this may not be necessary,
	 * since next is guaranteed to be in BLOCKED state as it is the
	 * predecessor of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

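/**
 * Enables the virtual maintenance interrupts (the managed exit and
 * notification pending interrupts) for the calling vCPU, according to the
 * features declared in the partition manifest.
 */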
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable the relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
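/*
 * For illustration, a minimal sketch of how an MP SP might retarget one of
 * its device interrupts to the PE it is currently running on (the interrupt
 * ID and value below are hypothetical; the INT_RECONFIGURE_* commands are
 * the ones handled by this function):
 *
 *	ffa_interrupts_reconfigure(device_int_id, INT_RECONFIGURE_TARGET_PE,
 *				   cpu_index(current->cpu), current);
 */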
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot retarget an interrupt to any physical CPU
		 * other than the one it is currently running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_enable(vm_locked, int_id,
						   value == INT_ENABLE);
		break;
	default:
		dlog_verbose("Interrupt reconfigure: unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check that the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/* Returns the virtual interrupt ID to be handled by the SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	uint32_t int_id;

	/*
	 * If there are any virtual interrupts in the queue, return the first
	 * entry. Else, return the pending interrupt from the bitmap.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		struct interrupts *interrupts;

		/*
		 * Mark the virtual interrupt as no longer pending and
		 * decrement the count.
		 */
		interrupts = &current_locked.vcpu->interrupts;
		vcpu_virt_interrupt_clear_pending(interrupts, int_id);
		vcpu_interrupt_count_decrement(current_locked, interrupts,
					       int_id);

		return int_id;
	}

	return api_interrupt_get(current_locked);
}

/**
 * Run the vCPU in SPMC scheduled mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->requires_deactivate_call = false;
}

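/**
 * Intercepts an FF-A call from the current vCPU if a virtual secure interrupt
 * is pending for it: the call's return value is replaced with one signaling
 * the pending interrupt ID, and the vCPU that was about to be resumed is put
 * in the PREEMPTED state. Returns true if the call was intercepted.
 */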
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	uint32_t intid;

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &intid)) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC scheduled
		 * mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose("%s: Pending interrupt, intercepting FF-A call.\n",
			     __func__);

		return true;
	}

	return false;
}