/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * This function has been deprecated and its contents moved into
 * api_interrupt_get in order to align the bitmap and queue for tracking
 * interrupts.
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	(void)current;
	return 0;
}

static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

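/*
 * Note: the lookup above is a linear scan over every VM's descriptor table.
 * It relies on the invariant that valid descriptors are packed at the front
 * of interrupt_desc[], which is why the inner loop may stop at the first
 * invalid entry.
 */
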
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in the case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}
}

/**
 * If the interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing the interrupts to be
 * delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

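/*
 * Note: ffa_interrupts_unmask() above and ffa_interrupts_mask() below are
 * paired: the mask routine saves the current GIC priority mask in the vCPU's
 * prev_interrupt_priority field before tightening it, and the unmask routine
 * restores that saved value.
 */
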
/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save current value of priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

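/*
 * Example: with the GICv3 convention that lower numeric values mean higher
 * priority, SWD_MASK_NS_INT blocks only the lower-priority non-secure
 * interrupts, while SWD_MASK_ALL_INT blocks secure interrupts as well. The
 * `current_priority > SWD_MASK_*` checks above can therefore only tighten
 * the mask, never loosen it.
 */
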
/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for an S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);
		uint32_t pending_intid;

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Since S-EL0 partitions will not receive the interrupt
		 * through a vIRQ signal in addition to the FFA_INTERRUPT
		 * ERET, make the interrupt no longer pending at this point.
		 */
		pending_intid = vcpu_virt_interrupt_get_pending_and_enabled(
			target_vcpu_locked);
		assert(pending_intid == v_intid);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * If execution was preempted in the normal world, set that
		 * vCPU to the preempted state as well.
		 */
		ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
						  current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. SPMC will migrate it to the current physical CPU and
		 * resume it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to the target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, so the SPMC resumes the
		 * current vCPU.
		 */
		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * Only a UP SP's single execution context can be observed on a
	 * different physical CPU, since it may migrate between CPUs.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for an S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Ideally, we would have to mask non-secure interrupts here
		 * since the spec mandates that the SPMC should make sure an
		 * SPMC scheduled call chain cannot be preempted by a
		 * non-secure interrupt. However, our current design
		 * takes care of it implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
						  current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU could have migrated to a different
			 * physical CPU. SPMC will migrate it to the current
			 * physical CPU and resume it.
			 */
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, SPMC leaves all
			 * intermediate execution contexts in blocked state and
			 * resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
		} else {
			/*
			 * Either:
			 * - The target vCPU has migrated to a different
			 *   physical CPU. Hence, it cannot be resumed on this
			 *   CPU, so the SPMC resumes the current vCPU.
			 * - The target vCPU cannot be resumed now because it is
			 *   in BLOCKED state (it yielded CPU cycles using
			 *   FFA_YIELD). SPMC queues the virtual interrupt and
			 *   resumes the current vCPU, which could belong to
			 *   either a VM or an SP.
			 */
			next = NULL;
			ffa_interrupts_set_preempted_vcpu(
				target_vcpu_locked,
				(struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the
			 * target vCPU should have been in Normal World
			 * scheduled mode as an SPMC scheduled mode call
			 * chain cannot be preempted by a non-secure
			 * interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}

		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state if
		 * it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

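/*
 * Summary of the S-EL1 signaling rules implemented above (per the FF-A v1.1
 * EAC0 tables referenced in the comments):
 * - WAITING: resume the target with FFA_INTERRUPT and mask interrupts.
 * - BLOCKED: resume the target only if it precedes the current vCPU in the
 *   same NWd scheduled call chain on this CPU; otherwise queue the vIRQ.
 * - PREEMPTED/RUNNING: queue the vIRQ and resume the current vCPU.
 * - Any other state: fatal.
 */
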
/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. SPMD then routes the interrupt to the SPMC
 * through the FFA_INTERRUPT_32 ABI synchronously using the ERET conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find pending interrupt id. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. It also
		 * deactivates the physical interrupt. If not, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;

		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume current vCPU. */
		*next = NULL;
	} else {
		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in the case handling has not been
			 * fully completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? ffa_interrupts_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: ffa_interrupts_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

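/*
 * Note on locking above: when the target differs from the current vCPU, both
 * are acquired through vcpu_lock_both(), which takes the two locks in a
 * consistent global order so that concurrent interrupt handlers on other
 * CPUs cannot deadlock against this path.
 */
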
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

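/*
 * Note: the NPI (notification pending interrupt) only tells the SP that it
 * has notifications to retrieve; the SP is then expected to fetch them, e.g.
 * via the FFA_NOTIFICATION_GET interface.
 */
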
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is "Non-secure
	 * interrupt is signaled". Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been a part of a NWd scheduled call chain. Therefore, it
	 * is preempted and execution is either handed back to the normal
	 * world or to the previous SP vCPU in the call chain through the
	 * FFA_INTERRUPT ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, it may not be necessary since
	 * next is guaranteed to be in BLOCKED state as it is the predecessor
	 * of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

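/*
 * Illustration (hypothetical chain): for a NWd scheduled call chain
 * NWd -> SP1 -> SP2, a non-secure interrupt arriving while SP2 runs unwinds
 * one link per invocation: SP2 is set to PREEMPTED and SP1 (its prev_node)
 * is resumed with FFA_INTERRUPT carrying SP2's VM and vCPU IDs in arg1. Once
 * the head of the chain is reached, api_preempt() hands control back to the
 * normal world.
 */
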
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * SPMC decides the interrupt type for the managed exit signal
		 * based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

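/*
 * Usage sketch (hypothetical values): an SP wanting physical interrupt 0x20
 * rerouted to CPU index 2 and then enabled would trigger the equivalent of:
 *
 *   ffa_interrupts_reconfigure(0x20, INT_RECONFIGURE_TARGET_PE, 2, current);
 *   ffa_interrupts_reconfigure(0x20, INT_RECONFIGURE_ENABLE, INT_ENABLE,
 *                              current);
 *
 * Each call validates that the interrupt belongs to the caller, updates the
 * descriptor under the VM lock, and pushes the change to the interrupt
 * controller via plat_interrupts_reconfigure_interrupt().
 */
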
/* Returns the virtual interrupt ID to be handled by the SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	return vcpu_virt_interrupt_get_pending_and_enabled(current_locked);
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
}

bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	/*
	 * Since S-EL0 partitions will not receive the interrupt through a
	 * vIRQ signal in addition to the FFA_INTERRUPT ERET, make the
	 * interrupt no longer pending at this point. Otherwise, keep it
	 * pending for when the S-EL1 partition calls hf_interrupt_get.
	 */
	uint32_t intid = current_locked.vcpu->vm->el0_partition
				 ? vcpu_virt_interrupt_get_pending_and_enabled(
					   current_locked)
				 : vcpu_virt_interrupt_peek_pending_and_enabled(
					   current_locked);

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (intid != HF_INVALID_INTID) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC schedule
		 * mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose(
			"%s: Pending interrupt %d, intercepting FF-A call.\n",
			__func__, intid);

		return true;
	}

	return false;
}
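
/*
 * Note: interception happens on the return path of an FF-A call: if a
 * pending and enabled virtual interrupt is found, the would-be next vCPU is
 * parked in PREEMPTED state and the current SP is instead resumed in SPMC
 * schedule mode with FFA_INTERRUPT so that it handles the interrupt first.
 */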