/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * Drops the current interrupt priority and deactivates the given interrupt ID
 * for the calling vCPU.
 *
 * Returns 0 on success, or -1 otherwise.
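 *
 * Illustrative note: the current implementation maps each virtual interrupt
 * 1:1 to the physical interrupt with the same ID, so a call such as
 * ffa_interrupts_deactivate(32, 32, current) can succeed, while mismatched
 * IDs, e.g. ffa_interrupts_deactivate(32, 33, current), return -1.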
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	struct vcpu_locked current_locked;
	uint32_t int_id;
	int ret = 0;

	current_locked = vcpu_lock(current);
	if (vint_id >= HF_NUM_INTIDS) {
		ret = -1;
		goto out;
	}

	/*
	 * The current implementation maps each virtual interrupt to the
	 * physical interrupt with the same ID.
	 */
	if (pint_id != vint_id) {
		ret = -1;
		goto out;
	}

	/*
	 * A malicious SP could deactivate an interrupt that does not belong to
	 * it. Return an error to indicate failure.
	 */
	if (!vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		dlog_error("No virtual interrupt to be deactivated\n");
		ret = -1;
		goto out;
	}

	if (int_id != vint_id) {
		dlog_error("Unknown interrupt being deactivated %u\n", vint_id);
		ret = -1;
		goto out;
	}

	if (current->requires_deactivate_call) {
		/* There is no preempted vCPU to resume. */
		assert(current->preempted_vcpu == NULL);

		vcpu_secure_interrupt_complete(current_locked);
	}

	/*
	 * Now that the virtual interrupt has been serviced and deactivated,
	 * remove it from the queue, if it was pending.
	 */
	vcpu_interrupt_queue_pop(current_locked, &int_id);
	assert(vint_id == int_id);
out:
	vcpu_unlock(&current_locked);
	return ret;
}

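/**
 * Finds the vCPU that should handle the given physical secure interrupt: the
 * VM/SP whose manifest declares the interrupt ID, resolved to that
 * partition's execution context on the current CPU. Returns NULL when no
 * partition owns the interrupt ID.
 */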
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

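/**
 * Maps a physical interrupt ID to the vCPU that should handle it and to the
 * virtual interrupt ID to inject (these differ only for the S-EL2 physical
 * timer, which is translated to HF_VIRTUAL_TIMER_INTID). Returns NULL when
 * there is nothing to handle, e.g. for a spurious interrupt.
 */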
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in the case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * Queues the pending virtual interrupt for the target vCPU. The fields
 * tracking secure interrupt processing are updated accordingly.
 */
static void ffa_interrupts_queue_vint(struct vcpu_locked target_vcpu_locked,
				      uint32_t vint_id,
				      struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}

	/* Queue the pending virtual interrupt for the target vCPU. */
	if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
		panic("Exhausted interrupt queue for vcpu of SP: %x\n",
		      target_vcpu->vm->id);
	}
}

/**
 * If interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing interrupts to be
 * delivered again.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save the current value of the priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for an S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * If execution was preempted in the NWd, the current vCPU is
		 * also moved to the preempted state when the interrupt is
		 * queued.
		 */
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. The SPMC migrates it to the current physical CPU and
		 * resumes it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to the target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, so the SPMC resumes the
		 * current vCPU.
		 */
		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/* Secure interrupt signaling and queuing for an S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Ideally, we would have to mask non-secure interrupts here,
		 * since the spec mandates that the SPMC make sure an SPMC
		 * scheduled call chain cannot be preempted by a non-secure
		 * interrupt. However, our current design takes care of this
		 * implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU could have migrated to a different
			 * physical CPU. The SPMC migrates it to the current
			 * physical CPU and resumes it.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			next = NULL;
			ffa_interrupts_queue_vint(
				target_vcpu_locked, v_intid,
				(struct vcpu_locked){.vcpu = NULL});
		} else if (ffa_direct_msg_precedes_in_call_chain(
				   current_locked, target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in the blocked state
			 * and resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPUs are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set to the PREEMPTED state.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
						  current_locked);

			next = target_vcpu;
		} else {
			/*
			 * The target vCPU cannot be resumed now because it is
			 * in the BLOCKED state (it yielded CPU cycles using
			 * FFA_YIELD). The SPMC queues the virtual interrupt
			 * and resumes the current vCPU, which could belong to
			 * either a VM or an SP.
			 */
			next = NULL;
			ffa_interrupts_queue_vint(
				target_vcpu_locked, v_intid,
				(struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu) {
			/*
			 * We do not resume a target vCPU that has already been
			 * preempted by an interrupt. Make the vIRQ pending for
			 * the target SP (i.e., queue the interrupt) and
			 * continue to resume the current vCPU. Refer to
			 * section 8.3.2.1 bullet 3 in the FF-A v1.1 EAC0 spec.
			 */

			if (current->vm->id == HF_OTHER_WORLD_ID) {
				/*
				 * The target vCPU must have been preempted by
				 * a non-secure interrupt. It could not have
				 * been preempted by a secure interrupt, as the
				 * current SPMC implementation does not allow
				 * secure interrupt prioritization. Moreover,
				 * the target vCPU should have been in Normal
				 * World scheduled mode, as an SPMC scheduled
				 * mode call chain cannot be preempted by a
				 * non-secure interrupt.
				 */
				CHECK(target_vcpu->scheduling_mode == NWD_MODE);
			}
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}

		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		if (current == target_vcpu) {
			/*
			 * This is the special scenario where the currently
			 * running execution context also happens to be the
			 * target of the secure interrupt. In this case, it
			 * needs to signal completion of the secure interrupt
			 * implicitly. Refer to the embedded comment in the
			 * vcpu.h file for the description of this variable.
			 */

			current->requires_deactivate_call = true;
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU; the
			 * SPMC resumes the current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}
		next = NULL;
		ffa_interrupts_queue_vint(target_vcpu_locked, v_intid,
					  (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in the OFF/ABORTED state
		 * if it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously using the eret conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to call end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. Otherwise, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;
		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on a
	 * different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/*
		 * The SPMC has started handling a secure interrupt with a
		 * clean slate. This flag should be false unless there is a bug
		 * in the source code. Hence, use assert rather than CHECK.
		 */
		assert(!target_vcpu->requires_deactivate_call);

		/* Set the interrupt pending in the target vCPU. */
		vcpu_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in case handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either S-EL0
			 * or S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? ffa_interrupts_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: ffa_interrupts_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

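/**
 * Injects the notification pending interrupt (NPI) into the target vCPU if
 * the receiver partition has pending notifications that warrant it. Returns
 * true if the NPI was injected.
 */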
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
	struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		api_interrupt_inject_locked(target_locked,
					    HF_NOTIFICATION_PENDING_INTID,
					    current_locked, NULL);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

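/**
 * Unwinds the NWd scheduled call chain when the current SP vCPU is preempted
 * by a non-secure interrupt: execution is handed back either to the normal
 * world or to the previous SP vCPU in the chain. Returns the vCPU to resume
 * next.
 */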
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been part of an NWd scheduled call chain. Therefore, it is
	 * preempted and execution is either handed back to the normal world or
	 * to the previous SP vCPU in the call chain through the FFA_INTERRUPT
	 * ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of the NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, this may not be necessary, since
	 * next is guaranteed to be in the BLOCKED state as it is the
	 * predecessor of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

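/**
 * Enables the virtual maintenance interrupts applicable to the current
 * partition: the managed exit interrupt (signaled as vIRQ or vFIQ per the
 * partition manifest) and the notification pending interrupt.
 */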
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable the relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure an interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
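 *
 * Illustrative example: an SP asking for one of its interrupts to be
 * disabled reaches this function as
 *
 *	ret = ffa_interrupts_reconfigure(int_id, INT_RECONFIGURE_ENABLE,
 *					 INT_DISABLE, current);
 *
 * A non-zero return indicates the request was rejected, e.g. because the
 * interrupt does not belong to the calling SP.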
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * An UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU except the one it is currently running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/* Returns the virtual interrupt ID to be handled by the SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	uint32_t int_id;

	/*
	 * If there are any virtual interrupts in the queue, return the first
	 * entry. Else, return the pending interrupt from the bitmap.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		struct interrupts *interrupts;

		/*
		 * Mark the virtual interrupt as no longer pending and
		 * decrement the count.
		 */
		interrupts = &current_locked.vcpu->interrupts;
		vcpu_virt_interrupt_clear_pending(interrupts, int_id);
		vcpu_interrupt_count_decrement(current_locked, interrupts,
					       int_id);

		return int_id;
	}

	return api_interrupt_get(current_locked);
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->requires_deactivate_call = false;
}

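/**
 * Checks whether the current vCPU has a pending virtual secure interrupt and,
 * if so, intercepts the FF-A call: the call's return value is replaced with
 * FFA_INTERRUPT carrying the pending interrupt ID, the current vCPU is set to
 * run in SPMC schedule mode, and the vCPU in next_locked is marked as
 * preempted. Returns true if the call was intercepted.
 */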
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	uint32_t intid;

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &intid)) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in the WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC
		 * schedule mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose("%s: Pending interrupt, intercepting FF-A call.\n",
			     __func__);

		return true;
	}

	return false;
}