/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * Drops the current interrupt priority and deactivates the given interrupt ID
 * for the calling vCPU.
 *
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	int ret = 0;
	struct vcpu_locked current_locked;

	current_locked = vcpu_lock(current);

	if (current->requires_deactivate_call) {
		/* There is no preempted vCPU to resume. */
		assert(current->preempted_vcpu == NULL);

		vcpu_secure_interrupt_complete(current_locked);
	}

	vcpu_unlock(&current_locked);
	return ret;
}

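/**
 * Finds the vCPU that should handle the given physical interrupt ID by
 * searching every VM's interrupt descriptors, or returns NULL if no
 * partition owns the interrupt.
 */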
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

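/**
 * Maps a physical interrupt ID to the vCPU that should handle it, and to the
 * virtual interrupt ID to be injected. Returns NULL when there is no vCPU to
 * resume (spurious interrupt, no vCPU with a pending IPI, or a spurious timer
 * interrupt).
 */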
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID, indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled, this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}
}

/**
 * If interrupts were masked by the SPMC before an SP's vCPU was resumed,
 * restore the priority mask, thereby allowing interrupts to be delivered
 * again.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing them.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save the current value of the priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

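	/*
	 * Note: on the GIC, lower numerical priority values denote higher
	 * priority, and an interrupt is only signaled when its priority is
	 * strictly higher than the mask. Lowering the mask value below
	 * therefore masks a broader set of interrupts.
	 */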
	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * when the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for an S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);
		uint32_t pending_intid;

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Since S-EL0 partitions will not receive the interrupt
		 * through a vIRQ signal in addition to the FFA_INTERRUPT
		 * ERET, make the interrupt no longer pending at this point.
		 */
		pending_intid = vcpu_virt_interrupt_get_pending_and_enabled(
			target_vcpu_locked);
		assert(pending_intid == v_intid);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * If execution was preempted in the normal world, record the
		 * preempted vCPU and set it to preempted state.
		 */
		ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
						  current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. The SPMC migrates it to the current physical CPU and
		 * resumes it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to the target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, so the SPMC resumes the
		 * current vCPU.
		 */
		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * when the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

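	/*
	 * Summary of the handling below: a WAITING target is signaled and
	 * resumed; a BLOCKED target is resumed only if it precedes the
	 * current vCPU in the same call chain on this CPU, otherwise the
	 * interrupt is queued; for PREEMPTED and RUNNING targets the
	 * interrupt remains queued as a pending virtual interrupt; any other
	 * state is fatal.
	 */
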
	/*
	 * If the target vCPU is on a different physical CPU, it must belong
	 * to a UP SP that has migrated: only a single execution context can
	 * be in that situation.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for an S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		ffa_interrupts_mask(target_vcpu_locked);

		/*
		 * Ideally, we would mask non-secure interrupts here, since
		 * the spec mandates that the SPMC ensure an SPMC scheduled
		 * call chain cannot be preempted by a non-secure interrupt.
		 * However, our current design takes care of this implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
						  current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU has migrated to a different
			 * physical CPU. The SPMC migrates it back to the
			 * current physical CPU and resumes it.
			 */
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in blocked state
			 * and resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set to preempted state.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
		} else {
			/*
			 * Either:
			 * - The target vCPU has migrated to a different
			 *   physical CPU. Hence, it cannot be resumed on this
			 *   CPU, so the SPMC resumes the current vCPU.
			 * - The target vCPU cannot be resumed now because it
			 *   is in BLOCKED state (it yielded CPU cycles using
			 *   FFA_YIELD). The SPMC queues the virtual interrupt
			 *   and resumes the current vCPU, which could belong
			 *   to either a VM or an SP.
			 */
			next = NULL;
			ffa_interrupts_set_preempted_vcpu(
				target_vcpu_locked,
				(struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt, as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the
			 * target vCPU should have been in Normal World
			 * scheduled mode, as an SPMC scheduled mode call
			 * chain cannot be preempted by a non-secure
			 * interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}

		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		if (current == target_vcpu) {
			/*
			 * This is the special scenario where the currently
			 * running execution context also happens to be the
			 * target of the secure interrupt. In this case, it
			 * needs to signal completion of the secure interrupt
			 * implicitly. Refer to the embedded comment in the
			 * vcpu.h file for the description of this variable.
			 */
			current->requires_deactivate_call = true;
		}

		next = NULL;
		ffa_interrupts_set_preempted_vcpu(
			target_vcpu_locked, (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state if
		 * it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously using the ERET conduit.
 */
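/*
 * Handling proceeds in stages: read the pending interrupt ID from the
 * controller, map it to a target vCPU and a virtual interrupt ID, signal end
 * of interrupt, inject the virtual interrupt, and finally decide which vCPU
 * to run next based on the target vCPU's state.
 */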
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find the pending interrupt ID. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. Otherwise, the
		 * interrupt could trigger again after resuming the current
		 * vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;
		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume the current vCPU. */
		*next = NULL;
	} else {
		/*
		 * The SPMC has started handling a secure interrupt with a
		 * clean slate. This flag should be false unless there is a
		 * bug in the source code. Hence, use assert rather than
		 * CHECK.
		 */
		assert(!target_vcpu->requires_deactivate_call);

		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in case handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? ffa_interrupts_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: ffa_interrupts_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

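/**
 * Injects the notification pending interrupt (NPI) into the target vCPU if
 * the receiver partition has pending notifications that warrant it. Returns
 * true if the NPI was injected.
 */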
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

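/**
 * Unwinds the NWd scheduled call chain when the current vCPU is preempted by
 * a non-secure interrupt: execution is handed back either to the normal world
 * or to the previous SP vCPU in the chain, and the current vCPU is marked
 * preempted.
 */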
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and
	 * guidelines bullet 4. Hence, the call chain starts unwinding. The
	 * current vCPU must have been a part of a NWd scheduled call chain.
	 * Therefore, it is preempted and execution is either handed back to
	 * the normal world or to the previous SP vCPU in the call chain
	 * through the FFA_INTERRUPT ABI. The api_preempt() call is equivalent
	 * to calling api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of the NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, this may not be necessary,
	 * since next is guaranteed to be in BLOCKED state as it is the
	 * predecessor of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * the running to the waiting state. Moreover, the SP continues to
	 * remain in its CPU cycle allocation mode. Hence, rt_model and
	 * scheduling_mode are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

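/**
 * Enables the virtual maintenance interrupts relevant to the current
 * partition: the managed exit interrupt (typed as vIRQ or vFIQ per the
 * partition manifest) and the notification pending interrupt.
 */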
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure the interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
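/*
 * Illustrative usage (hypothetical values): an SP could retarget its
 * interrupt 42 to the CPU with index 1 through this paravirtualized
 * interface, i.e.:
 *
 *   ffa_interrupts_reconfigure(42, INT_RECONFIGURE_TARGET_PE, 1, current);
 */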
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/* Returns the virtual interrupt ID to be handled by the SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	return vcpu_virt_interrupt_get_pending_and_enabled(current_locked);
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->requires_deactivate_call = false;
}

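/**
 * Checks for a pending and enabled virtual secure interrupt before completing
 * an FF-A call. If one is found, the call is intercepted: *signal_interrupt
 * is set to the FFA_INTERRUPT return value, the current vCPU is prepared to
 * run in SPMC schedule mode, and the would-be next vCPU is marked preempted.
 * Returns true if the call was intercepted.
 */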
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	/*
	 * Since S-EL0 partitions will not receive the interrupt through a
	 * vIRQ signal in addition to the FFA_INTERRUPT ERET, make the
	 * interrupt no longer pending at this point. Otherwise, keep it as
	 * pending for when the S-EL1 partition calls hf_interrupt_get.
	 */
	uint32_t intid = current_locked.vcpu->vm->el0_partition
				 ? vcpu_virt_interrupt_get_pending_and_enabled(
					   current_locked)
				 : vcpu_virt_interrupt_peek_pending_and_enabled(
					   current_locked);

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (intid != HF_INVALID_INTID) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC
		 * schedule mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose(
			"%s: Pending interrupt %d, intercepting FF-A call.\n",
			__func__, intid);

		return true;
	}

	return false;
}