/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/direct_messaging.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * This function has been deprecated and its contents moved into
 * api_interrupt_get in order to align the bitmap and queue for tracking
 * interrupts.
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	(void)pint_id;
	(void)vint_id;
	(void)current;
	return 0;
}

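/**
 * Finds the vCPU to handle a secure interrupt by searching every VM's
 * interrupt descriptors for the given physical interrupt ID. Returns NULL
 * if no partition owns the interrupt.
 */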
static struct vcpu *ffa_interrupts_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

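/**
 * Maps a physical interrupt ID to its target vCPU and the virtual interrupt
 * ID to inject. Returns NULL when the current vCPU should simply be resumed,
 * e.g. for a spurious interrupt or when no vCPU has a pending IPI.
 */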
static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,
						    uint32_t interrupt_id,
						    uint32_t *v_intid)
{
	struct vcpu *target_vcpu;

	assert(current != NULL);
	assert(v_intid != NULL);

	*v_intid = interrupt_id;

	switch (interrupt_id) {
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		target_vcpu = NULL;
		break;
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			*v_intid = HF_VIRTUAL_TIMER_INTID;
		}
		/*
		 * It is possible for target_vcpu to be NULL in case of a
		 * spurious timer interrupt.
		 */
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through. */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = ffa_interrupts_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * If the current vCPU is being preempted, record this in the target vCPU
 * and set the current vCPU's state to VCPU_STATE_PREEMPTED.
 */
static void ffa_interrupts_set_preempted_vcpu(
	struct vcpu_locked target_vcpu_locked,
	struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	assert(target_vcpu != NULL);
	assert(preempted_vcpu != NULL);

	target_vcpu->preempted_vcpu = preempted_vcpu;
	preempted_vcpu->state = VCPU_STATE_PREEMPTED;
}

/**
 * If the interrupts were indeed masked by the SPMC before an SP's vCPU was
 * resumed, restore the priority mask, thereby allowing the interrupts to be
 * delivered.
 */
void ffa_interrupts_unmask(struct vcpu *current)
{
	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save current value of priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD Logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

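/**
 * Resumes a target vCPU in the WAITING state so it can handle a secure
 * interrupt: enters the secure interrupt runtime model, masks interrupts as
 * required and records the current vCPU as preempted. Returns the vCPU to
 * run next.
 */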
static struct vcpu *interrupt_resume_waiting(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *next = NULL;
	struct ffa_value ret_interrupt = api_ffa_interrupt_return(v_intid);
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;

	/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
	vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
	ffa_interrupts_mask(target_vcpu_locked);

	if (target_vcpu_locked.vcpu->vm->el0_partition) {
		/*
		 * Since S-EL0 partitions will not receive the interrupt
		 * through a vIRQ signal in addition to the FFA_INTERRUPT
		 * ERET, make the interrupt no longer pending at this point.
		 */
		uint32_t pending_intid =
			vcpu_virt_interrupt_get_pending_and_enabled(
				target_vcpu_locked);
		assert(pending_intid == v_intid);
	}

	/*
	 * Ideally, we would mask non-secure interrupts here, since the spec
	 * mandates that the SPMC make sure an SPMC scheduled call chain
	 * cannot be preempted by a non-secure interrupt. However, our
	 * current design takes care of this implicitly.
	 */
	vcpu_set_running(target_vcpu_locked, &ret_interrupt);

	ffa_interrupts_set_preempted_vcpu(target_vcpu_locked, current_locked);

	next = target_vcpu;

	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		/*
		 * The target vCPU could have migrated to a different
		 * physical CPU. The SPMC will migrate it to the current
		 * physical CPU and resume it.
		 */
		assert(target_vcpu->vm->vcpu_count == 1);
		target_vcpu->cpu = current_locked.vcpu->cpu;
	}

	return next;
}

/**
 * Handles the secure interrupt according to the target vCPU's state.
 * Returns the next vCPU to resume accordingly.
 * If it returns NULL, the current vCPU shall be resumed. This may be
 * because the target vCPU is the current vCPU, or because the target
 * vCPU is not in a state in which it can be resumed to handle the
 * secure interrupt.
 */
static struct vcpu *ffa_interrupts_signal_secure_interrupt(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/*
	 * If the target vCPU has migrated to a different physical CPU, it
	 * cannot be resumed on this CPU, so the SPMC resumes the current
	 * vCPU. Only a UP SP can have migrated in this way.
	 */
	if (target_vcpu->cpu != current_locked.vcpu->cpu) {
		assert(target_vcpu->vm->vcpu_count == 1);
	}

	/* Secure interrupt signaling and queuing for SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING:
		next = interrupt_resume_waiting(current_locked,
						target_vcpu_locked, v_intid);
		break;
	case VCPU_STATE_BLOCKED:
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    ffa_direct_msg_precedes_in_call_chain(current_locked,
							  target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, the SPMC leaves all
			 * intermediate execution contexts in blocked state
			 * and resumes the target vCPU to handle the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set to PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			ffa_interrupts_set_preempted_vcpu(target_vcpu_locked,
							  current_locked);
			next = target_vcpu;
			break;
		}

		/*
		 * `next` is NULL. Either:
		 * - An EL0 partition cannot be resumed when in blocked state.
		 * - The target vCPU has migrated to a different physical
		 *   CPU. Hence, it cannot be resumed on this CPU, and the
		 *   SPMC resumes the current vCPU.
		 * - The target vCPU cannot be resumed now because it is in
		 *   BLOCKED state (it yielded CPU cycles using FFA_YIELD).
		 *   The SPMC queues the virtual interrupt and resumes the
		 *   current vCPU, which could belong to either a VM or an SP.
		 */
		break;
	case VCPU_STATE_PREEMPTED:
		/*
		 * We do not resume a target vCPU that has already been
		 * preempted by an interrupt. Make the vIRQ pending for the
		 * target SP (i.e., queue the interrupt) and continue to
		 * resume the current vCPU. Refer to section 8.3.2.1 bullet
		 * 3 in the FF-A v1.1 EAC0 spec.
		 */
		if (!target_vcpu->vm->el0_partition &&
		    target_vcpu->cpu == current_locked.vcpu->cpu &&
		    current->vm->id == HF_OTHER_WORLD_ID) {
			/*
			 * The target vCPU must have been preempted by a
			 * non-secure interrupt. It could not have been
			 * preempted by a secure interrupt, as the current
			 * SPMC implementation does not allow secure
			 * interrupt prioritization. Moreover, the
			 * target vCPU should have been in Normal World
			 * scheduled mode, as an SPMC scheduled mode call
			 * chain cannot be preempted by a non-secure
			 * interrupt.
			 */
			CHECK(target_vcpu->scheduling_mode == NWD_MODE);
		}
		break;
	case VCPU_STATE_RUNNING:
		/*
		 * The virtual interrupt has already been injected into the
		 * running vCPU, so there is nothing more to do here.
		 */
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * The vCPU of the target SP cannot be in OFF/ABORTED state
		 * if it has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt into the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. The SPMD then routes the interrupt to the
 * SPMC through the FFA_INTERRUPT_32 ABI synchronously, using the ERET
 * conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find pending interrupt id. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	/* Get the target vCPU and the virtual interrupt ID. */
	target_vcpu = ffa_interrupts_find_target_vcpu(current, intid, &v_intid);

	/*
	 * A spurious interrupt ID indicates there is no pending interrupt to
	 * acknowledge, so we do not need to signal end of interrupt.
	 */
	if (v_intid != SPURIOUS_INTID_OTHER_WORLD) {
		/*
		 * End the interrupt to drop the running priority. This also
		 * deactivates the physical interrupt. If not, the interrupt
		 * could trigger again after resuming the current vCPU.
		 */
		plat_interrupts_end_of_interrupt(intid);
	}

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;

		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume current vCPU. */
		*next = NULL;
	} else {
		/* Set the interrupt pending in the target vCPU. */
		vcpu_virt_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in the case handling has not been
			 * fully completed.
			 */
		default:
			/*
			 * Invoke the handler for partitions from either
			 * S-EL0 or S-EL1.
			 */
			*next = ffa_interrupts_signal_secure_interrupt(
				current_locked, target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

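/**
 * Injects the Notification Pending Interrupt (NPI) into the target vCPU when
 * the receiver is an SP with pending global or per-vCPU notifications.
 * Returns true if the NPI was injected.
 */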
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. it is an SP).
	 * - The partition has global pending notifications, or there are
	 *   pending per-vCPU notifications for the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     vm_are_global_notifications_pending(receiver_locked))) {
		vcpu_virt_interrupt_inject(target_locked,
					   HF_NOTIFICATION_PENDING_INTID);
		ret = true;
	}

	return ret;
}

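/**
 * Unwinds the NWd scheduled call chain after a non-secure interrupt preempts
 * the current vCPU: execution is handed back either to the normal world or to
 * the previous SP vCPU in the call chain. Returns the vCPU to run next.
 */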
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and
	 * guidelines bullet 4. Hence, the call chain starts unwinding. The
	 * current vCPU must have been a part of a NWd scheduled call chain.
	 * Therefore, it is preempted and execution is either handed back to
	 * the normal world or to the previous SP vCPU in the call chain
	 * through the FFA_INTERRUPT ABI. The api_preempt() call is equivalent
	 * to calling api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, it may not be necessary since
	 * next is guaranteed to be in BLOCKED state as it is the predecessor
	 * of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Remove a node from the existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * The SPMC applies the runtime model until the vCPU transitions from
	 * running to waiting state. Moreover, the SP continues to remain in
	 * its CPU cycle allocation mode. Hence, rt_model and scheduling_mode
	 * are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

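/**
 * Enables the virtual maintenance interrupts that apply to the current
 * partition: the managed exit interrupt (as vIRQ or vFIQ, per the partition
 * manifest) and the notification pending interrupt.
 */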
static void ffa_interrupts_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * The SPMC decides the interrupt type for the managed exit
		 * signal based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	ffa_interrupts_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure the interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
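/*
 * For example, a partition wishing to disable one of its physical interrupts
 * at runtime could issue the following (an illustrative sketch, not taken
 * from a real caller):
 *
 *	ffa_interrupts_reconfigure(int_id, INT_RECONFIGURE_ENABLE,
 *				   INT_DISABLE, current);
 */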
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock the VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted to
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void ffa_interrupts_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
}

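/**
 * Checks whether the current vCPU has a pending virtual secure interrupt that
 * should intercept the FF-A call in progress. If so, prepares the
 * FFA_INTERRUPT return value in `signal_interrupt`, switches the current vCPU
 * into the secure interrupt runtime model, marks `next` as preempted and
 * returns true.
 */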
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	/*
	 * Since S-EL0 partitions will not receive the interrupt through a
	 * vIRQ signal in addition to the FFA_INTERRUPT ERET, make the
	 * interrupt no longer pending at this point. Otherwise, keep it
	 * pending for when the S-EL1 partition calls hf_interrupt_get.
	 */
	uint32_t intid = current_locked.vcpu->vm->el0_partition
				 ? vcpu_virt_interrupt_get_pending_and_enabled(
					   current_locked)
				 : vcpu_virt_interrupt_peek_pending_and_enabled(
					   current_locked);

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (intid != HF_INVALID_INTID) {
		/*
		 * Prepare to signal the virtual secure interrupt to the
		 * S-EL0/S-EL1 SP in WAITING state. Refer to FF-A v1.2
		 * Table 9.1 and Table 9.2 case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC
		 * schedule mode to handle the virtual secure interrupt.
		 */
		ffa_interrupts_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose(
			"%s: Pending interrupt %d, intercepting FF-A call.\n",
			__func__, intid);

		return true;
	}

	return false;
}