/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/plat/interrupts.h"

#include "hf/arch/gicv3.h"
#include "hf/arch/host_timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/ffa/vm.h"
#include "hf/hf_ipi.h"
#include "hf/vm.h"

/**
 * Drops the current interrupt priority and deactivates the given interrupt ID
 * for the calling vCPU.
 *
 * Returns 0 on success, or -1 otherwise.
 */
int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
				  struct vcpu *current)
{
	struct vcpu_locked current_locked;
	uint32_t int_id;
	int ret = 0;

	current_locked = vcpu_lock(current);
	if (vint_id >= HF_NUM_INTIDS) {
		ret = -1;
		goto out;
	}

	/*
	 * Current implementation maps virtual interrupt to physical interrupt.
	 */
	if (pint_id != vint_id) {
		ret = -1;
		goto out;
	}

	/*
	 * A malicious SP could deactivate an interrupt that does not belong
	 * to it. Return an error to indicate failure.
	 */
	if (!vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		dlog_error("No virtual interrupt to be deactivated\n");
		ret = -1;
		goto out;
	}

	if (int_id != vint_id) {
		dlog_error("Unknown interrupt being deactivated %u\n", vint_id);
		ret = -1;
		goto out;
	}

	if (current->requires_deactivate_call) {
		/* There is no preempted vCPU to resume. */
		assert(current->preempted_vcpu == NULL);

		vcpu_secure_interrupt_complete(current_locked);
	}

	/*
	 * Now that the virtual interrupt has been serviced and deactivated,
	 * remove it from the queue, if it was pending.
	 */
	vcpu_interrupt_queue_pop(current_locked, &int_id);
	assert(vint_id == int_id);
out:
	vcpu_unlock(&current_locked);
	return ret;
}

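/**
 * Walks the list of VMs to find the partition that owns the given physical
 * interrupt ID, and returns that partition's vCPU context on the current CPU.
 * Returns NULL if no partition has declared the interrupt.
 */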
static struct vcpu *plat_ffa_find_target_vcpu_secure_interrupt(
	struct vcpu *current, uint32_t interrupt_id)
{
	/*
	 * Find which VM/SP owns this interrupt. We then find the
	 * corresponding vCPU context for this CPU.
	 */
	for (ffa_vm_count_t index = 0; index < vm_get_count(); ++index) {
		struct vm *vm = vm_find_index(index);

		for (uint32_t j = 0; j < HF_NUM_INTIDS; j++) {
			struct interrupt_descriptor int_desc =
				vm->interrupt_desc[j];

			/*
			 * Interrupt descriptors are populated
			 * contiguously.
			 */
			if (!int_desc.valid) {
				break;
			}
			if (int_desc.interrupt_id == interrupt_id) {
				return api_ffa_get_vm_vcpu(vm, current);
			}
		}
	}

	return NULL;
}

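/**
 * Determines the vCPU to which the acknowledged physical interrupt should be
 * routed: the next vCPU with a pending IPI, or the vCPU of the SP that owns
 * the secure interrupt. EL1 timer interrupts are not expected here.
 */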
static struct vcpu *plat_ffa_find_target_vcpu(struct vcpu *current,
					      uint32_t interrupt_id)
{
	struct vcpu *target_vcpu;

	switch (interrupt_id) {
	case HF_IPI_INTID:
		/*
		 * Get the next vCPU with a pending IPI. If all vCPUs
		 * have had their IPIs handled this will return NULL.
		 */
		target_vcpu = hf_ipi_get_pending_target_vcpu(current);
		break;
	case ARM_EL1_VIRT_TIMER_PHYS_INT:
		/* Fall through */
	case ARM_EL1_PHYS_TIMER_PHYS_INT:
		panic("Timer interrupt not expected to fire: %u\n",
		      interrupt_id);
	default:
		target_vcpu = plat_ffa_find_target_vcpu_secure_interrupt(
			current, interrupt_id);

		/* The target vCPU for a secure interrupt cannot be NULL. */
		CHECK(target_vcpu != NULL);
	}

	return target_vcpu;
}

/*
 * Queue the pending virtual interrupt for the target vCPU. Necessary fields
 * tracking the secure interrupt processing are set accordingly.
 */
static void plat_ffa_queue_vint(struct vcpu_locked target_vcpu_locked,
				uint32_t vint_id,
				struct vcpu_locked current_locked)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *preempted_vcpu = current_locked.vcpu;

	if (preempted_vcpu != NULL) {
		target_vcpu->preempted_vcpu = preempted_vcpu;
		preempted_vcpu->state = VCPU_STATE_PREEMPTED;
	}

	/* Queue the pending virtual interrupt for the target vCPU. */
	if (!vcpu_interrupt_queue_push(target_vcpu_locked, vint_id)) {
		panic("Exhausted interrupt queue for vcpu of SP: %x\n",
		      target_vcpu->vm->id);
	}
}

/**
 * Enforce the action of an SP in response to a non-secure or other-secure
 * interrupt by changing the priority mask. Effectively, physical interrupts
 * shall not trigger, which has the same effect as queueing interrupts.
 */
static void plat_ffa_vcpu_queue_interrupts(
	struct vcpu_locked receiver_vcpu_locked)
{
	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
	uint8_t current_priority;

	/* Save current value of priority mask. */
	current_priority = plat_interrupts_get_priority_mask();
	receiver_vcpu->prev_interrupt_priority = current_priority;

	if (receiver_vcpu->vm->other_s_interrupts_action ==
		    OTHER_S_INT_ACTION_QUEUED ||
	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
		/*
		 * If secure interrupts are not masked yet, mask them now. We
		 * could enter SPMC scheduled mode when an EL3 SPMD Logical
		 * partition sends a direct request, and we are making the
		 * IMPDEF choice to mask interrupts when such a situation
		 * occurs. This keeps the design simple.
		 */
		if (current_priority > SWD_MASK_ALL_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
		}
	} else if (receiver_vcpu->vm->ns_interrupts_action ==
		   NS_ACTION_QUEUED) {
		/* If non-secure interrupts are not masked yet, mask them now. */
		if (current_priority > SWD_MASK_NS_INT) {
			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
		}
	}
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case where the owner of the interrupt is an S-EL0 partition.
 */
static struct vcpu *plat_ffa_signal_secure_interrupt_sel0(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *next;

	/* Secure interrupt signaling and queuing for S-EL0 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.1 case 1 and Table 12.10. */
		dlog_verbose("S-EL0: Secure interrupt signaled: %x\n",
			     target_vcpu->vm->id);

		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);

		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		/*
		 * The current vCPU (which may be the NWd execution context)
		 * is recorded as the preempted vCPU and put in the
		 * PREEMPTED state.
		 */
		plat_ffa_queue_vint(target_vcpu_locked, v_intid,
				    current_locked);

		/*
		 * The target vCPU could have migrated to a different physical
		 * CPU. SPMC will migrate it to the current physical CPU and
		 * resume it.
		 */
		target_vcpu->cpu = current_locked.vcpu->cpu;

		/* Switch to target vCPU responsible for this interrupt. */
		next = target_vcpu;
		break;
	}
	case VCPU_STATE_BLOCKED:
	case VCPU_STATE_PREEMPTED:
	case VCPU_STATE_RUNNING:
		dlog_verbose("S-EL0: Secure interrupt queued: %x\n",
			     target_vcpu->vm->id);
		/*
		 * The target vCPU cannot be resumed, SPMC resumes current
		 * vCPU.
		 */
		next = NULL;
		plat_ffa_queue_vint(target_vcpu_locked, v_intid,
				    (struct vcpu_locked){.vcpu = NULL});
		break;
	default:
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

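/**
 * Returns true if the target vCPU is a predecessor of the current vCPU in the
 * present call chain, i.e. it ran earlier in the same call chain.
 */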
static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
					 struct vcpu_locked target_locked)
{
	struct vcpu *prev_node;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *target = target_locked.vcpu;

	assert(current != NULL);
	assert(target != NULL);

	prev_node = current->call_chain.prev_node;

	while (prev_node != NULL) {
		if (prev_node == target) {
			return true;
		}

		/* The target vCPU is not its immediate predecessor. */
		prev_node = prev_node->call_chain.prev_node;
	}

	/* Search terminated. Reached start of call chain. */
	return false;
}

/**
 * Handles the secure interrupt according to the target vCPU's state
 * in the case where the owner of the interrupt is an S-EL1 partition.
 */
static struct vcpu *plat_ffa_signal_secure_interrupt_sel1(
	struct vcpu_locked current_locked,
	struct vcpu_locked target_vcpu_locked, uint32_t v_intid)
{
	struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
	struct vcpu *current = current_locked.vcpu;
	struct vcpu *next = NULL;

	/* Secure interrupt signaling and queuing for S-EL1 SP. */
	switch (target_vcpu->state) {
	case VCPU_STATE_WAITING: {
		struct ffa_value ret_interrupt =
			api_ffa_interrupt_return(v_intid);

		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
		plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);

		/*
		 * Ideally, we would mask non-secure interrupts here, since
		 * the spec mandates that the SPMC make sure an SPMC scheduled
		 * call chain cannot be preempted by a non-secure interrupt.
		 * However, our current design takes care of it implicitly.
		 */
		vcpu_set_running(target_vcpu_locked, &ret_interrupt);

		plat_ffa_queue_vint(target_vcpu_locked, v_intid,
				    current_locked);
		next = target_vcpu;

		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU could have migrated to a different
			 * physical CPU. SPMC will migrate it to the current
			 * physical CPU and resume it.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			target_vcpu->cpu = current_locked.vcpu->cpu;
		}
		break;
	}
	case VCPU_STATE_BLOCKED:
		if (target_vcpu->cpu != current_locked.vcpu->cpu) {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU, SPMC
			 * resumes current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
			next = NULL;
			plat_ffa_queue_vint(target_vcpu_locked, v_intid,
					    (struct vcpu_locked){.vcpu = NULL});
		} else if (is_predecessor_in_call_chain(current_locked,
							target_vcpu_locked)) {
			struct ffa_value ret_interrupt =
				api_ffa_interrupt_return(0);

			/*
			 * If the target vCPU ran earlier in the same call
			 * chain as the current vCPU, SPMC leaves all
			 * intermediate execution contexts in blocked state and
			 * resumes the target vCPU for handling the secure
			 * interrupt.
			 * Under the current design, there is only one possible
			 * scenario in which this could happen: both the
			 * preempted (i.e. current) and target vCPU are in the
			 * same NWd scheduled call chain, as described in
			 * Scenario 1 of Table 8.4 in the EAC0 spec.
			 */
			assert(current_locked.vcpu->scheduling_mode ==
			       NWD_MODE);
			assert(target_vcpu->scheduling_mode == NWD_MODE);

			/*
			 * The execution preempted the call chain that involved
			 * the targeted and the current SPs.
			 * The targeted SP is set running, whilst the
			 * preempted SP is set PREEMPTED.
			 */
			vcpu_set_running(target_vcpu_locked, &ret_interrupt);

			plat_ffa_queue_vint(target_vcpu_locked, v_intid,
					    current_locked);

			next = target_vcpu;
		} else {
			/*
			 * The target vCPU cannot be resumed now because it is
			 * in BLOCKED state (it yielded CPU cycles using
			 * FFA_YIELD). SPMC queues the virtual interrupt and
			 * resumes the current vCPU which could belong to either
			 * a VM or a SP.
			 */
			next = NULL;
			plat_ffa_queue_vint(target_vcpu_locked, v_intid,
					    (struct vcpu_locked){.vcpu = NULL});
		}
		break;
	case VCPU_STATE_PREEMPTED:
		if (target_vcpu->cpu == current_locked.vcpu->cpu) {
			/*
			 * We do not resume a target vCPU that has already been
			 * preempted by an interrupt. Make the vIRQ pending for
			 * the target SP (i.e., queue the interrupt) and
			 * continue to resume the current vCPU. Refer to
			 * section 8.3.2.1 bullet 3 in the FF-A v1.1 EAC0 spec.
			 */

			if (current->vm->id == HF_OTHER_WORLD_ID) {
				/*
				 * The target vCPU must have been preempted by
				 * a non-secure interrupt. It could not have
				 * been preempted by a secure interrupt as the
				 * current SPMC implementation does not allow
				 * secure interrupt prioritization. Moreover,
				 * the target vCPU should have been in Normal
				 * World scheduled mode as an SPMC scheduled
				 * mode call chain cannot be preempted by a
				 * non-secure interrupt.
				 */
				CHECK(target_vcpu->scheduling_mode == NWD_MODE);
			}
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU, SPMC
			 * resumes current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}

		next = NULL;
		plat_ffa_queue_vint(target_vcpu_locked, v_intid,
				    (struct vcpu_locked){.vcpu = NULL});

		break;
	case VCPU_STATE_RUNNING:
		if (current == target_vcpu) {
			/*
			 * This is the special scenario where the current
			 * running execution context also happens to be the
			 * target of the secure interrupt. In this case, it
			 * needs to signal completion of the secure interrupt
			 * implicitly. Refer to the embedded comment in vcpu.h
			 * for the description of this variable.
			 */

			current->requires_deactivate_call = true;
		} else {
			/*
			 * The target vCPU has migrated to a different physical
			 * CPU. Hence, it cannot be resumed on this CPU, SPMC
			 * resumes current vCPU.
			 */
			assert(target_vcpu->vm->vcpu_count == 1);
		}
		next = NULL;
		plat_ffa_queue_vint(target_vcpu_locked, v_intid,
				    (struct vcpu_locked){.vcpu = NULL});
		break;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* WFI is a no-op for an SP. Fall through. */
	default:
		/*
		 * A vCPU of the target SP cannot be in OFF/ABORTED state if it
		 * has to handle a secure interrupt.
		 */
		panic("Secure interrupt cannot be signaled to target SP\n");
		break;
	}

	return next;
}

/**
 * Obtain the physical interrupt that triggered from the interrupt controller,
 * and inject the corresponding virtual interrupt to the target vCPU.
 * When a PE is executing in the Normal World and a secure interrupt triggers,
 * execution is trapped into EL3. SPMD then routes the interrupt to SPMC
 * through the FFA_INTERRUPT_32 ABI synchronously using the eret conduit.
 */
void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
					    struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vcpu_locked target_vcpu_locked =
		(struct vcpu_locked){.vcpu = NULL};
	struct vcpu_locked current_locked;
	uint32_t intid;
	struct vm_locked target_vm_locked;
	uint32_t v_intid;

	/* Find pending interrupt id. This also activates the interrupt. */
	intid = plat_interrupts_get_pending_interrupt_id();
	v_intid = intid;

	switch (intid) {
	case ARM_SEL2_TIMER_PHYS_INT:
		/* Disable the S-EL2 physical timer. */
		host_timer_disable();
		target_vcpu = timer_find_target_vcpu(current);

		if (target_vcpu != NULL) {
			v_intid = HF_VIRTUAL_TIMER_INTID;
			break;
		}
		/*
		 * It is possible for target_vcpu to be NULL in case of a
		 * spurious timer interrupt. Fall through.
		 */
	case SPURIOUS_INTID_OTHER_WORLD:
		/*
		 * Spurious interrupt ID indicating that there are no pending
		 * interrupts to acknowledge. For such scenarios, resume the
		 * current vCPU.
		 */
		*next = NULL;
		return;
	default:
		target_vcpu = plat_ffa_find_target_vcpu(current, intid);
		break;
	}

	/*
	 * End the interrupt to drop the running priority. It also deactivates
	 * the physical interrupt. If not, the interrupt could trigger again
	 * after resuming the current vCPU.
	 */
	plat_interrupts_end_of_interrupt(intid);

	if (target_vcpu == NULL) {
		/* No further handling required. Resume the current vCPU. */
		*next = NULL;
		return;
	}

	target_vm_locked = vm_lock(target_vcpu->vm);

	if (target_vcpu == current) {
		current_locked = vcpu_lock(current);
		target_vcpu_locked = current_locked;
	} else {
		struct two_vcpu_locked vcpus_locked;

		/* Lock both vCPUs at once to avoid deadlock. */
		vcpus_locked = vcpu_lock_both(current, target_vcpu);
		current_locked = vcpus_locked.vcpu1;
		target_vcpu_locked = vcpus_locked.vcpu2;
	}

	/*
	 * A race condition can occur with the execution contexts belonging to
	 * an MP SP. An interrupt targeting the execution context on the
	 * present core can trigger while the execution context of this SP on
	 * a different core is being aborted. In such a scenario, the physical
	 * interrupts belonging to the aborted SP are disabled and the current
	 * execution context is resumed.
	 */
	if (target_vcpu->state == VCPU_STATE_ABORTED ||
	    atomic_load_explicit(&target_vcpu->vm->aborting,
				 memory_order_relaxed)) {
		/* Clear fields corresponding to secure interrupt handling. */
		vcpu_secure_interrupt_complete(target_vcpu_locked);
		ffa_vm_disable_interrupts(target_vm_locked);

		/* Resume current vCPU. */
		*next = NULL;
	} else {
		/*
		 * SPMC has started handling a secure interrupt with a clean
		 * slate. This signal should be false unless there was a bug in
		 * the source code. Hence, use assert rather than CHECK.
		 */
		assert(!target_vcpu->requires_deactivate_call);

		/* Set the interrupt pending in the target vCPU. */
		vcpu_interrupt_inject(target_vcpu_locked, v_intid);

		switch (intid) {
		case HF_IPI_INTID:
			if (hf_ipi_handle(target_vcpu_locked)) {
				*next = NULL;
				break;
			}
			/*
			 * Fall through in the case handling has not been fully
			 * completed.
			 */
		default:
			/*
			 * Either invoke the handler related to partitions from
			 * S-EL0 or from S-EL1.
			 */
			*next = target_vcpu_locked.vcpu->vm->el0_partition
					? plat_ffa_signal_secure_interrupt_sel0(
						  current_locked,
						  target_vcpu_locked, v_intid)
					: plat_ffa_signal_secure_interrupt_sel1(
						  current_locked,
						  target_vcpu_locked, v_intid);
		}
	}

	if (target_vcpu_locked.vcpu != NULL) {
		vcpu_unlock(&target_vcpu_locked);
	}

	vcpu_unlock(&current_locked);
	vm_unlock(&target_vm_locked);
}

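/**
 * Injects the Notification Pending Interrupt (NPI) to the target vCPU if the
 * receiver partition has pending notifications that warrant it. Returns true
 * if the interrupt was injected.
 */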
bool ffa_interrupts_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
	struct vm_locked receiver_locked)
{
	struct vm *next_vm = target_locked.vcpu->vm;
	bool ret = false;

	/*
	 * Inject the NPI if:
	 * - The targeted VM ID is from this world (i.e. if it is an SP).
	 * - The partition has global pending notifications and an NPI hasn't
	 *   been injected yet.
	 * - There are pending per-vCPU notifications in the next vCPU.
	 */
	if (vm_id_is_current_world(next_vm->id) &&
	    (vm_are_per_vcpu_notifications_pending(
		     receiver_locked, vcpu_index(target_locked.vcpu)) ||
	     (vm_are_global_notifications_pending(receiver_locked) &&
	      !vm_notifications_is_npi_injected(receiver_locked)))) {
		api_interrupt_inject_locked(target_locked,
					    HF_NOTIFICATION_PENDING_INTID,
					    current_locked, NULL);
		vm_notifications_set_npi_injected(receiver_locked, true);
		ret = true;
	}

	return ret;
}

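/**
 * Unwinds the present NWd scheduled call chain when a non-secure interrupt is
 * to be signaled: the current vCPU is preempted and execution is handed back
 * either to the normal world or to its predecessor SP vCPU in the chain.
 */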
struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
	struct vcpu *next;
	struct two_vcpu_locked both_vcpu_locked;

	/*
	 * The action specified by the SP in its manifest is ``Non-secure
	 * interrupt is signaled``. Refer to section 8.2.4 rules and guidelines
	 * bullet 4. Hence, the call chain starts unwinding. The current vCPU
	 * must have been part of a NWd scheduled call chain. Therefore, it is
	 * preempted and execution is either handed back to the normal world
	 * or to the previous SP vCPU in the call chain through the
	 * FFA_INTERRUPT ABI. The api_preempt() call is equivalent to calling
	 * api_switch_to_other_world for the current vCPU passing
	 * FFA_INTERRUPT. The SP can be resumed later by FFA_RUN.
	 */
	CHECK(current_vcpu->scheduling_mode == NWD_MODE);
	assert(current_vcpu->call_chain.next_node == NULL);

	if (current_vcpu->call_chain.prev_node == NULL) {
		/* End of NWd scheduled call chain. */
		return api_preempt(current_vcpu);
	}

	next = current_vcpu->call_chain.prev_node;
	CHECK(next != NULL);

	/*
	 * Lock both vCPUs. Strictly speaking, it may not be necessary since
	 * next is guaranteed to be in BLOCKED state as it is the predecessor
	 * of the current vCPU in the present call chain.
	 */
	both_vcpu_locked = vcpu_lock_both(current_vcpu, next);

	/* Removing a node from an existing call chain. */
	current_vcpu->call_chain.prev_node = NULL;
	current_vcpu->state = VCPU_STATE_PREEMPTED;

	/*
	 * SPMC applies the runtime model until the vCPU transitions from
	 * running to waiting state. Moreover, the SP continues to remain in
	 * its CPU cycle allocation mode. Hence, rt_model and scheduling_mode
	 * are not changed here.
	 */
	assert(next->state == VCPU_STATE_BLOCKED);
	assert(next->call_chain.next_node == current_vcpu);

	next->call_chain.next_node = NULL;

	vcpu_set_running(both_vcpu_locked.vcpu2,
			 &(struct ffa_value){
				 .func = FFA_INTERRUPT_32,
				 .arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
						     vcpu_index(current_vcpu)),
			 });

	sl_unlock(&next->lock);
	sl_unlock(&current_vcpu->lock);

	return next;
}

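/**
 * Enables the virtual maintenance interrupts (managed exit and notification
 * pending interrupt) for the current vCPU, if the partition supports them.
 */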
static void plat_ffa_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;

	if (ffa_vm_managed_exit_supported(vm)) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_MANAGED_EXIT_INTID);
		/*
		 * SPMC decides the interrupt type for the managed exit signal
		 * based on the partition manifest.
		 */
		if (vm->me_signal_virq) {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_IRQ);
		} else {
			vcpu_virt_interrupt_set_type(interrupts,
						     HF_MANAGED_EXIT_INTID,
						     INTERRUPT_TYPE_FIQ);
		}
	}

	if (vm->notifications.enabled) {
		vcpu_virt_interrupt_set_enabled(interrupts,
						HF_NOTIFICATION_PENDING_INTID);
	}
}

/**
 * Enable relevant virtual interrupts for Secure Partitions.
 * For all SPs, any applicable virtual maintenance interrupts are enabled.
 * Additionally, for S-EL0 partitions, all the interrupts declared in the
 * partition manifest are enabled at the virtual interrupt controller
 * interface early during the boot stage, as an S-EL0 SP need not call the
 * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
 */
void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
					      struct vm_locked vm_locked)
{
	struct vcpu *current;
	struct interrupts *interrupts;
	struct vm *vm;

	current = current_locked.vcpu;
	interrupts = &current->interrupts;
	vm = current->vm;
	assert(vm == vm_locked.vm);

	if (vm->el0_partition) {
		for (uint32_t k = 0; k < VM_MANIFEST_MAX_INTERRUPTS; k++) {
			struct interrupt_descriptor int_desc;

			int_desc = vm_locked.vm->interrupt_desc[k];

			/* Interrupt descriptors are populated contiguously. */
			if (!int_desc.valid) {
				break;
			}
			vcpu_virt_interrupt_set_enabled(interrupts,
							int_desc.interrupt_id);
		}
	}

	plat_ffa_enable_virtual_maintenance_interrupts(current_locked);
}

/**
 * Reconfigure the interrupt belonging to the current partition at runtime.
 * At present, this paravirtualized interface only allows the following
 * commands, which signify what change is being requested by the current
 * partition:
 * - Change the target CPU of the interrupt.
 * - Change the security state of the interrupt.
 * - Enable or disable the physical interrupt.
 */
int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
				   uint32_t value, struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	int64_t ret = -1;
	struct interrupt_descriptor *int_desc = NULL;

	/*
	 * Lock VM to protect the interrupt descriptor from being modified
	 * concurrently.
	 */
	vm_locked = vm_lock(vm);

	switch (command) {
	case INT_RECONFIGURE_TARGET_PE:
		/* Here, value represents the target PE index. */
		if (value >= MAX_CPUS) {
			dlog_verbose(
				"Illegal target PE index specified while "
				"reconfiguring interrupt %x\n",
				int_id);
			goto out_unlock;
		}

		/*
		 * A UP SP cannot reconfigure an interrupt to be targeted at
		 * any physical CPU other than the one it is currently
		 * running on.
		 */
		if (vm_is_up(vm) && value != cpu_index(current->cpu)) {
			dlog_verbose(
				"Illegal target PE index specified by current "
				"UP SP\n");
			goto out_unlock;
		}

		/* Configure the interrupt to be routed to a specific CPU. */
		int_desc = vm_interrupt_set_target_mpidr(
			vm_locked, int_id, cpu_find_index(value)->id);
		break;
	case INT_RECONFIGURE_SEC_STATE:
		/* Specify the new security state of the interrupt. */
		if (value != INT_DESC_SEC_STATE_NS &&
		    value != INT_DESC_SEC_STATE_S) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		}
		int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value);
		break;
	case INT_RECONFIGURE_ENABLE:
		/* Enable or disable the interrupt. */
		if (value != INT_DISABLE && value != INT_ENABLE) {
			dlog_verbose(
				"Illegal value %x specified while "
				"reconfiguring interrupt %x\n",
				value, int_id);
			goto out_unlock;
		} else {
			int_desc = vm_interrupt_set_enable(vm_locked, int_id,
							   value == INT_ENABLE);
		}
		break;
	default:
		dlog_verbose("Interrupt reconfigure: Unsupported command %x\n",
			     command);
		goto out_unlock;
	}

	/* Check if the interrupt belongs to the current SP. */
	if (int_desc == NULL) {
		dlog_verbose("Interrupt %x does not belong to current SP\n",
			     int_id);
		goto out_unlock;
	}

	ret = 0;
	plat_interrupts_reconfigure_interrupt(*int_desc);

out_unlock:
	vm_unlock(&vm_locked);

	return ret;
}

/* Returns the virtual interrupt id to be handled by SP. */
uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
	uint32_t int_id;

	/*
	 * If there are any virtual interrupts in the queue, return the first
	 * entry. Else, return the pending interrupt from the bitmap.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &int_id)) {
		struct interrupts *interrupts;

		/*
		 * Mark the virtual interrupt as no longer pending and
		 * decrement the count.
		 */
		interrupts = &current_locked.vcpu->interrupts;
		vcpu_virt_interrupt_clear_pending(interrupts, int_id);
		vcpu_interrupt_count_decrement(current_locked, interrupts,
					       int_id);

		return int_id;
	}

	return api_interrupt_get(current_locked);
}

/**
 * Run the vCPU in SPMC schedule mode under the runtime model for secure
 * interrupt handling.
 */
static void plat_ffa_run_in_sec_interrupt_rtm(
	struct vcpu_locked target_vcpu_locked)
{
	struct vcpu *target_vcpu;

	target_vcpu = target_vcpu_locked.vcpu;

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;
	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->requires_deactivate_call = false;
}

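/**
 * Intercepts an FF-A call if the current vCPU has a pending virtual secure
 * interrupt queued: prepares the FFA_INTERRUPT return value to signal the
 * interrupt, runs the current vCPU in SPMC schedule mode and marks the next
 * vCPU as preempted. Returns true if the call was intercepted.
 */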
bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
				   struct vcpu_locked next_locked,
				   struct ffa_value *signal_interrupt)
{
	uint32_t intid;

	/*
	 * Check if there are any pending virtual secure interrupts to be
	 * handled.
	 */
	if (vcpu_interrupt_queue_peek(current_locked, &intid)) {
		/*
		 * Prepare to signal virtual secure interrupt to S-EL0/S-EL1 SP
		 * in WAITING state. Refer to FF-A v1.2 Table 9.1 and Table 9.2
		 * case 1.
		 */
		*signal_interrupt = api_ffa_interrupt_return(intid);

		/*
		 * Prepare to resume this partition's vCPU in SPMC
		 * schedule mode to handle the virtual secure interrupt.
		 */
		plat_ffa_run_in_sec_interrupt_rtm(current_locked);

		current_locked.vcpu->preempted_vcpu = next_locked.vcpu;
		next_locked.vcpu->state = VCPU_STATE_PREEMPTED;

		dlog_verbose("%s: Pending interrupt, intercepting FF-A call.\n",
			     __func__);

		return true;
	}

	return false;
}