/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vcpu.h"

#include "hf/arch/cpu.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

/** GP register used to pass the current vCPU ID at core bring-up. */
#define PHYS_CORE_IDX_GP_REG 4

/**
 * Locks the given vCPU and returns a handle that grants access to it while
 * the lock is held.
 */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu)
{
	struct vcpu_locked locked = {
		.vcpu = vcpu,
	};

	sl_lock(&vcpu->lock);

	return locked;
}

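/*
 * Typical usage (an illustrative sketch, not copied from a specific caller):
 *
 *	struct vcpu_locked locked = vcpu_lock(vcpu);
 *	... operate on locked.vcpu while the lock is held ...
 *	vcpu_unlock(&locked);
 */
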
/**
 * Locks two vCPUs, ensuring a consistent locking order based on the locks'
 * addresses so that concurrent callers cannot deadlock against each other.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2)
{
	struct two_vcpu_locked dual_lock;

	sl_lock_both(&vcpu1->lock, &vcpu2->lock);
	dual_lock.vcpu1.vcpu = vcpu1;
	dual_lock.vcpu2.vcpu = vcpu2;

	return dual_lock;
}

/**
 * Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to
 * reflect the fact that the vCPU is no longer locked.
 */
void vcpu_unlock(struct vcpu_locked *locked)
{
	sl_unlock(&locked->vcpu->lock);
	locked->vcpu = NULL;
}

void vcpu_init(struct vcpu *vcpu, struct vm *vm)
{
	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
	sl_init(&vcpu->lock);
	vcpu->regs_available = true;
	vcpu->vm = vm;
	vcpu->direct_request_origin.is_ffa_req2 = false;
	vcpu->direct_request_origin.vm_id = HF_INVALID_VM_ID;
	vcpu->rt_model = RTM_SP_INIT;
	list_init(&vcpu->timer_node);
	list_init(&vcpu->ipi_list_node);

	/*
	 * Though resources have not been allocated to the partition yet, it is
	 * safe to skip the NULL state for the vCPU during cold boot and
	 * transition directly to the CREATED state.
	 */
	vcpu->state = VCPU_STATE_CREATED;
}

/**
 * Initialise the registers for the given vCPU and set the state to
 * VCPU_STATE_WAITING. The caller must hold the vCPU lock while calling this.
 */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
{
	arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
	vcpu.vcpu->state = VCPU_STATE_WAITING;
}

ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
{
	size_t index = vcpu - vcpu->vm->vcpus;

	CHECK(index < UINT16_MAX);
	return index;
}

/**
 * Checks whether the given vCPU is in an off state, for the purpose of
 * turning vCPUs on and off. Note that Aborted still counts as ON for the
 * purposes of PSCI, because according to the PSCI specification (section
 * 5.7.1) a core is only considered to be off if it has been turned off
 * with a CPU_OFF call or hasn't yet been turned on with a CPU_ON call.
 */
bool vcpu_is_off(struct vcpu_locked vcpu)
{
	return (vcpu.vcpu->state == VCPU_STATE_OFF);
}

/**
 * Starts a vCPU of a secondary VM.
 *
 * Returns true if the secondary was reset and started, or false if it was
 * already on and so nothing was done.
 */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg)
{
	struct vm *vm = vcpu_locked.vcpu->vm;
	bool vcpu_was_off;

	CHECK(vm->id != HF_PRIMARY_VM_ID);

	vcpu_was_off = vcpu_is_off(vcpu_locked);
	if (vcpu_was_off) {
		/*
		 * Set vCPU registers to a clean state ready for boot. As this
		 * is a secondary which can migrate between pCPUs, the ID of
		 * the vCPU is defined as the index and does not match the ID
		 * of the pCPU it is running on.
		 */
		arch_regs_reset(vcpu_locked.vcpu);
		vcpu_on(vcpu_locked, entry, arg);
	}

	return vcpu_was_off;
}

/**
 * Handles a page fault. It does so by determining if it's a legitimate or
 * spurious fault, and recovering from the latter.
 *
 * Returns true if the caller should resume the current vCPU, or false if its
 * VM should be aborted.
 */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f)
{
	struct vm *vm = current->vm;
	mm_mode_t mode;
	uint32_t mask = f->mode | MM_MODE_INVALID;
	bool resume;
	struct vm_locked locked_vm;

	locked_vm = vm_lock(vm);
	/*
	 * Check if this is a legitimate fault, i.e., if the page table doesn't
	 * allow the access attempted by the VM.
	 *
	 * Otherwise, this is a spurious fault, likely because another CPU is
	 * updating the page table. It is responsible for issuing global TLB
	 * invalidations while holding the VM lock, so we don't need to do
	 * anything else to recover from it. (Acquiring/releasing the lock
	 * ensured that the invalidations have completed.)
	 */
	if (!locked_vm.vm->el0_partition) {
		resume = vm_mem_get_mode(locked_vm, f->ipaddr,
					 ipa_add(f->ipaddr, 1), &mode) &&
			 (mode & mask) == f->mode;
	} else {
		/*
		 * For EL0 partitions we need to get the mode for the faulting
		 * vaddr.
		 */
		resume =
			vm_mem_get_mode(locked_vm, ipa_init(va_addr(f->vaddr)),
					ipa_add(ipa_init(va_addr(f->vaddr)), 1),
					&mode) &&
			(mode & mask) == f->mode;

		/*
		 * For EL0 partitions, if there is an instruction abort and the
		 * mode of the page is RWX, we don't resume since Hafnium does
		 * not allow write and executable pages.
		 */
		if ((f->mode == MM_MODE_X) &&
		    ((mode & MM_MODE_W) == MM_MODE_W)) {
			resume = false;
		}
	}

	vm_unlock(&locked_vm);

	if (!resume) {
		dlog_warning(
			"Stage-%d page fault: pc=%#lx, vmid=%#x, vcpu=%u, "
			"vaddr=%#lx, ipaddr=%#lx, mode=%#x %#x\n",
			current->vm->el0_partition ? 1 : 2, va_addr(f->pc),
			vm->id, vcpu_index(current), va_addr(f->vaddr),
			ipa_addr(f->ipaddr), f->mode, mode);
	}

	return resume;
}

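/**
 * Writes the physical core index of the pCPU this vCPU runs on into the
 * designated GP register, so the partition can read it at core bring-up.
 */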
void vcpu_set_phys_core_idx(struct vcpu *vcpu)
{
	arch_regs_set_gp_reg(&vcpu->regs, cpu_index(vcpu->cpu),
			     PHYS_CORE_IDX_GP_REG);
}

/**
 * Sets the designated GP register through which the vCPU expects to receive
 * the boot info's address.
 */
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;
	uint32_t gp_register_num = vm->boot_info.gp_register_num;

	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     gp_register_num);
	}
}

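/*
 * The helpers below query and update the per-vCPU bitmaps tracking which
 * virtual interrupts are enabled and which are pending.
 */
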
static bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
					   uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
					  intid) == 1U;
}

static void vcpu_virt_interrupt_set_enabled(struct interrupts *interrupts,
					    uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
}

static void vcpu_virt_interrupt_clear_enabled(struct interrupts *interrupts,
					       uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
}

static void vcpu_virt_interrupt_set_pending(struct interrupts *interrupts,
					    uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
}

static void vcpu_virt_interrupt_clear_pending(struct interrupts *interrupts,
					      uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
}

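/*
 * The helpers below maintain the per-vCPU counts of virtual interrupts that
 * are both enabled and pending, split into IRQ and FIQ counts by interrupt
 * type.
 */
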
static void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static void vcpu_interrupt_count_increment(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_increment(vcpu_locked);
	} else {
		vcpu_fiq_count_increment(vcpu_locked);
	}
}

static void vcpu_interrupt_count_decrement(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_decrement(vcpu_locked);
	} else {
		vcpu_fiq_count_decrement(vcpu_locked);
	}
}

uint32_t vcpu_virt_interrupt_irq_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

uint32_t vcpu_virt_interrupt_fiq_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

uint32_t vcpu_virt_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_virt_interrupt_irq_count_get(vcpu_locked) +
	       vcpu_virt_interrupt_fiq_count_get(vcpu_locked);
}

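/**
 * Clears the pending state of the given virtual interrupt and, if it is
 * enabled, decrements the enabled-and-pending count.
 */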
static void vcpu_interrupt_clear_decrement(struct vcpu_locked vcpu_locked,
					   uint32_t intid)
{
	struct interrupts *interrupts = &(vcpu_locked.vcpu->interrupts);

	/*
	 * Mark the virtual interrupt as no longer pending and decrement
	 * the interrupt count if it is enabled.
	 */
	vcpu_virt_interrupt_clear_pending(interrupts, intid);
	if (vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
		vcpu_interrupt_count_decrement(vcpu_locked, intid);
	}
}

/**
 * Sets the vCPU to VCPU_STATE_RUNNING and, with that, marks its registers as
 * "not available".
 * If there are registers to be written to the vCPU's context, do so. However,
 * this action is restricted to the WAITING and BLOCKED states, so assert
 * accordingly.
 */
void vcpu_set_running(struct vcpu_locked target_locked,
		      const struct ffa_value *args)
{
	struct vcpu *target_vcpu = target_locked.vcpu;

	if (args != NULL) {
		CHECK(target_vcpu->regs_available);
		assert(target_vcpu->state == VCPU_STATE_WAITING ||
		       target_vcpu->state == VCPU_STATE_BLOCKED);

		arch_regs_set_retval(&target_vcpu->regs, *args);
	}

	/* Mark the registers as unavailable now. */
	target_vcpu->regs_available = false;

	/* We are about to resume the target vCPU. */
	target_vcpu->state = VCPU_STATE_RUNNING;
}

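/**
 * Enters the secure interrupt runtime model: the target vCPU must not already
 * be part of a call chain, and is placed in SPMC scheduled mode with the
 * RTM_SEC_INTERRUPT runtime model.
 */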
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked)
{
	struct vcpu *target_vcpu = vcpu_locked.vcpu;

	assert(target_vcpu->scheduling_mode == NONE);
	assert(target_vcpu->call_chain.prev_node == NULL);
	assert(target_vcpu->call_chain.next_node == NULL);
	assert(target_vcpu->rt_model == RTM_NONE);

	target_vcpu->scheduling_mode = SPMC_MODE;
	target_vcpu->rt_model = RTM_SEC_INTERRUPT;
}

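/** Advances a circular queue index by one entry, wrapping at VINT_QUEUE_MAX. */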
static uint16_t queue_increment_index(uint16_t current_idx)
{
	/* Look at the next index. Wrap around if necessary. */
	if (current_idx == VINT_QUEUE_MAX - 1) {
		return 0;
	}

	return current_idx + 1;
}

/**
 * If the tail has reached the head of the queue and the count of queued
 * interrupts is 0, then the queue is empty.
 */
static bool is_queue_empty(struct interrupt_queue *q)
{
	return q->head == q->tail && q->queued_vint_count == 0U;
}

/**
 * If the tail has reached the head of the queue and the count of queued
 * interrupts matches the size of the buffer, then the queue is full.
 */
static bool is_queue_full(struct interrupt_queue *q)
{
	return q->head == q->tail && q->queued_vint_count == VINT_QUEUE_MAX;
}

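/*
 * Note that head == tail alone cannot distinguish an empty queue from a full
 * one, which is why queued_vint_count is tracked alongside the two indices.
 */
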
/**
 * Queue the pending virtual interrupt for the target vCPU.
 *
 * Returns true if successful in pushing a new entry to the queue, or false
 * otherwise.
 */
static bool vcpu_interrupt_queue_push(struct vcpu_locked vcpu_locked,
				      uint32_t vint_id)
{
	struct interrupt_queue *q;
	uint16_t new_tail;

	assert(vint_id != HF_INVALID_INTID);

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/*
	 * A new entry is pushed at the tail of the queue. Upon successful
	 * push operation, the tail increments or wraps around.
	 */
	new_tail = queue_increment_index(q->tail);

	if (is_queue_full(q)) {
		return false;
	}

	/* Add the virtual interrupt to the queue. */
	q->vint_buffer[q->tail] = vint_id;
	q->tail = new_tail;

	assert(q->queued_vint_count < VINT_QUEUE_MAX);
	q->queued_vint_count++;

	return true;
}

/**
 * Removes the entry at the head of the specified vCPU's queue.
 * Returns the removed virtual interrupt ID, or HF_INVALID_INTID if the queue
 * is empty.
 */
static uint32_t vcpu_interrupt_queue_pop(struct vcpu_locked vcpu_locked)
{
	struct interrupt_queue *q;
	uint16_t new_head;
	uint32_t vint_id;

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/* Check if queue is empty. */
	if (is_queue_empty(q)) {
		return HF_INVALID_INTID;
	}

	/*
	 * An entry is removed from the head of the queue. Once successful, the
	 * head is incremented or wrapped around if needed.
	 */
	new_head = queue_increment_index(q->head);
	vint_id = q->vint_buffer[q->head];
	q->head = new_head;

	assert(q->queued_vint_count > 0);
	q->queued_vint_count--;

	return vint_id;
}

/**
 * Looks at the first pending virtual interrupt in the vCPU's queue. Note
 * that the entry is not removed from the queue.
 *
 * Returns the virtual interrupt ID at the head of the queue, or
 * HF_INVALID_INTID if the queue is empty.
 */
static uint32_t vcpu_interrupt_queue_peek(struct vcpu_locked vcpu_locked)
{
	struct interrupt_queue *q;
	uint32_t queued_vint;

	q = &vcpu_locked.vcpu->interrupts.vint_q;

	/* Check if queue is empty. */
	if (is_queue_empty(q)) {
		return HF_INVALID_INTID;
	}

	queued_vint = q->vint_buffer[q->head];
	assert(queued_vint != HF_INVALID_INTID);

	return queued_vint;
}

/**
 * When interrupt handling is complete, the preempted_vcpu field should go
 * back to NULL.
 */
void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked)
{
	struct vcpu *vcpu;

	vcpu = vcpu_locked.vcpu;
	vcpu->preempted_vcpu = NULL;
}

void vcpu_virt_interrupt_enable(struct vcpu_locked vcpu_locked,
				uint32_t vint_id, bool enable)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	if (enable) {
		/*
		 * If it is pending and was not enabled before, increment the
		 * count.
		 */
		if (vcpu_is_virt_interrupt_pending(interrupts, vint_id) &&
		    !vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
			vcpu_interrupt_count_increment(vcpu_locked, vint_id);
		}
		vcpu_virt_interrupt_set_enabled(interrupts, vint_id);
	} else {
		/*
		 * If it is pending and was enabled before, decrement the
		 * count.
		 */
		if (vcpu_is_virt_interrupt_pending(interrupts, vint_id) &&
		    vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
			vcpu_interrupt_count_decrement(vcpu_locked, vint_id);
		}
		vcpu_virt_interrupt_clear_enabled(interrupts, vint_id);
	}
}

/*
 * Find and return the first intid that is pending and enabled. The entry for
 * this intid is left at the head of the queue so it can be popped later.
 */
uint32_t vcpu_virt_interrupt_peek_pending_and_enabled(
	struct vcpu_locked vcpu_locked)
{
	uint32_t vint_id;
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
	uint32_t pending_and_enabled_count =
		vcpu_virt_interrupt_count_get(vcpu_locked);

	/* First check there is a pending and enabled interrupt to return. */
	if (pending_and_enabled_count == 0) {
		return HF_INVALID_INTID;
	}

	/*
	 * We know here there is a pending and enabled interrupt in
	 * the queue. So push any interrupts that are not enabled to
	 * the back of the queue until we reach the first enabled one.
	 */
	vint_id = vcpu_interrupt_queue_peek(vcpu_locked);
	while (!vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
		vcpu_interrupt_queue_pop(vcpu_locked);
		vcpu_interrupt_queue_push(vcpu_locked, vint_id);
		vint_id = vcpu_interrupt_queue_peek(vcpu_locked);
	}

	assert(vint_id != HF_INVALID_INTID);

	return vint_id;
}

/*
 * Get the next pending and enabled virtual interrupt ID.
 * Pops from the queue and clears the bitmap.
 */
uint32_t vcpu_virt_interrupt_get_pending_and_enabled(
	struct vcpu_locked vcpu_locked)
{
	uint32_t vint_id =
		vcpu_virt_interrupt_peek_pending_and_enabled(vcpu_locked);

	if (vint_id != HF_INVALID_INTID) {
		vcpu_interrupt_queue_pop(vcpu_locked);
		vcpu_interrupt_clear_decrement(vcpu_locked, vint_id);

		/*
		 * Reset interrupts_info_get_retrieved so the interrupts
		 * pending in the vCPU can be included in the
		 * FFA_NOTIFICATION_INFO_GET list. Reset it now because this
		 * function clears the state of the virtual interrupt.
		 */
		vcpu_locked.vcpu->interrupts_info_get_retrieved = false;
	}

	return vint_id;
}

/*
 * Set a virtual interrupt to pending. Add it to the queue and set the bitmap.
 */
void vcpu_virt_interrupt_inject(struct vcpu_locked vcpu_locked,
				uint32_t vint_id)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	/*
	 * An interrupt can only be pending once so return if it is
	 * already pending.
	 */
	if (vcpu_is_virt_interrupt_pending(interrupts, vint_id)) {
		return;
	}

	/* Push to the queue and set the bitmap. */
	if (!vcpu_interrupt_queue_push(vcpu_locked, vint_id)) {
		dlog_verbose(
			"Exhausted interrupt queue for vCPU %u of SP %#x\n",
			vcpu_index(vcpu_locked.vcpu), vcpu_locked.vcpu->vm->id);
		assert(false);
		return;
	}
	vcpu_virt_interrupt_set_pending(interrupts, vint_id);

	if (vcpu_is_virt_interrupt_enabled(interrupts, vint_id)) {
		vcpu_interrupt_count_increment(vcpu_locked, vint_id);
	}
}

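/*
 * Illustrative lifecycle (a sketch, not taken from a specific caller): a
 * virtual interrupt becomes pending via vcpu_virt_interrupt_inject(), may be
 * observed with vcpu_virt_interrupt_peek_pending_and_enabled(), and is
 * eventually consumed by vcpu_virt_interrupt_get_pending_and_enabled(), which
 * pops it from the queue and clears its pending state.
 */
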
void vcpu_virt_interrupt_clear(struct vcpu_locked vcpu_locked, uint32_t vint_id)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
	uint32_t queued_vint_count = interrupts->vint_q.queued_vint_count;

	/* See if interrupt is pending and therefore needs to be cleared. */
	if (!vcpu_is_virt_interrupt_pending(interrupts, vint_id)) {
		return;
	}

	for (uint32_t i = 0; i < queued_vint_count; i++) {
		uint32_t intid = vcpu_interrupt_queue_pop(vcpu_locked);

		if (intid == vint_id) {
			vcpu_interrupt_clear_decrement(vcpu_locked, intid);
		} else {
			/*
			 * If the interrupt is not the one we wish to remove,
			 * inject it again. We must pop and push all interrupts
			 * to ensure the FIFO ordering is maintained.
			 */
			vcpu_interrupt_queue_push(vcpu_locked, intid);
		}
	}
}

/**
 * Prepare the target vCPU to run after receiving a direct request ABI.
 */
void vcpu_dir_req_set_state(struct vcpu_locked target_locked, bool is_ffa_req2,
			    ffa_id_t sender_vm_id, struct ffa_value args)
{
	struct vcpu *target_vcpu = target_locked.vcpu;

	target_vcpu->state = VCPU_STATE_RUNNING;
	target_vcpu->regs_available = false;
	target_vcpu->direct_request_origin.is_ffa_req2 = is_ffa_req2;
	target_vcpu->direct_request_origin.vm_id = sender_vm_id;
	target_vcpu->direct_request_origin.is_framework =
		ffa_is_framework_msg(args);

	arch_regs_set_retval(&target_vcpu->regs, args);
}

/**
 * Clear direct request origin vm_id and request type for the target vCPU.
 * Also, the scheduling mode and partition runtime model are reset.
 */
void vcpu_dir_req_reset_state(struct vcpu_locked vcpu_locked)
{
	struct vcpu *vcpu = vcpu_locked.vcpu;

	/* Clear direct request origin vm_id and request type. */
	vcpu->direct_request_origin.vm_id = HF_INVALID_VM_ID;
	vcpu->direct_request_origin.is_framework = false;

	/* Reset runtime model and scheduling mode. */
	vcpu->scheduling_mode = NONE;
	vcpu->rt_model = RTM_NONE;
}