/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/interrupt_desc.h"
#include "hf/list.h"
#include "hf/spinlock.h"

#include "vmapi/hf/ffa.h"

/** Actions the SPMC can take for a non-secure interrupt. */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2
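
/*
 * These correspond to the FF-A v1.1 actions for a non-secure interrupt that
 * triggers while an SP is running: queue the interrupt (NS_ACTION_QUEUED),
 * signal it to the SP through a managed exit (NS_ACTION_ME), or signal it
 * immediately (NS_ACTION_SIGNALED).
 */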

/** Maximum number of pending virtual interrupts in the queue per vCPU. */
#define VINT_QUEUE_MAX 5

enum vcpu_state {
        /** The vCPU is switched off. */
        VCPU_STATE_OFF,

        /** The vCPU is currently running. */
        VCPU_STATE_RUNNING,

        /** The vCPU is waiting to be allocated CPU cycles to do work. */
        VCPU_STATE_WAITING,

        /**
         * The vCPU is blocked and waiting for some work to complete on
         * its behalf.
         */
        VCPU_STATE_BLOCKED,

        /** The vCPU has been preempted by an interrupt. */
        VCPU_STATE_PREEMPTED,

        /** The vCPU is waiting for an interrupt. */
        VCPU_STATE_BLOCKED_INTERRUPT,

        /** The vCPU has aborted. */
        VCPU_STATE_ABORTED,
};

/** Refer to section 7 of the FF-A v1.1 EAC0 spec. */
enum partition_runtime_model {
        RTM_NONE,
        /** Runtime model for FFA_RUN. */
        RTM_FFA_RUN,
        /** Runtime model for FFA_MSG_SEND_DIRECT_REQ. */
        RTM_FFA_DIR_REQ,
        /** Runtime model for secure interrupt handling. */
        RTM_SEC_INTERRUPT,
        /** Runtime model for SP initialization. */
        RTM_SP_INIT,
};

/** Refer to section 8.2.3 of the FF-A v1.1 EAC0 spec. */
enum schedule_mode {
        NONE,
        /** Normal world scheduled mode. */
        NWD_MODE,
        /** SPMC scheduled mode. */
        SPMC_MODE,
};

/*
 * This queue is implemented as a circular buffer. Entries are managed on a
 * first-in, first-out basis.
 */
struct interrupt_queue {
        uint32_t vint_buffer[VINT_QUEUE_MAX];
        uint16_t head;
        uint16_t tail;
};
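
/*
 * Illustrative sketch only (the real implementation lives elsewhere in the
 * vCPU code): with this layout, a push would typically advance `tail` modulo
 * the buffer size, e.g.
 *
 *	q->vint_buffer[q->tail] = vint_id;
 *	q->tail = (q->tail + 1) % VINT_QUEUE_MAX;
 *
 * and a pop would read from `head` and advance it the same way. See the
 * vcpu_interrupt_queue_* functions declared at the end of this file.
 */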

struct interrupts {
        /** Bitfield keeping track of which interrupts are enabled. */
        struct interrupt_bitmap interrupt_enabled;
        /** Bitfield keeping track of which interrupts are pending. */
        struct interrupt_bitmap interrupt_pending;
        /** Bitfield recording the interrupt pin configuration. */
        struct interrupt_bitmap interrupt_type;
        /**
         * The number of interrupts which are currently both enabled and
         * pending. Virtual IRQ and FIQ interrupt types are counted
         * independently, i.e. the sum of the two counters is the number of
         * bits set in interrupt_enabled & interrupt_pending.
         */
        uint32_t enabled_and_pending_irq_count;
        uint32_t enabled_and_pending_fiq_count;

        /**
         * The partition manager maintains a queue of pending virtual
         * interrupts.
         */
        struct interrupt_queue vint_q;
};

struct vcpu_fault_info {
        ipaddr_t ipaddr;
        vaddr_t vaddr;
        vaddr_t pc;
        uint32_t mode;
};

struct call_chain {
        /** Previous node in the SP call chain. */
        struct vcpu *prev_node;

        /** Next node in the SP call chain. */
        struct vcpu *next_node;
};

#define LOG_BUFFER_SIZE 256

struct log_buffer {
        char chars[LOG_BUFFER_SIZE];
        uint16_t len;
};

struct vcpu {
        struct spinlock lock;

        /*
         * The state is only changed in the context of the vCPU being run.
         * This ensures the scheduler can easily keep track of the vCPU state
         * as transitions are indicated by the return code from the run call.
         */
        enum vcpu_state state;

        struct cpu *cpu;
        struct vm *vm;
        struct arch_regs regs;
        struct interrupts interrupts;

        struct log_buffer log_buffer;

        /*
         * Determines whether the 'regs' field is available for use. This is
         * set to false when a vCPU is about to run on a physical CPU, and is
         * set back to true when it is descheduled. This is not relevant for
         * the primary VM vCPUs in the normal world (or the "other world VM"
         * vCPUs in the secure world) as they are pinned to physical CPUs and
         * there is no contention to take care of.
         */
        bool regs_available;

        /*
         * If the current vCPU is executing as a consequence of a direct
         * request invocation, then this member holds the ID of the VM from
         * which the call originated. The value HF_INVALID_VM_ID implies the
         * vCPU is not executing as a result of a prior direct request
         * invocation.
         */
        struct {
                ffa_id_t vm_id;
                /**
                 * Indicates whether the request was made via
                 * FFA_MSG_SEND_DIRECT_REQ2.
                 */
                bool is_ffa_req2;
                /** Indicates whether the request is a framework message. */
                bool is_framework;
        } direct_request_origin;

        /** True while the partition is handling a managed exit. */
        bool processing_managed_exit;

        /**
         * Tracks the vCPU that was preempted when the secure interrupt
         * triggered.
         */
        struct vcpu *preempted_vcpu;

        /**
         * Per section 8.3 of the FF-A v1.1 Beta0 spec, an SP can use multiple
         * mechanisms to signal completion of secure interrupt handling. The
         * SP can invoke explicit FF-A ABIs, namely FFA_MSG_WAIT and FFA_RUN,
         * when in the WAITING and BLOCKED states respectively, but it must
         * use the implicit completion mechanism, i.e. dropping the priority
         * of the virtual secure interrupt, when the SPMC signaled the virtual
         * interrupt in the PREEMPTED state (the vCPU was preempted by a self
         * S-Int while running). This variable helps the SPMC keep track of
         * which mechanism is in use and perform the appropriate bookkeeping.
         */
        bool requires_deactivate_call;

        /** SP call chain. */
        struct call_chain call_chain;

        /**
         * Tracks whether the pending IPI has been retrieved by
         * FFA_NOTIFICATION_INFO_GET.
         */
        bool ipi_info_get_retrieved;

        /**
         * Indicates whether the current vCPU is running in SPMC scheduled
         * mode or normal world scheduled mode.
         */
        enum schedule_mode scheduling_mode;

        /**
         * If the action in response to a non-secure or other-secure interrupt
         * is to queue it, this field is used to save and restore the current
         * priority mask.
         */
        uint8_t prev_interrupt_priority;

        /** Partition runtime model. */
        enum partition_runtime_model rt_model;

        /* List entry pointing to the next vCPU in the boot order list. */
        struct list_entry boot_list_node;

        /**
         * An entry in a list maintained by Hafnium for pending arch timers.
         * It exists in the list on behalf of its parent vCPU. The `prev` and
         * `next` fields point to the adjacent entries in the list. The list
         * itself is protected by a spinlock, so a timer entry is safeguarded
         * from concurrent accesses.
         */
        struct list_entry timer_node;
};

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
        struct vcpu *vcpu;
};

/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
        struct vcpu_locked vcpu1;
        struct vcpu_locked vcpu2;
};

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
void vcpu_unlock(struct vcpu_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
                                    ipaddr_t entry, uintreg_t arg);

bool vcpu_handle_page_fault(const struct vcpu *current,
                            struct vcpu_fault_info *f);
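
/*
 * Illustrative locking pattern (a sketch, not a prescription):
 *
 *	struct vcpu_locked locked = vcpu_lock(vcpu);
 *	... operate on locked.vcpu ...
 *	vcpu_unlock(&locked);
 *
 * vcpu_lock_both() exists for callers that need two vCPUs locked at once; a
 * deadlock-safe implementation acquires the two locks in a fixed global
 * order (e.g. by address).
 */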

void vcpu_set_phys_core_idx(struct vcpu *vcpu);
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);

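/*
 * Boot order helpers. These are expected to maintain the list linked through
 * each vCPU's boot_list_node: vcpu_update_boot() inserts a vCPU into the
 * boot order list, vcpu_get_boot_vcpu() returns the first vCPU to boot, and
 * vcpu_get_next_boot() is presumed to return the successor of the given vCPU
 * (or NULL at the end of the list).
 */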
void vcpu_update_boot(struct vcpu *vcpu);
struct vcpu *vcpu_get_boot_vcpu(void);
struct vcpu *vcpu_get_next_boot(struct vcpu *vcpu);

static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
                                                  uint32_t intid)
{
        return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
                                          intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_enabled(
        struct interrupts *interrupts, uint32_t intid)
{
        interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
}

static inline void vcpu_virt_interrupt_clear_enabled(
        struct interrupts *interrupts, uint32_t intid)
{
        interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
}

static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
                                                  uint32_t intid)
{
        return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
                                          intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_pending(
        struct interrupts *interrupts, uint32_t intid)
{
        interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
}

static inline void vcpu_virt_interrupt_clear_pending(
        struct interrupts *interrupts, uint32_t intid)
{
        interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
}

static inline enum interrupt_type vcpu_virt_interrupt_get_type(
        struct interrupts *interrupts, uint32_t intid)
{
        return (enum interrupt_type)interrupt_bitmap_get_value(
                &interrupts->interrupt_type, intid);
}

static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
                                                uint32_t intid,
                                                enum interrupt_type type)
{
        if (type == INTERRUPT_TYPE_IRQ) {
                interrupt_bitmap_clear_value(&interrupts->interrupt_type,
                                             intid);
        } else {
                interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
        }
}
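
/*
 * The interrupt_type bitmap encodes INTERRUPT_TYPE_IRQ as a cleared bit and
 * the FIQ type as a set bit, which is why vcpu_virt_interrupt_set_type()
 * clears the bit for IRQ and sets it for any other type.
 */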

static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static inline void vcpu_interrupt_count_increment(
        struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
        uint32_t intid)
{
        if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
            INTERRUPT_TYPE_IRQ) {
                vcpu_irq_count_increment(vcpu_locked);
        } else {
                vcpu_fiq_count_increment(vcpu_locked);
        }
}

static inline void vcpu_interrupt_count_decrement(
        struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
        uint32_t intid)
{
        if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
            INTERRUPT_TYPE_IRQ) {
                vcpu_irq_count_decrement(vcpu_locked);
        } else {
                vcpu_fiq_count_decrement(vcpu_locked);
        }
}
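
/*
 * Callers are expected to keep these counters consistent with the enabled
 * and pending bitmaps: e.g. pair vcpu_virt_interrupt_set_pending() with
 * vcpu_interrupt_count_increment() (and the clear/decrement counterparts)
 * so that the invariant documented in struct interrupts holds.
 */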

static inline uint32_t vcpu_interrupt_irq_count_get(
        struct vcpu_locked vcpu_locked)
{
        return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

static inline uint32_t vcpu_interrupt_fiq_count_get(
        struct vcpu_locked vcpu_locked)
{
        return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
        return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
               vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
                                          struct vcpu_locked vcpu2_locked)
{
        vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
        vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
}

static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
                                               struct vcpu_locked vcpu2_locked)
{
        vcpu1_locked.vcpu->call_chain.prev_node = NULL;
        vcpu2_locked.vcpu->call_chain.next_node = NULL;
}
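
/*
 * In vcpu_call_chain_extend(), vcpu1 is the predecessor (caller) and vcpu2
 * the successor (callee). vcpu_call_chain_remove_node() unlinks vcpu1 from
 * its predecessor vcpu2. Both vCPUs must be locked, hence the pairs of
 * vcpu_locked arguments.
 */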

void vcpu_interrupt_clear_decrement(struct vcpu_locked vcpu_locked,
                                    uint32_t intid);

void vcpu_set_running(struct vcpu_locked target_locked,
                      const struct ffa_value *args);

static inline void vcpu_ipi_set_info_get_retrieved(
        struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->ipi_info_get_retrieved = true;
}

static inline bool vcpu_ipi_is_info_get_retrieved(
        struct vcpu_locked vcpu_locked)
{
        return vcpu_locked.vcpu->ipi_info_get_retrieved;
}

/**
 * Clears the flag tracking whether the IPI has been retrieved by
 * FFA_NOTIFICATION_INFO_GET.
 */
static inline void vcpu_ipi_clear_info_get_retrieved(
        struct vcpu_locked vcpu_locked)
{
        vcpu_locked.vcpu->ipi_info_get_retrieved = false;
}

void vcpu_save_interrupt_priority(struct vcpu_locked vcpu_locked,
                                  uint8_t priority);
void vcpu_interrupt_inject(struct vcpu_locked target_locked, uint32_t intid);
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked);

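/*
 * Accessors for the per-vCPU queue of pending virtual interrupts (struct
 * interrupt_queue). Presumed semantics, going by the signatures: push
 * returns false when the queue is full; pop and peek return false when it
 * is empty and otherwise write the interrupt ID through vint_id, with peek
 * leaving the entry in place.
 */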
bool vcpu_interrupt_queue_push(struct vcpu_locked vcpu_locked,
                               uint32_t vint_id);
bool vcpu_interrupt_queue_pop(struct vcpu_locked vcpu_locked,
                              uint32_t *vint_id);
bool vcpu_interrupt_queue_peek(struct vcpu_locked vcpu_locked,
                               uint32_t *vint_id);
bool vcpu_is_interrupt_in_queue(struct vcpu_locked vcpu_locked,
                                uint32_t vint_id);
bool vcpu_is_interrupt_queue_empty(struct vcpu_locked vcpu_locked);
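
/*
 * Illustrative drain loop under the presumed semantics above:
 *
 *	uint32_t vint_id;
 *
 *	while (vcpu_interrupt_queue_pop(vcpu_locked, &vint_id)) {
 *		... handle vint_id ...
 *	}
 */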

void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked);