blob: fe2b5b8875bb8c4ab55313e4b15516fbf3a35e37 [file] [log] [blame]
Fuad Tabba5c738432019-12-02 11:02:42 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Fuad Tabba5c738432019-12-02 11:02:42 +00007 */
8
9#pragma once
10
Olivier Deprezc5203fb2022-09-29 13:49:24 +020011#include "hf/arch/types.h"
12
Fuad Tabba5c738432019-12-02 11:02:42 +000013#include "hf/addr.h"
Daniel Boulby801f8ef2022-06-27 14:21:01 +010014#include "hf/interrupt_desc.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000015#include "hf/spinlock.h"
16
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010017#include "vmapi/hf/ffa.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000018
/** The life-cycle states a vCPU can be in, as tracked by the scheduler. */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
44
/**
 * Run-time models describing how a partition's execution context was given
 * CPU cycles. Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
57
/**
 * Who is scheduling the CPU cycles a vCPU runs on.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 *
 * NOTE(review): `NONE` is a very generic identifier for a global-scope enum
 * constant and risks clashing with other headers; an `SM_`/`MODE_` prefix
 * would be safer, but renaming would break existing callers.
 */
enum schedule_mode {
	/** No scheduling mode assigned. */
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
66
/** Per-vCPU virtual interrupt state. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/**
	 * Bitfield recording the interrupt pin configuration: a clear bit is
	 * IRQ, a set bit is FIQ (see vcpu_virt_interrupt_set_type()).
	 */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Count independently virtual IRQ and FIQ interrupt types
	 * i.e. the sum of the two counters is the number of bits set in
	 * interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};
83
/**
 * Describes a fault taken by a vCPU; consumed by vcpu_handle_page_fault().
 * NOTE(review): field semantics inferred from types/names — presumably the
 * faulting intermediate physical address, the faulting virtual address, the
 * program counter at the fault, and the access mode; confirm against the
 * arch fault-handling code.
 */
struct vcpu_fault_info {
	ipaddr_t ipaddr;
	vaddr_t vaddr;
	vaddr_t pc;
	uint32_t mode;
};
90
/** Doubly-linked node in an SP (Secure Partition) call chain of vCPUs. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
98
/** Execution context of a VM's virtual CPU. */
struct vcpu {
	/** Protects the mutable state of this vCPU. */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/* Presumably set once early boot/init of this vCPU is done — confirm
	 * against the code that writes it. */
	bool is_bootstrapped;
	/** Physical CPU this vCPU is currently associated with. */
	struct cpu *cpu;
	/** The VM this vCPU belongs to. */
	struct vm *vm;
	/** Architecture-specific register state. */
	struct arch_regs regs;
	/** Virtual interrupt state for this vCPU. */
	struct interrupts interrupts;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * FFA_MSG_SEND_DIRECT_REQ invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	ffa_vm_id_t direct_request_origin_vm_id;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Determine whether vCPU is currently handling secure interrupt.
	 */
	bool processing_secure_interrupt;
	/* Presumably records that the current secure interrupt was already
	 * deactivated at the interrupt controller — confirm with handler. */
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per FF-A v1.1-Beta0 spec section 8.3, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. SP
	 * can invoke explicit FF-A ABIs, namely FFA_MSG_WAIT and FFA_RUN,
	 * when in WAITING/BLOCKED state respectively, but has to perform
	 * implicit signal completion mechanism by dropping the priority
	 * of the virtual secure interrupt when SPMC signaled the virtual
	 * interrupt in PREEMPTED state(The vCPU was preempted by a Self S-Int
	 * while running). This variable helps SPMC to keep a track of such
	 * mechanism and perform appropriate bookkeeping.
	 */
	bool implicit_completion_signal;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;
};
185
/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};
190
/** Container for two vcpu_locked structures, as returned by vcpu_lock_both(). */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
196
/** Locks the given vCPU and returns the locked handle. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
/** Locks two vCPUs together (presumably in a consistent order to avoid
 * deadlock — confirm in the implementation). */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
/** Releases the lock taken by vcpu_lock()/vcpu_lock_both(). */
void vcpu_unlock(struct vcpu_locked *locked);
/** Initialises a vCPU and associates it with its owning VM. */
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
/** Turns a vCPU on, with the given entry point and argument. */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
/** Returns the index of the vCPU within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
/** Returns true if the locked vCPU is in the OFF state. */
bool vcpu_is_off(struct vcpu_locked vcpu);
/** Resets a secondary vCPU and starts it at the given entry/argument. */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/** Handles a page fault taken by `current`; fault details in `f`. */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

/** Resets a vCPU back to its initial state. */
void vcpu_reset(struct vcpu *vcpu);

/** Records the physical core index into the vCPU (see implementation). */
void vcpu_set_phys_core_idx(struct vcpu *vcpu);
213
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100214static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
215 uint32_t intid)
216{
217 return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
218 intid) == 1U;
219}
220
221static inline void vcpu_virt_interrupt_set_enabled(
222 struct interrupts *interrupts, uint32_t intid)
223{
224 interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
225}
226
227static inline void vcpu_virt_interrupt_clear_enabled(
228 struct interrupts *interrupts, uint32_t intid)
229{
230 interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
231}
232
233static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
234 uint32_t intid)
235{
236 return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
237 intid) == 1U;
238}
239
240static inline void vcpu_virt_interrupt_set_pending(
241 struct interrupts *interrupts, uint32_t intid)
242{
243 interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
244}
245
246static inline void vcpu_virt_interrupt_clear_pending(
247 struct interrupts *interrupts, uint32_t intid)
248{
249 interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
250}
251
252static inline enum interrupt_type vcpu_virt_interrupt_get_type(
253 struct interrupts *interrupts, uint32_t intid)
254{
255 return (enum interrupt_type)interrupt_bitmap_get_value(
256 &interrupts->interrupt_type, intid);
257}
258
259static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
260 uint32_t intid,
261 enum interrupt_type type)
262{
263 if (type == INTERRUPT_TYPE_IRQ) {
264 interrupt_bitmap_clear_value(&interrupts->interrupt_type,
265 intid);
266 } else {
267 interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
268 }
269}
270
Manish Pandey35e452f2021-02-18 21:36:34 +0000271static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
272{
273 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
274}
275
276static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
277{
278 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
279}
280
281static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
282{
283 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
284}
285
286static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
287{
288 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
289}
290
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100291static inline void vcpu_interrupt_count_increment(
292 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
293 uint32_t intid)
294{
295 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
296 INTERRUPT_TYPE_IRQ) {
297 vcpu_irq_count_increment(vcpu_locked);
298 } else {
299 vcpu_fiq_count_increment(vcpu_locked);
300 }
301}
302
303static inline void vcpu_interrupt_count_decrement(
304 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
305 uint32_t intid)
306{
307 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
308 INTERRUPT_TYPE_IRQ) {
309 vcpu_irq_count_decrement(vcpu_locked);
310 } else {
311 vcpu_fiq_count_decrement(vcpu_locked);
312 }
313}
314
Manish Pandey35e452f2021-02-18 21:36:34 +0000315static inline uint32_t vcpu_interrupt_irq_count_get(
316 struct vcpu_locked vcpu_locked)
317{
318 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
319}
320
321static inline uint32_t vcpu_interrupt_fiq_count_get(
322 struct vcpu_locked vcpu_locked)
323{
324 return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
325}
326
327static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
328{
329 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
330 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
331}
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500332
333static inline void vcpu_call_chain_extend(struct vcpu *vcpu1,
334 struct vcpu *vcpu2)
335{
336 vcpu1->call_chain.next_node = vcpu2;
337 vcpu2->call_chain.prev_node = vcpu1;
338}
339
340static inline void vcpu_call_chain_remove_node(struct vcpu *vcpu1,
341 struct vcpu *vcpu2)
342{
343 vcpu1->call_chain.prev_node = NULL;
344 vcpu2->call_chain.next_node = NULL;
345}