blob: 9dec951c1d59a1beef9fa411d24ea9d677d37c4a [file] [log] [blame]
Fuad Tabba5c738432019-12-02 11:02:42 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Fuad Tabba5c738432019-12-02 11:02:42 +00007 */
8
9#pragma once
10
11#include "hf/addr.h"
Daniel Boulby801f8ef2022-06-27 14:21:01 +010012#include "hf/interrupt_desc.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000013#include "hf/spinlock.h"
14
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010015#include "vmapi/hf/ffa.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000016
/**
 * Run state of a vCPU as tracked by the scheduler. The state is only
 * changed in the context of the vCPU being run (see struct vcpu), so
 * transitions are observable through the return code of the run call.
 */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
42
/**
 * Partition runtime models.
 * Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model is currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
55
/**
 * Which scheduler granted the vCPU its current CPU cycles.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 */
enum schedule_mode {
	/*
	 * No scheduling mode assigned. NOTE(review): the unprefixed name
	 * 'NONE' is collision-prone in the global namespace; consider a
	 * SCHED_ prefix in a follow-up (renaming here would break callers).
	 */
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
64
/** Per-vCPU virtual interrupt state. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/** Bitfield recording the interrupt pin configuration. */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Count independently virtual IRQ and FIQ interrupt types
	 * i.e. the sum of the two counters is the number of bits set in
	 * interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};
81
/** Describes a faulting memory access taken by a vCPU. */
struct vcpu_fault_info {
	/* Faulting intermediate physical address. */
	ipaddr_t ipaddr;
	/* Faulting virtual address. */
	vaddr_t vaddr;
	/* Program counter at the time of the fault. */
	vaddr_t pc;
	/*
	 * Access mode bits. NOTE(review): assumed to use the hf memory-mode
	 * flag encoding — confirm against vcpu_handle_page_fault users.
	 */
	uint32_t mode;
};
88
/** Doubly-linked list node tracking a vCPU's position in an SP call chain. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
96
struct vcpu {
	/** Protects this vCPU's state; see struct vcpu_locked. */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/*
	 * NOTE(review): presumably set once the vCPU has completed its
	 * initial boot run — confirm against vcpu.c users.
	 */
	bool is_bootstrapped;
	/** Physical CPU this vCPU is currently associated with. */
	struct cpu *cpu;
	/** The VM this vCPU belongs to. */
	struct vm *vm;
	/** Saved architectural register state; see regs_available below. */
	struct arch_regs regs;
	/** Virtual interrupt enabled/pending/type state for this vCPU. */
	struct interrupts interrupts;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * FFA_MSG_SEND_DIRECT_REQ invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	ffa_vm_id_t direct_request_origin_vm_id;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Determine whether vCPU is currently handling secure interrupt.
	 */
	bool processing_secure_interrupt;
	/*
	 * NOTE(review): appears to record whether the secure interrupt being
	 * handled has already been deactivated — confirm exact semantics
	 * against the secure interrupt handling code.
	 */
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per FF-A v1.1-Beta0 spec section 8.3, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. SP
	 * can invoke explicit FF-A ABIs, namely FFA_MSG_WAIT and FFA_RUN,
	 * when in WAITING/BLOCKED state respectively, but has to perform
	 * implicit signal completion mechanism by dropping the priority
	 * of the virtual secure interrupt when SPMC signaled the virtual
	 * interrupt in PREEMPTED state(The vCPU was preempted by a Self S-Int
	 * while running). This variable helps SPMC to keep a track of such
	 * mechanism and perform appropriate bookkeeping.
	 */
	bool implicit_completion_signal;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;
};
183
/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};
188
/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
194
/** Acquires the vCPU's lock, returning a handle proving the lock is held. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
/*
 * Locks two vCPUs and returns both handles. NOTE(review): presumably
 * acquires in a consistent order to avoid deadlock — see implementation.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
/** Releases the lock held via the given handle. */
void vcpu_unlock(struct vcpu_locked *locked);
/** Initialises the vCPU and associates it with the given VM. */
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
/** Turns the vCPU on, with the given entry point and argument. */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
/** Returns the index of the vCPU within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
/** Returns true if the locked vCPU is in an off state. */
bool vcpu_is_off(struct vcpu_locked vcpu);
/** Resets a secondary vCPU and starts it at the given entry point. */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/** Handles a page fault taken by the current vCPU; fills in 'f'. */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

/** Resets the vCPU's state back to its initial values. */
void vcpu_reset(struct vcpu *vcpu);

/** Records the physical core index into the vCPU's state. */
void vcpu_set_phys_core_idx(struct vcpu *vcpu);
211
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100212static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
213 uint32_t intid)
214{
215 return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
216 intid) == 1U;
217}
218
219static inline void vcpu_virt_interrupt_set_enabled(
220 struct interrupts *interrupts, uint32_t intid)
221{
222 interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
223}
224
225static inline void vcpu_virt_interrupt_clear_enabled(
226 struct interrupts *interrupts, uint32_t intid)
227{
228 interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
229}
230
231static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
232 uint32_t intid)
233{
234 return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
235 intid) == 1U;
236}
237
238static inline void vcpu_virt_interrupt_set_pending(
239 struct interrupts *interrupts, uint32_t intid)
240{
241 interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
242}
243
244static inline void vcpu_virt_interrupt_clear_pending(
245 struct interrupts *interrupts, uint32_t intid)
246{
247 interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
248}
249
250static inline enum interrupt_type vcpu_virt_interrupt_get_type(
251 struct interrupts *interrupts, uint32_t intid)
252{
253 return (enum interrupt_type)interrupt_bitmap_get_value(
254 &interrupts->interrupt_type, intid);
255}
256
257static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
258 uint32_t intid,
259 enum interrupt_type type)
260{
261 if (type == INTERRUPT_TYPE_IRQ) {
262 interrupt_bitmap_clear_value(&interrupts->interrupt_type,
263 intid);
264 } else {
265 interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
266 }
267}
268
Manish Pandey35e452f2021-02-18 21:36:34 +0000269static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
270{
271 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
272}
273
274static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
275{
276 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
277}
278
279static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
280{
281 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
282}
283
284static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
285{
286 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
287}
288
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100289static inline void vcpu_interrupt_count_increment(
290 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
291 uint32_t intid)
292{
293 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
294 INTERRUPT_TYPE_IRQ) {
295 vcpu_irq_count_increment(vcpu_locked);
296 } else {
297 vcpu_fiq_count_increment(vcpu_locked);
298 }
299}
300
301static inline void vcpu_interrupt_count_decrement(
302 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
303 uint32_t intid)
304{
305 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
306 INTERRUPT_TYPE_IRQ) {
307 vcpu_irq_count_decrement(vcpu_locked);
308 } else {
309 vcpu_fiq_count_decrement(vcpu_locked);
310 }
311}
312
Manish Pandey35e452f2021-02-18 21:36:34 +0000313static inline uint32_t vcpu_interrupt_irq_count_get(
314 struct vcpu_locked vcpu_locked)
315{
316 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
317}
318
319static inline uint32_t vcpu_interrupt_fiq_count_get(
320 struct vcpu_locked vcpu_locked)
321{
322 return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
323}
324
325static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
326{
327 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
328 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
329}
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500330
331static inline void vcpu_call_chain_extend(struct vcpu *vcpu1,
332 struct vcpu *vcpu2)
333{
334 vcpu1->call_chain.next_node = vcpu2;
335 vcpu2->call_chain.prev_node = vcpu1;
336}
337
338static inline void vcpu_call_chain_remove_node(struct vcpu *vcpu1,
339 struct vcpu *vcpu2)
340{
341 vcpu1->call_chain.prev_node = NULL;
342 vcpu2->call_chain.next_node = NULL;
343}