/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/interrupt_desc.h"
#include "hf/spinlock.h"

#include "vmapi/hf/ffa.h"

/** Action taken by the SPMC in response to a non-secure interrupt. */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2

enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};

/** Refer to section 7 of the FF-A v1.1 EAC0 spec. */
enum partition_runtime_model {
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};

/** Refer to section 8.2.3 of the FF-A EAC0 spec. */
enum schedule_mode {
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};

struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/** Bitfield recording the interrupt pin configuration. */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Virtual IRQ and FIQ interrupt types are counted
	 * independently, i.e. the sum of the two counters is the number of
	 * bits set in interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};
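
/*
 * Illustrative invariant for the counters above (a sketch, not a check
 * performed in this header): the two per-type counters always add up to the
 * number of interrupt IDs that are set in both bitmaps.
 *
 *	uint32_t total = interrupts->enabled_and_pending_irq_count +
 *			 interrupts->enabled_and_pending_fiq_count;
 *	assert(total == popcount_of(interrupt_enabled & interrupt_pending));
 *
 * popcount_of() is a hypothetical population count over the bitmap words; the
 * helpers further down keep this relationship by pairing every pending/enabled
 * bitmap update with the matching count increment or decrement.
 */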

struct vcpu_fault_info {
	ipaddr_t ipaddr;
	vaddr_t vaddr;
	vaddr_t pc;
	uint32_t mode;
};

struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};

struct vcpu {
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	struct cpu *cpu;
	struct vm *vm;
	struct arch_regs regs;
	struct interrupts interrupts;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * direct request invocation, then this member holds the
	 * ID of the VM from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior direct request invocation.
	 */
	struct {
		/**
		 * Indicates whether the request was made via
		 * FFA_MSG_SEND_DIRECT_REQ2.
		 */
		bool is_ffa_req2;
		ffa_id_t vm_id;
	} direct_request_origin;

	/** Determines whether the partition is handling a managed exit. */
	bool processing_managed_exit;

	/**
	 * Determines whether the vCPU is currently handling a secure
	 * interrupt.
	 */
	bool processing_secure_interrupt;
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Tracks the vCPU that was preempted when the secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per section 8.3 of the FF-A v1.1 Beta0 spec, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. The
	 * SP can invoke the explicit FF-A ABIs, namely FFA_MSG_WAIT and
	 * FFA_RUN, when in the WAITING and BLOCKED states respectively, but
	 * must use the implicit completion mechanism, i.e. dropping the
	 * priority of the virtual secure interrupt, when the SPMC signaled
	 * the virtual interrupt in the PREEMPTED state (the vCPU was
	 * preempted by a self S-Int while running). This variable helps the
	 * SPMC keep track of which mechanism was used and perform the
	 * appropriate bookkeeping.
	 */
	bool implicit_completion_signal;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * If the action in response to a non-secure or other-secure interrupt
	 * is to queue it, this field is used to save and restore the current
	 * priority mask.
	 */
	uint8_t prev_interrupt_priority;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;

	/**
	 * Direct response message has been intercepted to signal virtual
	 * secure interrupt for an SP.
	 */
	bool direct_resp_intercepted;

	/**
	 * FFA_MSG_WAIT invocation has been intercepted to signal virtual
	 * secure interrupt for an SP.
	 */
	bool msg_wait_intercepted;

	/** Save direct response message args to be resumed later. */
	struct ffa_value direct_resp_ffa_value;

	struct vcpu *next_boot;
};

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};

/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
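
/*
 * Illustrative locking pattern (a sketch, not part of the API): a single vCPU
 * is protected with vcpu_lock()/vcpu_unlock(), while vcpu_lock_both() holds
 * two vCPUs at once, presumably acquiring the locks in a consistent order.
 * `current` and `target` below are hypothetical vCPU pointers.
 *
 *	struct vcpu_locked target_locked = vcpu_lock(target);
 *
 *	if (!vcpu_is_off(target_locked)) {
 *		... update state of the locked vCPU ...
 *	}
 *	vcpu_unlock(&target_locked);
 *
 *	struct two_vcpu_locked both = vcpu_lock_both(current, target);
 *	... update state spanning both vCPUs ...
 *	vcpu_unlock(&both.vcpu1);
 *	vcpu_unlock(&both.vcpu2);
 */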

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
void vcpu_unlock(struct vcpu_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

void vcpu_set_phys_core_idx(struct vcpu *vcpu);
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);

void vcpu_update_boot(struct vcpu *vcpu);
struct vcpu *vcpu_get_boot_vcpu(void);

static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
						  uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
					  intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_enabled(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
}

static inline void vcpu_virt_interrupt_clear_enabled(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
}

static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
						  uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
					  intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_pending(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
}

static inline void vcpu_virt_interrupt_clear_pending(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
}

static inline enum interrupt_type vcpu_virt_interrupt_get_type(
	struct interrupts *interrupts, uint32_t intid)
{
	return (enum interrupt_type)interrupt_bitmap_get_value(
		&interrupts->interrupt_type, intid);
}

static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
						uint32_t intid,
						enum interrupt_type type)
{
	if (type == INTERRUPT_TYPE_IRQ) {
		interrupt_bitmap_clear_value(&interrupts->interrupt_type,
					     intid);
	} else {
		interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
	}
}
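
/*
 * Illustrative use of the type helpers above (a sketch; `interrupts` and
 * `intid` are hypothetical, and INTERRUPT_TYPE_FIQ is assumed to be the
 * non-IRQ enumerator from "hf/interrupt_desc.h"): configure a virtual
 * interrupt pin as FIQ and read the configuration back. IRQ is encoded as a
 * cleared bit and any other type as a set bit in the interrupt_type bitmap.
 *
 *	vcpu_virt_interrupt_set_type(interrupts, intid, INTERRUPT_TYPE_FIQ);
 *	assert(vcpu_virt_interrupt_get_type(interrupts, intid) ==
 *	       INTERRUPT_TYPE_FIQ);
 */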

static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static inline void vcpu_interrupt_count_increment(
	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
	uint32_t intid)
{
	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_increment(vcpu_locked);
	} else {
		vcpu_fiq_count_increment(vcpu_locked);
	}
}

static inline void vcpu_interrupt_count_decrement(
	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
	uint32_t intid)
{
	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_decrement(vcpu_locked);
	} else {
		vcpu_fiq_count_decrement(vcpu_locked);
	}
}

static inline uint32_t vcpu_interrupt_irq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

static inline uint32_t vcpu_interrupt_fiq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}
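
/*
 * Illustrative pairing of the bitmap and count helpers (a sketch; the actual
 * injection path is vcpu_interrupt_inject() declared at the end of this file,
 * and `vcpu_locked`/`intid` are hypothetical): when a virtual interrupt
 * becomes pending while it is enabled, the matching IRQ/FIQ counter has to be
 * bumped so that vcpu_interrupt_count_get() stays in sync with the bitmaps.
 *
 *	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
 *
 *	if (!vcpu_is_virt_interrupt_pending(interrupts, intid)) {
 *		vcpu_virt_interrupt_set_pending(interrupts, intid);
 *		if (vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
 *			vcpu_interrupt_count_increment(vcpu_locked, interrupts,
 *						       intid);
 *		}
 *	}
 */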

static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
					  struct vcpu_locked vcpu2_locked)
{
	vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
	vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
}

static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
					       struct vcpu_locked vcpu2_locked)
{
	vcpu1_locked.vcpu->call_chain.prev_node = NULL;
	vcpu2_locked.vcpu->call_chain.next_node = NULL;
}
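
/*
 * Illustrative call chain maintenance (a sketch; `caller` and `callee` are
 * hypothetical SP vCPU pointers): a direct request extends the chain with the
 * callee as the new tail node, and the pair is unlinked again once the
 * request completes. Note the argument order of vcpu_call_chain_remove_node():
 * the first argument has its prev_node cleared and the second its next_node.
 *
 *	struct two_vcpu_locked both = vcpu_lock_both(caller, callee);
 *
 *	vcpu_call_chain_extend(both.vcpu1, both.vcpu2);
 *	... the callee runs, e.g. under the RTM_FFA_DIR_REQ runtime model ...
 *	vcpu_call_chain_remove_node(both.vcpu2, both.vcpu1);
 *
 *	vcpu_unlock(&both.vcpu1);
 *	vcpu_unlock(&both.vcpu2);
 */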

void vcpu_set_running(struct vcpu_locked target_locked, struct ffa_value args);
void vcpu_save_interrupt_priority(struct vcpu_locked vcpu_locked,
				  uint8_t priority);
void vcpu_interrupt_inject(struct vcpu_locked target_locked, uint32_t intid);
void vcpu_set_processing_interrupt(struct vcpu_locked vcpu_locked,
				   uint32_t intid,
				   struct vcpu_locked preempted_locked);
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked);