/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/interrupt_desc.h"
#include "hf/spinlock.h"

#include "vmapi/hf/ffa.h"

/** Actions the SPMC can take in response to a non-secure interrupt. */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2
#define NS_ACTION_INVALID 3

enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};

/** Refer to section 7 of the FF-A v1.1 EAC0 spec. */
enum partition_runtime_model {
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQ. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};

/** Refer to section 8.2.3 of the FF-A v1.1 EAC0 spec. */
enum schedule_mode {
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};

struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/** Bitfield recording the interrupt pin configuration. */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Virtual IRQ and FIQ interrupt types are counted
	 * independently, i.e. the sum of the two counters is the number of
	 * bits set in interrupt_enabled & interrupt_pending. See the
	 * illustrative sketch following this structure.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};

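/*
 * Illustrative sketch only (not part of this header): the two counters in
 * struct interrupts are expected to mirror the bitmaps, so a hypothetical
 * debug check, run with the vCPU lock held, could look like:
 *
 *	uint32_t total = interrupts->enabled_and_pending_irq_count +
 *			 interrupts->enabled_and_pending_fiq_count;
 *	assert(total == count_bits_set(interrupts->interrupt_enabled,
 *				       interrupts->interrupt_pending));
 *
 * where count_bits_set() stands for a hypothetical helper returning the
 * number of bits set in both bitmaps; it is not defined here.
 */
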
struct vcpu_fault_info {
	ipaddr_t ipaddr;
	vaddr_t vaddr;
	vaddr_t pc;
	uint32_t mode;
};

struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};

struct vcpu {
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	struct cpu *cpu;
	struct vm *vm;
	struct arch_regs regs;
	struct interrupts interrupts;

	/*
	 * Indicates whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of an
	 * FFA_MSG_SEND_DIRECT_REQ invocation, then this member holds the ID
	 * of the VM from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
	 * See the illustrative sketch following this structure.
	 */
	ffa_vm_id_t direct_request_origin_vm_id;

	/** Indicates whether the partition is handling a managed exit. */
	bool processing_managed_exit;

	/**
	 * Indicates whether the vCPU is currently handling a secure interrupt.
	 */
	bool processing_secure_interrupt;
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Tracks the vCPU that was pre-empted when the secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per section 8.3 of the FF-A v1.1-Beta0 spec, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. The
	 * SP can invoke the explicit FF-A ABIs FFA_MSG_WAIT and FFA_RUN when
	 * in the WAITING and BLOCKED states respectively, but it has to use
	 * the implicit completion mechanism, namely dropping the priority of
	 * the virtual secure interrupt, when the SPMC signaled the virtual
	 * interrupt in the PREEMPTED state (the vCPU was preempted by a self
	 * S-Int while running). This variable helps the SPMC keep track of
	 * which mechanism applies and perform the appropriate bookkeeping.
	 */
	bool implicit_completion_signal;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates whether the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * Action currently taken by the SP in response to a non-secure
	 * interrupt, based on the precedence rules specified in section
	 * 8.3.1.4 of the FF-A v1.1 EAC0 spec.
	 */
	uint8_t present_action_ns_interrupts;

	/**
	 * If the action in response to a non-secure interrupt is to queue it,
	 * this field is used to save and restore the current priority mask.
	 */
	uint8_t mask_ns_interrupts;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;

	/**
	 * Set when a direct response message has been intercepted in order to
	 * handle a virtual secure interrupt for an S-EL0 partition.
	 */
	bool direct_resp_intercepted;

	/** Saved direct response message arguments, to be resumed later. */
	struct ffa_value direct_resp_ffa_value;

	struct vcpu *next_boot;
};
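
/*
 * Illustrative sketch only, referenced from the direct_request_origin_vm_id
 * comment above: the expected bookkeeping around a direct message exchange
 * (hypothetical handler code, not part of this header) would be roughly:
 *
 *	// On receiving FFA_MSG_SEND_DIRECT_REQ targeting this vCPU:
 *	vcpu->direct_request_origin_vm_id = sender_vm_id;
 *
 *	// Once the corresponding FFA_MSG_SEND_DIRECT_RESP has been sent:
 *	vcpu->direct_request_origin_vm_id = HF_INVALID_VM_ID;
 *
 * sender_vm_id is a hypothetical local variable holding the caller's VM ID.
 */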

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};

/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
void vcpu_unlock(struct vcpu_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

void vcpu_reset(struct vcpu *vcpu);

void vcpu_set_phys_core_idx(struct vcpu *vcpu);
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);

void vcpu_update_boot(struct vcpu *vcpu);
struct vcpu *vcpu_get_boot_vcpu(void);

static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
						  uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
					  intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_enabled(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
}

static inline void vcpu_virt_interrupt_clear_enabled(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
}

static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
						  uint32_t intid)
{
	return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
					  intid) == 1U;
}

static inline void vcpu_virt_interrupt_set_pending(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
}

static inline void vcpu_virt_interrupt_clear_pending(
	struct interrupts *interrupts, uint32_t intid)
{
	interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
}

static inline enum interrupt_type vcpu_virt_interrupt_get_type(
	struct interrupts *interrupts, uint32_t intid)
{
	return (enum interrupt_type)interrupt_bitmap_get_value(
		&interrupts->interrupt_type, intid);
}

static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
						uint32_t intid,
						enum interrupt_type type)
{
	if (type == INTERRUPT_TYPE_IRQ) {
		interrupt_bitmap_clear_value(&interrupts->interrupt_type,
					     intid);
	} else {
		interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
	}
}

static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static inline void vcpu_interrupt_count_increment(
	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
	uint32_t intid)
{
	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_increment(vcpu_locked);
	} else {
		vcpu_fiq_count_increment(vcpu_locked);
	}
}

static inline void vcpu_interrupt_count_decrement(
	struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
	uint32_t intid)
{
	if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
	    INTERRUPT_TYPE_IRQ) {
		vcpu_irq_count_decrement(vcpu_locked);
	} else {
		vcpu_fiq_count_decrement(vcpu_locked);
	}
}
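
/*
 * Illustrative sketch only: when a virtual interrupt with id intid that is
 * already enabled becomes pending, the pending bit and the per-type counter
 * are expected to be updated together while holding the vCPU lock, e.g.:
 *
 *	struct vcpu_locked locked = vcpu_lock(vcpu);
 *	struct interrupts *interrupts = &vcpu->interrupts;
 *
 *	if (!vcpu_is_virt_interrupt_pending(interrupts, intid) &&
 *	    vcpu_is_virt_interrupt_enabled(interrupts, intid)) {
 *		vcpu_virt_interrupt_set_pending(interrupts, intid);
 *		vcpu_interrupt_count_increment(locked, interrupts, intid);
 *	}
 *	vcpu_unlock(&locked);
 *
 * vcpu and intid are assumed to be provided by the (hypothetical) caller.
 */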

static inline uint32_t vcpu_interrupt_irq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

static inline uint32_t vcpu_interrupt_fiq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline void vcpu_call_chain_extend(struct vcpu *vcpu1,
					  struct vcpu *vcpu2)
{
	vcpu1->call_chain.next_node = vcpu2;
	vcpu2->call_chain.prev_node = vcpu1;
}
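
/*
 * Illustrative sketch only: extending an SP call chain is expected to be
 * done with both vCPUs locked, using the helpers declared above, e.g.:
 *
 *	struct two_vcpu_locked both = vcpu_lock_both(caller, callee);
 *
 *	vcpu_call_chain_extend(caller, callee);
 *
 *	vcpu_unlock(&both.vcpu1);
 *	vcpu_unlock(&both.vcpu2);
 *
 * caller and callee are hypothetical struct vcpu pointers provided by the
 * caller; the locking requirement is an assumption, not documented here.
 */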

static inline void vcpu_call_chain_remove_node(struct vcpu *vcpu1,
					       struct vcpu *vcpu2)
{
	vcpu1->call_chain.prev_node = NULL;
	vcpu2->call_chain.next_node = NULL;
378}