blob: 91dd30f4f7cdfb4fe25d5287dfb7fef12c99bd93 [file] [log] [blame]
Fuad Tabba5c738432019-12-02 11:02:42 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Fuad Tabba5c738432019-12-02 11:02:42 +00007 */
8
9#pragma once
10
Olivier Deprezc5203fb2022-09-29 13:49:24 +020011#include "hf/arch/types.h"
12
Fuad Tabba5c738432019-12-02 11:02:42 +000013#include "hf/addr.h"
Daniel Boulby801f8ef2022-06-27 14:21:01 +010014#include "hf/interrupt_desc.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000015#include "hf/spinlock.h"
16
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010017#include "vmapi/hf/ffa.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000018
/**
 * Action taken by the SPMC in response to a non-secure interrupt while a
 * secure partition runs. NOTE(review): "ME" presumably stands for
 * "managed exit" — confirm against the SPMC interrupt-handling code.
 */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2

/** Maximum number of pending virtual interrupts in the queue per vCPU. */
#define VINT_QUEUE_MAX 5
26
/** Execution states a vCPU can be in, as tracked by the scheduler. */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
52
/**
 * Partition runtime models.
 * Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
65
/**
 * vCPU scheduling modes.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 */
enum schedule_mode {
	/** No scheduling mode assigned. */
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
74
/*
 * This queue is implemented as a circular buffer. The entries are managed on
 * a First In First Out basis.
 */
struct interrupt_queue {
	/* Storage for queued virtual interrupt IDs. */
	uint32_t vint_buffer[VINT_QUEUE_MAX];
	/*
	 * NOTE(review): head is presumably the index of the oldest entry
	 * (next to pop) and tail the insertion index — confirm against
	 * vcpu_interrupt_queue_push/pop in vcpu.c.
	 */
	uint16_t head;
	uint16_t tail;
};
84
/** Per-vCPU virtual interrupt state. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/**
	 * Bitfield recording the interrupt pin configuration: a clear bit
	 * denotes IRQ, a set bit denotes FIQ (see
	 * vcpu_virt_interrupt_set_type()).
	 */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Count independently virtual IRQ and FIQ interrupt types
	 * i.e. the sum of the two counters is the number of bits set in
	 * interrupt_enable & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;

	/**
	 * Partition Manager maintains a queue of pending virtual interrupts.
	 */
	struct interrupt_queue vint_q;
};
106
/** Describes a faulting memory access taken by a vCPU. */
struct vcpu_fault_info {
	/* Intermediate physical address involved in the fault. */
	ipaddr_t ipaddr;
	/* Virtual address involved in the fault. */
	vaddr_t vaddr;
	/* Program counter at the time of the fault. */
	vaddr_t pc;
	/* Access mode bits — NOTE(review): confirm encoding (hf/mm.h). */
	uint32_t mode;
};
113
/** Doubly-linked node placing a vCPU within an SP call chain. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
121
/** Capacity in bytes of a per-vCPU log buffer. */
#define LOG_BUFFER_SIZE 256

/** Per-vCPU buffer accumulating log output. */
struct log_buffer {
	/* Buffered characters. */
	char chars[LOG_BUFFER_SIZE];
	/* Presumably the count of valid bytes in chars — verify consumer. */
	uint16_t len;
};
128
/** State of a virtual CPU. */
struct vcpu {
	/* Protects the mutable state below; acquire via vcpu_lock(). */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/* Physical CPU this vCPU is currently associated with. */
	struct cpu *cpu;
	/* VM this vCPU belongs to. */
	struct vm *vm;
	/* Architectural register state saved for this vCPU. */
	struct arch_regs regs;
	/* Virtual interrupt state (enabled/pending bitmaps, queue). */
	struct interrupts interrupts;

	/* Per-vCPU log accumulation buffer. */
	struct log_buffer log_buffer;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * direct request invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior direct request invocation.
	 */
	struct {
		/**
		 * Indicate whether request is via FFA_MSG_SEND_DIRECT_REQ2.
		 */
		bool is_ffa_req2;
		ffa_id_t vm_id;
	} direct_request_origin;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;

	/**
	 * Per FF-A v1.1-Beta0 spec section 8.3, an SP can use multiple
	 * mechanisms to signal completion of secure interrupt handling. SP
	 * can invoke explicit FF-A ABIs, namely FFA_MSG_WAIT and FFA_RUN,
	 * when in WAITING/BLOCKED state respectively, but has to perform
	 * implicit signal completion mechanism by dropping the priority
	 * of the virtual secure interrupt when SPMC signaled the virtual
	 * interrupt in PREEMPTED state(The vCPU was preempted by a Self S-Int
	 * while running). This variable helps SPMC to keep a track of such
	 * mechanism and perform appropriate bookkeeping.
	 */
	bool requires_deactivate_call;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * If the action in response to a non-secure or other-secure interrupt
	 * is to queue it, this field is used to save and restore the current
	 * priority mask.
	 */
	uint8_t prev_interrupt_priority;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;

	/**
	 * Direct response message has been intercepted to signal virtual
	 * secure interrupt for an SP.
	 */
	bool direct_resp_intercepted;

	/**
	 * FFA_MSG_WAIT invocation has been intercepted to signal virtual
	 * secure interrupt for an SP.
	 */
	bool msg_wait_intercepted;

	/** Save direct response message args to be resumed later. */
	struct ffa_value direct_resp_ffa_value;

	/*
	 * Next vCPU in boot order; maintained by vcpu_update_boot() and
	 * walked starting from vcpu_get_boot_vcpu().
	 */
	struct vcpu *next_boot;
};
235
/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	/* The locked vCPU; the holder may access its mutable state. */
	struct vcpu *vcpu;
};
240
/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
246
/** Locks the given vCPU and returns a handle proving the lock is held. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
/*
 * Locks two vCPUs together — presumably in a canonical order to avoid
 * deadlock; confirm in vcpu.c.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
/** Releases the lock held via vcpu_lock(). */
void vcpu_unlock(struct vcpu_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
/** Turns a vCPU on, starting it at the given entry point with `arg`. */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
/** Returns the index of the vCPU within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/** Handles a stage-2 page fault on behalf of `current`; see vcpu.c. */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

void vcpu_set_phys_core_idx(struct vcpu *vcpu);
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);

/** Inserts the vCPU into the boot-order list (see `next_boot`). */
void vcpu_update_boot(struct vcpu *vcpu);
/** Returns the first vCPU in boot order, if any. */
struct vcpu *vcpu_get_boot_vcpu(void);
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100266static inline bool vcpu_is_virt_interrupt_enabled(struct interrupts *interrupts,
267 uint32_t intid)
268{
269 return interrupt_bitmap_get_value(&interrupts->interrupt_enabled,
270 intid) == 1U;
271}
272
273static inline void vcpu_virt_interrupt_set_enabled(
274 struct interrupts *interrupts, uint32_t intid)
275{
276 interrupt_bitmap_set_value(&interrupts->interrupt_enabled, intid);
277}
278
279static inline void vcpu_virt_interrupt_clear_enabled(
280 struct interrupts *interrupts, uint32_t intid)
281{
282 interrupt_bitmap_clear_value(&interrupts->interrupt_enabled, intid);
283}
284
285static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
286 uint32_t intid)
287{
288 return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
289 intid) == 1U;
290}
291
292static inline void vcpu_virt_interrupt_set_pending(
293 struct interrupts *interrupts, uint32_t intid)
294{
295 interrupt_bitmap_set_value(&interrupts->interrupt_pending, intid);
296}
297
298static inline void vcpu_virt_interrupt_clear_pending(
299 struct interrupts *interrupts, uint32_t intid)
300{
301 interrupt_bitmap_clear_value(&interrupts->interrupt_pending, intid);
302}
303
304static inline enum interrupt_type vcpu_virt_interrupt_get_type(
305 struct interrupts *interrupts, uint32_t intid)
306{
307 return (enum interrupt_type)interrupt_bitmap_get_value(
308 &interrupts->interrupt_type, intid);
309}
310
311static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
312 uint32_t intid,
313 enum interrupt_type type)
314{
315 if (type == INTERRUPT_TYPE_IRQ) {
316 interrupt_bitmap_clear_value(&interrupts->interrupt_type,
317 intid);
318 } else {
319 interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
320 }
321}
322
Manish Pandey35e452f2021-02-18 21:36:34 +0000323static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
324{
325 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
326}
327
328static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
329{
330 vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
331}
332
333static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
334{
335 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
336}
337
338static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
339{
340 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
341}
342
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100343static inline void vcpu_interrupt_count_increment(
344 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
345 uint32_t intid)
346{
347 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
348 INTERRUPT_TYPE_IRQ) {
349 vcpu_irq_count_increment(vcpu_locked);
350 } else {
351 vcpu_fiq_count_increment(vcpu_locked);
352 }
353}
354
355static inline void vcpu_interrupt_count_decrement(
356 struct vcpu_locked vcpu_locked, struct interrupts *interrupts,
357 uint32_t intid)
358{
359 if (vcpu_virt_interrupt_get_type(interrupts, intid) ==
360 INTERRUPT_TYPE_IRQ) {
361 vcpu_irq_count_decrement(vcpu_locked);
362 } else {
363 vcpu_fiq_count_decrement(vcpu_locked);
364 }
365}
366
Manish Pandey35e452f2021-02-18 21:36:34 +0000367static inline uint32_t vcpu_interrupt_irq_count_get(
368 struct vcpu_locked vcpu_locked)
369{
370 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
371}
372
373static inline uint32_t vcpu_interrupt_fiq_count_get(
374 struct vcpu_locked vcpu_locked)
375{
376 return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
377}
378
379static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
380{
381 return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
382 vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
383}
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500384
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600385static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
386 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500387{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600388 vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
389 vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500390}
391
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600392static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
393 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500394{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600395 vcpu1_locked.vcpu->call_chain.prev_node = NULL;
396 vcpu2_locked.vcpu->call_chain.next_node = NULL;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500397}
J-Alves12cedae2023-08-04 14:37:37 +0100398
/** Clears a pending virtual interrupt and decrements the matching counter. */
void vcpu_interrupt_clear_decrement(struct vcpu_locked vcpu_locked,
				    uint32_t intid);

void vcpu_set_running(struct vcpu_locked target_locked, struct ffa_value *args);
/** Saves the interrupt priority (see vcpu.priority_mask). */
void vcpu_save_interrupt_priority(struct vcpu_locked vcpu_locked,
				  uint8_t priority);
/** Injects the given virtual interrupt into the target vCPU. */
void vcpu_interrupt_inject(struct vcpu_locked target_locked, uint32_t intid);
/** Enters the Secure Interrupt handling runtime model (RTM_SEC_INTERRUPT). */
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked);

/*
 * FIFO of pending virtual interrupts (struct interrupt_queue, capacity
 * VINT_QUEUE_MAX). push/pop/peek return false on a full/empty queue —
 * confirm exact failure semantics in vcpu.c.
 */
bool vcpu_interrupt_queue_push(struct vcpu_locked vcpu_locked,
			       uint32_t vint_id);
bool vcpu_interrupt_queue_pop(struct vcpu_locked vcpu_locked,
			      uint32_t *vint_id);
bool vcpu_interrupt_queue_peek(struct vcpu_locked vcpu_locked,
			       uint32_t *vint_id);
bool vcpu_is_interrupt_in_queue(struct vcpu_locked vcpu_locked,
				uint32_t vint_id);
bool vcpu_is_interrupt_queue_empty(struct vcpu_locked vcpu_locked);

/** Performs bookkeeping when secure interrupt handling completes. */
void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked);