/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/addr.h"
#include "hf/spinlock.h"

#include "vmapi/hf/ffa.h"

/** The number of bits in each element of the interrupt bitfields. */
#define INTERRUPT_REGISTER_BITS 32

enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
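
/*
 * Illustrative sketch (not part of the Hafnium API): a debug helper that
 * maps each vCPU state to a printable name, e.g. for log messages. The
 * helper name is an assumption made for this example.
 */
static inline const char *vcpu_example_state_name(enum vcpu_state state)
{
	switch (state) {
	case VCPU_STATE_OFF:
		return "off";
	case VCPU_STATE_RUNNING:
		return "running";
	case VCPU_STATE_WAITING:
		return "waiting";
	case VCPU_STATE_BLOCKED:
		return "blocked";
	case VCPU_STATE_PREEMPTED:
		return "preempted";
	case VCPU_STATE_BLOCKED_INTERRUPT:
		return "blocked on interrupt";
	case VCPU_STATE_ABORTED:
		return "aborted";
	}

	return "unknown";
}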

struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	uint32_t interrupt_enabled[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
	/** Bitfield keeping track of which interrupts are pending. */
	uint32_t interrupt_pending[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
	/** Bitfield recording the interrupt pin configuration. */
	uint32_t interrupt_type[HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS];
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Virtual IRQ and FIQ interrupt types are counted
	 * independently, i.e. the sum of the two counters is the number of
	 * bits set in interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;
};
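
/*
 * Illustrative sketch (not part of the Hafnium API): how a virtual interrupt
 * ID maps onto the bitfields above. Each ID occupies one bit, so word
 * (intid / INTERRUPT_REGISTER_BITS), bit (intid % INTERRUPT_REGISTER_BITS)
 * is tested in both arrays. The helper name is an assumption made for this
 * example; real code only accesses these fields with the owning vCPU's lock
 * held.
 */
static inline bool vcpu_example_is_enabled_and_pending(
	const struct interrupts *interrupts, uint32_t intid)
{
	uint32_t index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t mask = 1U << (intid % INTERRUPT_REGISTER_BITS);

	return (interrupts->interrupt_enabled[index] &
		interrupts->interrupt_pending[index] & mask) != 0U;
}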

struct vcpu_fault_info {
	ipaddr_t ipaddr;
	vaddr_t vaddr;
	vaddr_t pc;
	uint32_t mode;
};

struct vcpu {
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	bool is_bootstrapped;
	struct cpu *cpu;
	struct vm *vm;
	struct arch_regs regs;
	struct interrupts interrupts;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of an
	 * FFA_MSG_SEND_DIRECT_REQ invocation, then this member holds the
	 * ID of the VM from which that call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	ffa_vm_id_t direct_request_origin_vm_id;

	/** Whether the partition is currently handling a managed exit. */
	bool processing_managed_exit;

	/**
	 * Whether the vCPU is currently handling a secure interrupt.
	 */
	bool processing_secure_interrupt;
	bool secure_interrupt_deactivated;

	/**
	 * INTID of the current secure interrupt being processed by this vCPU.
	 */
	uint32_t current_sec_interrupt_id;

	/**
	 * Track the vCPU that was preempted when the secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/**
	 * Current value of the Priority Mask register which is saved/restored
	 * during secure interrupt handling.
	 */
	uint8_t priority_mask;
};
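
/*
 * Illustrative sketch (not part of the Hafnium API): per the comment on
 * direct_request_origin_vm_id above, HF_INVALID_VM_ID means no
 * FFA_MSG_SEND_DIRECT_REQ is in flight, so a caller holding the vCPU's lock
 * could test for an in-progress direct request as follows. The helper name
 * is an assumption made for this example.
 */
static inline bool vcpu_example_in_direct_request(const struct vcpu *vcpu)
{
	return vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}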

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};

/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
void vcpu_unlock(struct vcpu_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);
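
/*
 * Illustrative usage sketch (not part of the Hafnium API): vcpu_lock()
 * returns a vcpu_locked wrapper so that functions which require the lock to
 * be held can demand it by type, and vcpu_lock_both() acquires two vCPU
 * locks together. A minimal lock/use/unlock sequence, with a helper name
 * assumed for this example, could look as follows.
 */
static inline enum vcpu_state vcpu_example_read_state(struct vcpu *vcpu)
{
	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
	enum vcpu_state state = vcpu_locked.vcpu->state;

	vcpu_unlock(&vcpu_locked);

	return state;
}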

bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

void vcpu_reset(struct vcpu *vcpu);

void vcpu_set_phys_core_idx(struct vcpu *vcpu);

static inline void vcpu_irq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count++;
}

static inline void vcpu_irq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count--;
}

static inline void vcpu_fiq_count_increment(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count++;
}

static inline void vcpu_fiq_count_decrement(struct vcpu_locked vcpu_locked)
{
	vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count--;
}

static inline uint32_t vcpu_interrupt_irq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count;
}

static inline uint32_t vcpu_interrupt_fiq_count_get(
	struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}

static inline uint32_t vcpu_interrupt_count_get(struct vcpu_locked vcpu_locked)
{
	return vcpu_locked.vcpu->interrupts.enabled_and_pending_irq_count +
	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
}
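
/*
 * Illustrative sketch (not part of the Hafnium API): the comment on struct
 * interrupts states that the IRQ and FIQ counters together equal the number
 * of bits set in interrupt_enabled & interrupt_pending. A debug-style check
 * of that invariant could look as follows; the helper name and the use of
 * the __builtin_popcount() compiler builtin are assumptions made for this
 * example.
 */
static inline bool vcpu_example_interrupt_counts_consistent(
	struct vcpu_locked vcpu_locked)
{
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;
	uint32_t bits_set = 0;
	uint32_t i;

	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; i++) {
		bits_set += (uint32_t)__builtin_popcount(
			interrupts->interrupt_enabled[i] &
			interrupts->interrupt_pending[i]);
	}

	return bits_set == vcpu_interrupt_count_get(vcpu_locked);
}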