blob: a599b812ce64ee6d6d8d739e9db1f186a600cdcf [file] [log] [blame]
Fuad Tabba5c738432019-12-02 11:02:42 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Fuad Tabba5c738432019-12-02 11:02:42 +00007 */
8
9#pragma once
10
Olivier Deprezc5203fb2022-09-29 13:49:24 +020011#include "hf/arch/types.h"
12
Fuad Tabba5c738432019-12-02 11:02:42 +000013#include "hf/addr.h"
Daniel Boulby801f8ef2022-06-27 14:21:01 +010014#include "hf/interrupt_desc.h"
J-Alves67f5ba32024-09-27 18:07:11 +010015#include "hf/list.h"
Karl Meakin07a69ab2025-02-07 14:53:19 +000016#include "hf/mm.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000017#include "hf/spinlock.h"
18
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010019#include "vmapi/hf/ffa.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000020
/**
 * Action taken by the SPMC in response to a non-secure interrupt targeting
 * an SP. NOTE(review): NS_ACTION_ME presumably means "managed exit" --
 * confirm against the SPMC interrupt-handling code.
 */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2

/** Maximum number of pending virtual interrupts in the queue per vCPU. */
#define VINT_QUEUE_MAX 10
Madhukar Pappireddy32913cb2024-07-19 13:04:05 -050028
/**
 * The set of states a vCPU can be in. Per the comment on struct vcpu, the
 * state is only changed in the context of the vCPU being run.
 */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,
};
54
/**
 * Partition runtime models. Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
67
/**
 * Which scheduler is providing CPU cycles to the vCPU.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 */
enum schedule_mode {
	/** No scheduling mode assigned. */
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
76
/** Power management operation being handled on behalf of a CPU. */
enum power_mgmt_operation {
	/** No power management operation in progress. */
	PWR_MGMT_NONE = 0,
	/** Power off the CPU. */
	PWR_MGMT_CPU_OFF,
	/** No other operations are supported at the moment. */
};
83
/*
 * Queue of pending virtual interrupts for a vCPU. This queue is implemented
 * as a circular buffer. The entries are managed on a First In First Out
 * basis.
 */
struct interrupt_queue {
	/** Storage for the queued virtual interrupt IDs. */
	uint32_t vint_buffer[VINT_QUEUE_MAX];
	/*
	 * Read/write positions into vint_buffer; presumably `head` is the
	 * dequeue index and `tail` the enqueue index -- confirm against the
	 * queue implementation in vcpu.c.
	 */
	uint16_t head;
	uint16_t tail;
	/** Number of entries currently held in the queue. */
	size_t queued_vint_count;
};
94
/** Virtual interrupt state tracked per vCPU. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/** Bitfield recording the interrupt pin configuration. */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Counted independently for virtual IRQ and FIQ interrupt
	 * types, i.e. the sum of the two counters is the number of bits set
	 * in interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;

	/**
	 * Partition Manager maintains a queue of pending virtual interrupts.
	 */
	struct interrupt_queue vint_q;
};
116
/** Describes a fault taken by a vCPU, filled in for fault handling. */
struct vcpu_fault_info {
	/** Faulting intermediate physical address. */
	ipaddr_t ipaddr;
	/** Faulting virtual address. */
	vaddr_t vaddr;
	/** Program counter at the time of the fault. */
	vaddr_t pc;
	/* Memory mode; presumably the access mode of the fault -- confirm. */
	mm_mode_t mode;
};
123
/** Doubly-linked node in an SP (secure partition) call chain. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
131
/** Size in bytes of a vCPU's log buffer. */
#define LOG_BUFFER_SIZE 256

/** Per-vCPU buffer of logged characters and its current length. */
struct log_buffer {
	char chars[LOG_BUFFER_SIZE];
	uint16_t len;
};
138
/** State of a virtual CPU. Mutable fields are protected by `lock`. */
struct vcpu {
	/** Protects concurrent access to this vCPU's state. */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/* Physical CPU associated with this vCPU. */
	struct cpu *cpu;
	/** The VM this vCPU belongs to. */
	struct vm *vm;
	/** Saved architectural register state. */
	struct arch_regs regs;
	/** Virtual interrupt state for this vCPU. */
	struct interrupts interrupts;

	/** Buffered log output for this vCPU. */
	struct log_buffer log_buffer;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * direct request invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior direct request invocation.
	 */
	struct {
		ffa_id_t vm_id;
		/** Indicate whether request is via FFA_MSG_SEND_DIRECT_REQ2. */
		bool is_ffa_req2;
		/** Indicate whether request is a framework message. */
		bool is_framework;
	} direct_request_origin;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Track if pending interrupts have been retrieved by
	 * FFA_NOTIFICATION_INFO_GET.
	 */
	bool interrupts_info_get_retrieved;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * If the action in response to a non-secure or other-secure interrupt
	 * is to queue it, this field is used to save and restore the current
	 * priority mask.
	 */
	uint8_t prev_interrupt_priority;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;

	/**
	 * An entry in a list maintained by Hafnium for pending arch timers.
	 * It exists in the list on behalf of its parent vCPU. The `prev` and
	 * `next` fields point to the adjacent entries in the list. The list
	 * itself is protected by a spinlock therefore timer entry is
	 * safeguarded from concurrent accesses.
	 */
	struct list_entry timer_node;

	/*
	 * List entry pointing to the next vcpu with an IPI pending on the
	 * same pinned CPU.
	 */
	struct list_entry ipi_list_node;

	/*
	 * Denotes which power management operation message is being currently
	 * handled by this vCPU.
	 */
	enum power_mgmt_operation pwr_mgmt_op;
};
236
/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	struct vcpu *vcpu;
};
241
/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
247
/** Acquires `vcpu`'s lock, returning a handle proving the lock is held. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);

/*
 * Locks both vCPUs and returns a handle for each; presumably acquired in a
 * fixed order to avoid deadlock -- confirm in the implementation.
 */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);

/** Releases the lock held through `locked`. */
void vcpu_unlock(struct vcpu_locked *locked);

/** Initialises `vcpu` as a vCPU belonging to `vm`. */
void vcpu_init(struct vcpu *vcpu, struct vm *vm);

/** Turns a vCPU on, starting at `entry` with argument `arg`. */
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);

/** Returns the index of the vCPU within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);

/** Returns true if the vCPU is in the off state. */
bool vcpu_is_off(struct vcpu_locked vcpu);

/*
 * Resets a secondary vCPU and starts it at `entry` with argument `arg`;
 * the boolean result's exact meaning is defined by the implementation.
 */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/*
 * Handles a page fault taken by `current`, described by `f`; returns whether
 * the fault was handled.
 */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

/** Records the physical core index in the vCPU. */
void vcpu_set_phys_core_idx(struct vcpu *vcpu);

/** Sets the general-purpose register conveying the boot info address. */
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);
263
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600264static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
265 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500266{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600267 vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
268 vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500269}
270
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600271static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
272 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500273{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600274 vcpu1_locked.vcpu->call_chain.prev_node = NULL;
275 vcpu2_locked.vcpu->call_chain.next_node = NULL;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500276}
J-Alves12cedae2023-08-04 14:37:37 +0100277
/*
 * Transitions the target vCPU to the running state; `args` presumably
 * supplies register values to set up -- confirm in the implementation.
 */
void vcpu_set_running(struct vcpu_locked target_locked,
		      const struct ffa_value *args);

/** Saves `priority` so the interrupt priority mask can be restored later. */
void vcpu_save_interrupt_priority(struct vcpu_locked vcpu_locked,
				  uint8_t priority);

/** Enters the secure interrupt handling runtime model on the vCPU. */
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked);

/** Marks secure interrupt handling as complete on the vCPU. */
void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked);
287
Daniel Boulbyd21e9b32025-02-13 15:53:21 +0000288static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
289 uint32_t intid)
290{
291 return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
292 intid) == 1U;
293}
294
295static inline enum interrupt_type vcpu_virt_interrupt_get_type(
296 struct interrupts *interrupts, uint32_t intid)
297{
298 return (enum interrupt_type)interrupt_bitmap_get_value(
299 &interrupts->interrupt_type, intid);
300}
301
302static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
303 uint32_t intid,
304 enum interrupt_type type)
305{
306 if (type == INTERRUPT_TYPE_IRQ) {
307 interrupt_bitmap_clear_value(&interrupts->interrupt_type,
308 intid);
309 } else {
310 interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
311 }
312}
313
/** Returns the count of enabled-and-pending virtual IRQs on the vCPU. */
uint32_t vcpu_virt_interrupt_irq_count_get(struct vcpu_locked vcpu_locked);

/** Returns the count of enabled-and-pending virtual FIQs on the vCPU. */
uint32_t vcpu_virt_interrupt_fiq_count_get(struct vcpu_locked vcpu_locked);

/** Returns the total count of enabled-and-pending virtual interrupts. */
uint32_t vcpu_virt_interrupt_count_get(struct vcpu_locked vcpu_locked);

/** Enables (`enable` true) or disables virtual interrupt `vint_id`. */
void vcpu_virt_interrupt_enable(struct vcpu_locked vcpu_locked,
				uint32_t vint_id, bool enable);

/*
 * Returns the next pending-and-enabled virtual interrupt; "peek" presumably
 * leaves it queued while "get" dequeues it -- confirm in the implementation.
 */
uint32_t vcpu_virt_interrupt_peek_pending_and_enabled(
	struct vcpu_locked vcpu_locked);
uint32_t vcpu_virt_interrupt_get_pending_and_enabled(
	struct vcpu_locked vcpu_locked);

/** Injects virtual interrupt `vint_id` into the vCPU. */
void vcpu_virt_interrupt_inject(struct vcpu_locked vcpu_locked,
				uint32_t vint_id);

/** Clears virtual interrupt `vint_id` from the vCPU's pending state. */
void vcpu_virt_interrupt_clear(struct vcpu_locked vcpu_locked,
			       uint32_t vint_id);

/*
 * Records the origin of a direct request on the target vCPU: the sender's
 * VM ID and whether it arrived via FFA_MSG_SEND_DIRECT_REQ2; `args` is
 * presumably used to derive the framework-message flag -- confirm.
 */
void vcpu_dir_req_set_state(struct vcpu_locked target_locked, bool is_ffa_req2,
			    ffa_id_t sender_vm_id, struct ffa_value args);

/** Resets the vCPU's direct request origin state. */
void vcpu_dir_req_reset_state(struct vcpu_locked vcpu_locked);