blob: 0c123241768f2ec344f40f2fc4b490121677a903 [file] [log] [blame]
/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
9#pragma once
10
Olivier Deprezc5203fb2022-09-29 13:49:24 +020011#include "hf/arch/types.h"
12
Fuad Tabba5c738432019-12-02 11:02:42 +000013#include "hf/addr.h"
Daniel Boulby801f8ef2022-06-27 14:21:01 +010014#include "hf/interrupt_desc.h"
J-Alves67f5ba32024-09-27 18:07:11 +010015#include "hf/list.h"
Karl Meakin07a69ab2025-02-07 14:53:19 +000016#include "hf/mm.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000017#include "hf/spinlock.h"
18
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010019#include "vmapi/hf/ffa.h"
Fuad Tabba5c738432019-12-02 11:02:42 +000020
/**
 * Action taken by the SPMC in response to a non-secure interrupt.
 * NOTE(review): the exact consumer of these values (e.g. partition
 * manifest parsing) is not visible here — confirm against vcpu.c/SPMC code.
 */
#define NS_ACTION_QUEUED 0
#define NS_ACTION_ME 1
#define NS_ACTION_SIGNALED 2

/** Maximum number of pending virtual interrupts in the queue per vCPU. */
#define VINT_QUEUE_MAX 10
Madhukar Pappireddy32913cb2024-07-19 13:04:05 -050028
/**
 * State of a vCPU as tracked by the partition manager.
 *
 * Refer section 7.2 of the FF-A v1.3 ALP2 specification.
 */
enum vcpu_state {
	/** The vCPU is switched off. */
	VCPU_STATE_OFF,

	/** The vCPU is currently running. */
	VCPU_STATE_RUNNING,

	/** The vCPU is waiting to be allocated CPU cycles to do work. */
	VCPU_STATE_WAITING,

	/**
	 * The vCPU is blocked and waiting for some work to complete on
	 * its behalf.
	 */
	VCPU_STATE_BLOCKED,

	/** The vCPU has been preempted by an interrupt. */
	VCPU_STATE_PREEMPTED,

	/** The vCPU is waiting for an interrupt. */
	VCPU_STATE_BLOCKED_INTERRUPT,

	/** The vCPU has aborted. */
	VCPU_STATE_ABORTED,

	/** The vCPU is in NULL state. */
	VCPU_STATE_NULL,

	/** The vCPU has been stopped by partition manager. */
	VCPU_STATE_STOPPED,

	/**
	 * The partition manager has allocated all required resources to
	 * initialize the vCPU.
	 */
	VCPU_STATE_CREATED,

	/** The vCPU has been allocated CPU cycles to initialize itself. */
	VCPU_STATE_STARTING,

	/** The vCPU is currently executing to stop itself. */
	VCPU_STATE_STOPPING,
};
75
/**
 * Runtime model under which a partition's vCPU is executing.
 * Refer to section 7 of the FF-A v1.1 EAC0 spec.
 */
enum partition_runtime_model {
	/** No runtime model currently in effect. */
	RTM_NONE,
	/** Runtime model for FFA_RUN. */
	RTM_FFA_RUN,
	/** Runtime model for FFA_MSG_SEND_DIRECT_REQUEST. */
	RTM_FFA_DIR_REQ,
	/** Runtime model for Secure Interrupt handling. */
	RTM_SEC_INTERRUPT,
	/** Runtime model for SP Initialization. */
	RTM_SP_INIT,
};
88
/**
 * Who provided the CPU cycles the vCPU is running on.
 * Refer to section 8.2.3 of the FF-A EAC0 spec.
 *
 * NOTE(review): the unprefixed enumerator `NONE` is collision-prone in a
 * widely-included header; renaming would break existing callers, so it is
 * only flagged here.
 */
enum schedule_mode {
	NONE,
	/** Normal world scheduled mode. */
	NWD_MODE,
	/** SPMC scheduled mode. */
	SPMC_MODE,
};
97
/** Power management operation a vCPU may be handling on behalf of a PM message. */
enum power_mgmt_operation {
	/** No power management operation in progress. */
	PWR_MGMT_NONE = 0,
	/** Power off the CPU. */
	PWR_MGMT_CPU_OFF,
	/** No other operations are supported at the moment. */
};
104
/*
 * Queue of pending virtual interrupt IDs for a vCPU. This queue is
 * implemented as a circular buffer. The entries are managed on a First In
 * First Out basis.
 */
struct interrupt_queue {
	/** Fixed-size backing storage for queued virtual interrupt IDs. */
	uint32_t vint_buffer[VINT_QUEUE_MAX];
	/* Presumably the dequeue index (oldest entry) — confirm in vcpu.c. */
	uint16_t head;
	/* Presumably the enqueue index (next free slot) — confirm in vcpu.c. */
	uint16_t tail;
	/** Number of virtual interrupts currently held in the queue. */
	size_t queued_vint_count;
};
115
/** Per-vCPU virtual interrupt state. */
struct interrupts {
	/** Bitfield keeping track of which interrupts are enabled. */
	struct interrupt_bitmap interrupt_enabled;
	/** Bitfield keeping track of which interrupts are pending. */
	struct interrupt_bitmap interrupt_pending;
	/** Bitfield recording the interrupt pin configuration. */
	struct interrupt_bitmap interrupt_type;
	/**
	 * The number of interrupts which are currently both enabled and
	 * pending. Count independently virtual IRQ and FIQ interrupt types
	 * i.e. the sum of the two counters is the number of bits set in
	 * interrupt_enabled & interrupt_pending.
	 */
	uint32_t enabled_and_pending_irq_count;
	uint32_t enabled_and_pending_fiq_count;

	/**
	 * Partition Manager maintains a queue of pending virtual interrupts.
	 */
	struct interrupt_queue vint_q;
};
137
/** Description of a fault taken by a vCPU, passed to the fault handler. */
struct vcpu_fault_info {
	/** Faulting intermediate physical address. */
	ipaddr_t ipaddr;
	/** Faulting virtual address. */
	vaddr_t vaddr;
	/** Program counter at the time of the fault. */
	vaddr_t pc;
	/** Memory mode associated with the faulting access. */
	mm_mode_t mode;
};
144
/** Doubly-linked node in a secure partition (SP) call chain. */
struct call_chain {
	/** Previous node in the SP call chain. */
	struct vcpu *prev_node;

	/** Next node in the SP call chain. */
	struct vcpu *next_node;
};
152
/** Size in bytes of a per-vCPU log buffer. */
#define LOG_BUFFER_SIZE 256

struct log_buffer {
	/* Buffered characters; presumably not NUL-terminated — confirm users. */
	char chars[LOG_BUFFER_SIZE];
	/** Current number of buffered characters. */
	uint16_t len;
};
159
/** Per-vCPU state maintained by the partition manager. */
struct vcpu {
	/** Protects this structure; see vcpu_lock()/vcpu_unlock(). */
	struct spinlock lock;

	/*
	 * The state is only changed in the context of the vCPU being run. This
	 * ensures the scheduler can easily keep track of the vCPU state as
	 * transitions are indicated by the return code from the run call.
	 */
	enum vcpu_state state;

	/** Physical CPU this vCPU is currently associated with. */
	struct cpu *cpu;
	/** The VM this vCPU belongs to. */
	struct vm *vm;
	/** Architectural register state; validity gated by `regs_available`. */
	struct arch_regs regs;
	/** Virtual interrupt state for this vCPU. */
	struct interrupts interrupts;

	/** Per-vCPU buffer for partition log output. */
	struct log_buffer log_buffer;

	/*
	 * Determine whether the 'regs' field is available for use. This is set
	 * to false when a vCPU is about to run on a physical CPU, and is set
	 * back to true when it is descheduled. This is not relevant for the
	 * primary VM vCPUs in the normal world (or the "other world VM" vCPUs
	 * in the secure world) as they are pinned to physical CPUs and there
	 * is no contention to take care of.
	 */
	bool regs_available;

	/*
	 * If the current vCPU is executing as a consequence of a
	 * direct request invocation, then this member holds the
	 * originating VM ID from which the call originated.
	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
	 * a result of a prior direct request invocation.
	 */
	struct {
		/** ID of the VM that sent the direct request. */
		ffa_id_t vm_id;
		/** Indicate whether request is via FFA_MSG_SEND_DIRECT_REQ2. */
		bool is_ffa_req2;
		/** Indicate whether request is a framework message. */
		bool is_framework;
	} direct_request_origin;

	/** Determine whether partition is currently handling managed exit. */
	bool processing_managed_exit;

	/**
	 * Track current vCPU which got pre-empted when secure interrupt
	 * triggered.
	 */
	struct vcpu *preempted_vcpu;

	/** SP call chain. */
	struct call_chain call_chain;

	/**
	 * Track if pending interrupts have been retrieved by
	 * FFA_NOTIFICATION_INFO_GET.
	 */
	bool interrupts_info_get_retrieved;

	/**
	 * Indicates if the current vCPU is running in SPMC scheduled
	 * mode or Normal World scheduled mode.
	 */
	enum schedule_mode scheduling_mode;

	/**
	 * If the action in response to a non-secure or other-secure interrupt
	 * is to queue it, this field is used to save and restore the current
	 * priority mask.
	 */
	uint8_t prev_interrupt_priority;

	/** Partition Runtime Model. */
	enum partition_runtime_model rt_model;

	/**
	 * An entry in a list maintained by Hafnium for pending arch timers.
	 * It exists in the list on behalf of its parent vCPU. The `prev` and
	 * `next` fields point to the adjacent entries in the list. The list
	 * itself is protected by a spinlock therefore timer entry is
	 * safeguarded from concurrent accesses.
	 */
	struct list_entry timer_node;

	/*
	 * List entry pointing to the next vcpu with an IPI pending on the
	 * same pinned CPU.
	 */
	struct list_entry ipi_list_node;

	/*
	 * Denotes which power management operation message is being currently
	 * handled by this vCPU.
	 */
	enum power_mgmt_operation pwr_mgmt_op;
};
257
/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
	/** The locked vCPU; lock acquired via vcpu_lock()/vcpu_lock_both(). */
	struct vcpu *vcpu;
};
262
/** Container for two vcpu_locked structures. */
struct two_vcpu_locked {
	struct vcpu_locked vcpu1;
	struct vcpu_locked vcpu2;
};
268
/** Acquires the vCPU's lock and returns it wrapped in vcpu_locked. */
struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
/** Locks two vCPUs together (presumably in a fixed order to avoid deadlock —
 * confirm in vcpu.c). */
struct two_vcpu_locked vcpu_lock_both(struct vcpu *vcpu1, struct vcpu *vcpu2);
/** Releases the lock held in `locked`. */
void vcpu_unlock(struct vcpu_locked *locked);
/** Initializes `vcpu` as a vCPU belonging to `vm`. */
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
/** Prepares a locked vCPU to start at `entry` with argument `arg`. */
void vcpu_prepare(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
/** Returns the index of `vcpu` within its VM. */
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
/** Returns true if the locked vCPU is in the off state. */
bool vcpu_is_off(struct vcpu_locked vcpu);
/** Resets a secondary vCPU and starts it at `entry` with `arg`. */
bool vcpu_secondary_reset_and_start(struct vcpu_locked vcpu_locked,
				    ipaddr_t entry, uintreg_t arg);

/** Handles a page fault taken by `current`; `f` describes the fault. */
bool vcpu_handle_page_fault(const struct vcpu *current,
			    struct vcpu_fault_info *f);

/** Records the physical core index for `vcpu`. */
void vcpu_set_phys_core_idx(struct vcpu *vcpu);
/** Places boot information in a general-purpose register of `vcpu`. */
void vcpu_set_boot_info_gp_reg(struct vcpu *vcpu);
284
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600285static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
286 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500287{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600288 vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
289 vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500290}
291
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600292static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
293 struct vcpu_locked vcpu2_locked)
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500294{
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600295 vcpu1_locked.vcpu->call_chain.prev_node = NULL;
296 vcpu2_locked.vcpu->call_chain.next_node = NULL;
Madhukar Pappireddy5992fbc2022-06-21 17:15:16 -0500297}
J-Alves12cedae2023-08-04 14:37:37 +0100298
/** Marks the target vCPU as running; `args` optionally carries FF-A values. */
void vcpu_set_running(struct vcpu_locked target_locked,
		      const struct ffa_value *args);

/** Saves the interrupt `priority` mask for later restoration. */
void vcpu_save_interrupt_priority(struct vcpu_locked vcpu_locked,
				  uint8_t priority);

/** Enters the secure-interrupt runtime model on the locked vCPU. */
void vcpu_enter_secure_interrupt_rtm(struct vcpu_locked vcpu_locked);

/** Completes secure interrupt handling on the locked vCPU. */
void vcpu_secure_interrupt_complete(struct vcpu_locked vcpu_locked);
308
Daniel Boulbyd21e9b32025-02-13 15:53:21 +0000309static inline bool vcpu_is_virt_interrupt_pending(struct interrupts *interrupts,
310 uint32_t intid)
311{
312 return interrupt_bitmap_get_value(&interrupts->interrupt_pending,
313 intid) == 1U;
314}
315
316static inline enum interrupt_type vcpu_virt_interrupt_get_type(
317 struct interrupts *interrupts, uint32_t intid)
318{
319 return (enum interrupt_type)interrupt_bitmap_get_value(
320 &interrupts->interrupt_type, intid);
321}
322
323static inline void vcpu_virt_interrupt_set_type(struct interrupts *interrupts,
324 uint32_t intid,
325 enum interrupt_type type)
326{
327 if (type == INTERRUPT_TYPE_IRQ) {
328 interrupt_bitmap_clear_value(&interrupts->interrupt_type,
329 intid);
330 } else {
331 interrupt_bitmap_set_value(&interrupts->interrupt_type, intid);
332 }
333}
334
/** Returns the count of enabled-and-pending virtual IRQs for the vCPU. */
uint32_t vcpu_virt_interrupt_irq_count_get(struct vcpu_locked vcpu_locked);
/** Returns the count of enabled-and-pending virtual FIQs for the vCPU. */
uint32_t vcpu_virt_interrupt_fiq_count_get(struct vcpu_locked vcpu_locked);
/** Returns the total count of enabled-and-pending virtual interrupts. */
uint32_t vcpu_virt_interrupt_count_get(struct vcpu_locked vcpu_locked);

/** Enables (`enable` true) or disables the virtual interrupt `vint_id`. */
void vcpu_virt_interrupt_enable(struct vcpu_locked vcpu_locked,
				uint32_t vint_id, bool enable);

/** Returns the next pending-and-enabled interrupt without removing it. */
uint32_t vcpu_virt_interrupt_peek_pending_and_enabled(
	struct vcpu_locked vcpu_locked);
/** Returns and removes the next pending-and-enabled interrupt. */
uint32_t vcpu_virt_interrupt_get_pending_and_enabled(
	struct vcpu_locked vcpu_locked);
/** Injects the virtual interrupt `vint_id` into the locked vCPU. */
void vcpu_virt_interrupt_inject(struct vcpu_locked vcpu_locked,
				uint32_t vint_id);
/** Clears the virtual interrupt `vint_id` from the locked vCPU. */
void vcpu_virt_interrupt_clear(struct vcpu_locked vcpu_locked,
			       uint32_t vint_id);

/** Records direct-request origin state on the target vCPU. */
void vcpu_dir_req_set_state(struct vcpu_locked target_locked, bool is_ffa_req2,
			    ffa_id_t sender_vm_id, struct ffa_value args);

/** Clears direct-request origin state on the locked vCPU. */
void vcpu_dir_req_reset_state(struct vcpu_locked vcpu_locked);