blob: 7f81d1f1cf0bb3ac17991d71a512de74463eb95b [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull18834872018-10-12 11:48:09 +01007 */
8
Andrew Scullfbc938a2018-08-20 14:09:28 +01009#pragma once
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010010
Andrew Scull9726c252019-01-23 13:44:19 +000011#include <stdatomic.h>
12
Olivier Deprezfebf9e62023-03-02 15:44:35 +010013#include "hf/arch/vm/vm.h"
Andrew Walbran1f32e722019-06-07 17:57:26 +010014
Andrew Scull18c78fc2018-08-20 12:57:41 +010015#include "hf/cpu.h"
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -060016#include "hf/ffa_partition_manifest.h"
Madhukar Pappireddy464f2462021-08-03 11:23:07 -050017#include "hf/interrupt_desc.h"
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +000018#include "hf/list.h"
Andrew Scull18c78fc2018-08-20 12:57:41 +010019#include "hf/mm.h"
Wedson Almeida Filho22d5eaa2018-12-16 00:38:49 +000020#include "hf/mpool.h"
Jose Marinho75509b42019-04-09 09:34:59 +010021
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010022#include "vmapi/hf/ffa.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010023
Andrew Scullae9962e2019-10-03 16:51:16 +010024#define MAX_SMCS 32
Madhukar Pappireddy464f2462021-08-03 11:23:07 -050025#define VM_MANIFEST_MAX_INTERRUPTS 32
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +010026
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -060027/** Action for Other-Secure interrupts by SPMC. */
28#define OTHER_S_INT_ACTION_QUEUED 0
29#define OTHER_S_INT_ACTION_SIGNALED 1
30
/**
 * Power management bitfields stating which messages a VM is willing to be
 * notified about.
 */
35#define VM_POWER_MANAGEMENT_CPU_OFF_SHIFT (0)
Olivier Deprez20137752023-02-07 10:55:35 +010036
/**
 * The state of an RX buffer, as defined by FF-A v1.1 EAC0 specification.
 * It is used to implement ownership rules, as defined in the section 6.2.2.4.2.
 *
 * EMPTY is the initial state. It is set by default to the endpoints at the
 * virtual instance.
 * The following state transitions are possible:
 * * EMPTY => FULL: message sent to a partition. Ownership given to the
 * partition.
 * * EMPTY => OTHER_WORLD_OWNED: This state transition only applies to NWd VMs.
 * Used by the SPMC or Hypervisor to track that ownership of the RX buffer
 * belongs to the other world:
 * - The Hypervisor does this state transition after forwarding the
 * FFA_RXTX_MAP call to the SPMC, for it to map a VM's RXTX buffers into SPMC's
 * translation regime.
 * - SPMC was previously given ownership of the VM's RX buffer, after the
 * FFA_RXTX_MAP interface has been successfully forwarded to it. The SPMC does
 * this state transition, when handling a successful FFA_RX_ACQUIRE, assigning
 * ownership to the hypervisor.
 * * FULL => EMPTY: Partition received an RX buffer full notification, consumed
 * the content of buffers, and called FFA_RX_RELEASE or FFA_MSG_WAIT. SPMC or
 * Hypervisor's ownership reestablished.
 * * OTHER_WORLD_OWNED => EMPTY: VM called FFA_RX_RELEASE, the hypervisor
 * forwarded it to the SPMC, which reestablishes ownership of the VM's buffer.
 * SPs should never have their buffers state set to OTHER_WORLD_OWNED.
 */
enum mailbox_state {
	/** There is no message in the mailbox. */
	MAILBOX_STATE_EMPTY,

	/** There is a message in the mailbox that is waiting for a reader. */
	MAILBOX_STATE_FULL,

	/**
	 * In the SPMC, it means the Hypervisor/OS Kernel owns the RX buffer.
	 * In the Hypervisor, it means the SPMC owns the RX buffer.
	 */
	MAILBOX_STATE_OTHER_WORLD_OWNED,
};
76
/** RX/TX buffer pair of a partition, plus RX ownership tracking. */
struct mailbox {
	/** Ownership state of the RX buffer; see `enum mailbox_state`. */
	enum mailbox_state state;
	/* RX (receive) buffer mapping; writable by the partition manager. */
	void *recv;
	/* TX (send) buffer mapping; read-only from this side. */
	const void *send;

	/** The ID of the VM which sent the message currently in `recv`. */
	ffa_id_t recv_sender;

	/** The size of the message currently in `recv`. */
	uint32_t recv_size;

	/**
	 * The FF-A function ID to use to deliver the message currently in
	 * `recv`.
	 */
	uint32_t recv_func;
};
94
/** Pending/retrieved tracking for one notifications bitmap. */
struct notifications_state {
	/**
	 * To keep track of the notifications pending.
	 * Set on call to FFA_NOTIFICATION_SET, and cleared on call to
	 * FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t pending;

	/**
	 * Set on FFA_NOTIFICATION_INFO_GET to keep track of the notifications
	 * whose information has been retrieved by the referred ABI.
	 * Cleared on call to FFA_NOTIFICATION_GET.
	 */
	ffa_notifications_bitmap_t info_get_retrieved;
};
110
/** Notification bindings and state for one sender world (VMs or SPs). */
struct notifications {
	/**
	 * The following array maps the notifications to the bound FF-A
	 * endpoint.
	 * The index in the bindings array relates to the notification
	 * ID, and bit position in 'ffa_notifications_bitmap_t'.
	 */
	ffa_id_t bindings_sender_id[MAX_FFA_NOTIFICATIONS];
	/** Bit is set when the corresponding notification is bound per-vCPU. */
	ffa_notifications_bitmap_t bindings_per_vcpu;

	/*
	 * The index of the array below relates to the ID of the VCPU.
	 * This is a dynamically allocated array of struct
	 * notifications_state and has as many entries as vcpu_count.
	 */
	struct notifications_state *per_vcpu;
	/** State of global (not per-vCPU) notifications. */
	struct notifications_state global;
};
128
/**
 * The following enum relates to a state machine to guide the insertion of
 * IDs in the respective list as a result of a FFA_NOTIFICATION_INFO_GET call.
 * As per the FF-A v1.1 specification, the return of the interface
 * FFA_NOTIFICATION_INFO_GET is a list of 16-bit values, regarding the VM ID
 * and VCPU IDs of those with pending notifications.
 * The overall list is composed of "sub-lists" that start with the VM ID, and
 * can follow with up to 3 more VCPU IDs. A VM can have multiple 'sub-lists'.
 * The states are traversed on a per VM basis, and should help with filling the
 * list of IDs.
 *
 * INIT is the initial state. The following state transitions are possible:
 * * INIT => INSERTING: no list has been created for the VM prior. There are
 * notifications pending and the VM ID should be inserted first. If it regards
 * a per VCPU notification the VCPU ID should follow. Only VCPU IDs should be
 * inserted from this point, until reaching the "sub-list" size limit.
 * * INIT => FULL: There is no space in the ID list to insert IDs.
 * * INSERTING => STARTING_NEW: list has been created, adding only VCPU IDs,
 * however the "sub-list" limit has been reached. If there are more per VCPU
 * notifications pending for the VM, a new list should be created starting with
 * the VM ID.
 * * INSERTING => FULL: There is no space in the ID list to insert IDs.
 * * STARTING_NEW => INSERTING: Started a new 'sub-list' for the given VM; for
 * the remaining pending per VCPU notifications, only the VCPU ID should be
 * inserted.
 * * STARTING_NEW => FULL: There is no space in the ID list to insert IDs.
 */
enum notifications_info_get_state {
	INIT,
	INSERTING,
	STARTING_NEW,
	FULL,
};
162
/** Whitelist of SMC function IDs a partition is allowed to issue. */
struct smc_whitelist {
	/** Allowed SMC function IDs; only the first `smc_count` are valid. */
	uint32_t smcs[MAX_SMCS];
	/** Number of valid entries in `smcs`. */
	uint16_t smc_count;
	/*
	 * NOTE(review): presumably, when true, SMCs outside the list are also
	 * permitted — confirm against the whitelist enforcement code.
	 */
	bool permissive;
};
168
/*
 * Implementation defined states for a VM (or SP). Refer to the `vm_set_state`
 * helper for documentation of legal transitions.
 */
enum vm_state {
	/* VM has not yet been created. This is the default value. */
	VM_STATE_NULL,

	/* VM has been created and initialized by the partition manager. */
	VM_STATE_CREATED,

	/*
	 * At least one execution context of the VM has been given CPU cycles
	 * to initialize itself.
	 */
	VM_STATE_RUNNING,

	/* The VM has been aborted due to a fatal error. */
	VM_STATE_ABORTING,
};
189
/* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */
struct vm {
	/** Partition ID used in FF-A calls. */
	ffa_id_t id;
	/** UUIDs declared by the partition; a partition may expose several. */
	struct ffa_uuid uuids[PARTITION_MAX_UUIDS];
	/** FF-A version the partition uses. */
	enum ffa_version ffa_version;
	/** Implementation defined lifecycle state; see `enum vm_state`. */
	enum vm_state state;

	/*
	 * Whether this FF-A instance has negotiated an FF-A version through a
	 * call to FFA_VERSION. Once the version has been negotiated, it is an
	 * error to attempt to change it through another call to FFA_VERSION.
	 */
	bool ffa_version_negotiated;

	/** SMC calls this partition is allowed to make. */
	struct smc_whitelist smc_whitelist;

	/** See api.c for the partial ordering on locks. */
	struct spinlock lock;
	/** Number of execution contexts (vCPUs) of this partition. */
	ffa_vcpu_count_t vcpu_count;
	/** This VM's vCPUs; expected to hold `vcpu_count` entries. */
	struct vcpu *vcpus;
	/** Page tables for the VM's address space. */
	struct mm_ptable ptable;

	/**
	 * Set of page tables used for defining the peripheral's secure
	 * IPA space, in the context of SPMC.
	 */
	struct mm_ptable iommu_ptables[PARTITION_MAX_DMA_DEVICES];
	/** Count of DMA devices assigned to this VM. */
	uint8_t dma_device_count;
	/** RX/TX buffer pair and RX ownership state. */
	struct mailbox mailbox;

	struct {
		/**
		 * State structures for notifications coming from VMs or coming
		 * from SPs. Both fields are maintained by the SPMC.
		 * The hypervisor ignores the 'from_sp' field, given VM
		 * notifications from SPs are managed by the SPMC.
		 */
		struct notifications from_vm;
		struct notifications from_sp;
		/** State of framework notifications. */
		struct notifications_state framework;
		/** Whether notifications are enabled for this partition. */
		bool enabled;
	} notifications;

	/**
	 * Whether this partition is subscribed to receiving VM created/VM
	 * destroyed messages.
	 */
	struct {
		bool vm_created;
		bool vm_destroyed;
	} vm_availability_messages;

	/**
	 * Booting parameters (FF-A SP partitions).
	 */
	uint16_t boot_order;

	/** Entries to pass boot data to the VM. */
	struct {
		/* GP register number in which the boot info address is passed. */
		uint32_t gp_register_num;
		/* IPA of the boot information blob. */
		ipaddr_t blob_addr;
	} boot_info;

	/**
	 * Messaging methods the partition supports; checked by
	 * `vm_supports_messaging_method`.
	 */
	uint16_t messaging_method;

	/**
	 * Action specified by a Partition through the manifest in response to
	 * non secure interrupt.
	 */
	uint8_t ns_interrupts_action;

	/**
	 * Whether the SRI is used for requesting CPU cycles for a partition to
	 * handle interrupts.
	 */
	struct sri_interrupts_policy sri_policy;

	/**
	 * Action specified by a Partition through the manifest in response to
	 * Other-S-Int. See the OTHER_S_INT_ACTION_* values.
	 */
	uint8_t other_s_interrupts_action;
	/*
	 * NOTE(review): presumably "managed exit signaled through vIRQ" —
	 * confirm against the manifest parsing code.
	 */
	bool me_signal_virq;

	/**
	 * Bitmask reporting the power management events that a partition
	 * requests to be signaled about. See VM_POWER_MANAGEMENT_*_SHIFT.
	 */
	uint32_t power_management;

	/**
	 * Secondary entry point supplied by FFA_SECONDARY_EP_REGISTER used
	 * for cold and warm boot of SP execution contexts.
	 */
	ipaddr_t secondary_ep;

	/** Arch-specific VM information. */
	struct arch_vm arch;
	/** True when this is an EL0 partition. */
	bool el0_partition;

	/** Interrupt descriptors (from the partition manifest). */
	struct interrupt_descriptor interrupt_desc[VM_MANIFEST_MAX_INTERRUPTS];

	/* List entry pointing to the next VM in the boot order list. */
	struct list_entry boot_list_node;

	/**
	 * Abort action taken by SPMC if the vCPU of this partition encounters
	 * a fatal error.
	 */
	enum abort_action abort_action;

	/**
	 * Whether the partition supports all the states defined in the
	 * Partition Lifecycle guidance.
	 */
	bool lifecycle_support;
};
309
/** Encapsulates a VM whose lock is held. */
struct vm_locked {
	struct vm *vm;
};
314
/** Container for two vm_locked structures. */
struct two_vm_locked {
	struct vm_locked vm1;
	struct vm_locked vm2;
};
320
/* VM creation, reinitialization and lookup. */
struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count);
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count);
bool vm_reinit(struct vm *vm, struct mpool *ppool);
ffa_vm_count_t vm_get_count(void);
struct vm *vm_find(ffa_id_t id);
struct vm_locked vm_find_locked(ffa_id_t id);
struct vm *vm_find_index(uint16_t index);

/* Locking helpers; see api.c for the partial ordering on locks. */
struct vm_locked vm_lock(struct vm *vm);
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
void vm_unlock(struct vm_locked *locked);
/* Variant taking an already-locked first VM; locks `vm2` in order. */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1,
					   struct vm *vm2);
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index);
bool vm_id_is_current_world(ffa_id_t vm_id);
bool vm_is_mailbox_busy(struct vm_locked to);
bool vm_is_mailbox_other_world_owned(struct vm_locked to);

/* Memory management of the VM's address space (see hf/mm.h). */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool);
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool);
void vm_free_ptables(struct vm *vm, struct mpool *ppool);
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool);
void vm_unmap_rxtx(struct vm_locked vm_locked, struct mpool *ppool);
void vm_unmap_memory_regions(struct vm_locked vm_locked, struct mpool *ppool);

bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     mm_mode_t *mode);
/* Maps a region into the IOMMU page tables of the given DMA device. */
bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, mm_mode_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id);
360
/* Notifications: setup, binding, pending state and FFA_NOTIFICATION_INFO_GET. */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool);
void vm_reset_notifications(struct vm_locked vm_locked, struct mpool *ppool);
bool vm_mailbox_state_busy(struct vm_locked vm_locked);
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications);
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked);
bool vm_are_global_notifications_pending(struct vm_locked vm_locked);
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id);
bool vm_are_notifications_enabled(struct vm *vm);
bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked);
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif);
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications);
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu);
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu);
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu);
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id);
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications);
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked);
/* Fills the FFA_NOTIFICATION_INFO_GET ID list; see notifications_info_get_state. */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state);
bool vm_notifications_pending_not_retrieved_by_scheduler(void);
bool vm_is_notifications_pending_count_zero(void);
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count, uint32_t ids_max_count);
bool vm_supports_messaging_method(struct vm *vm, uint16_t messaging_method);
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu);
Olivier Deprez20137752023-02-07 10:55:35 +0100409
410/**
Olivier Deprez20137752023-02-07 10:55:35 +0100411 * Returns true if the VM requested to receive cpu off power management
412 * events.
413 */
414static inline bool vm_power_management_cpu_off_requested(struct vm *vm)
415{
416 return (vm->power_management &
417 (UINT32_C(1) << VM_POWER_MANAGEMENT_CPU_OFF_SHIFT)) != 0;
418}
Madhukar Pappireddy18c6eb72023-08-21 12:16:18 -0500419
Karl Meakin82041822024-05-20 11:14:34 +0100420/* Return true if `vm` is a UP. */
421static inline bool vm_is_up(const struct vm *vm)
422{
423 return vm->vcpu_count == 1;
424}
425
426/* Return true if `vm` is a MP. */
427static inline bool vm_is_mp(const struct vm *vm)
428{
429 return vm->vcpu_count > 1;
430}
431
Karl Meakin5e996992024-05-20 11:27:07 +0100432/* Return true if `vm` is the primary VM. */
433static inline bool vm_is_primary(const struct vm *vm)
434{
435 return vm->id == HF_PRIMARY_VM_ID;
436}
437
Daniel Boulbyf3cf28c2024-08-22 10:46:23 +0100438/**
439 * Convert a CPU ID for a secondary VM to the corresponding vCPU index.
440 */
441static inline ffa_vcpu_index_t vcpu_id_to_index(cpu_id_t vcpu_id)
442{
443 /* For now we use indices as IDs. */
444 return vcpu_id;
445}
446
/*
 * Helpers updating fields of one of the VM's interrupt descriptors.
 * NOTE(review): each returns a pointer to the updated descriptor —
 * presumably NULL when `id` is not found; confirm in the implementation.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr);
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state);
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable);

/* Boot order list management; see `boot_list_node` in struct vm. */
void vm_update_boot(struct vm *vm);
struct vm *vm_get_boot_vm(void);
struct vm *vm_get_boot_vm_secondary_core(void);
struct vm *vm_get_next_boot(struct vm *vm);
struct vm *vm_get_next_boot_secondary_core(struct vm *vm);
/* Lifecycle state accessors; legal transitions documented at vm_set_state. */
enum vm_state vm_read_state(struct vm *vm);
bool vm_set_state(struct vm_locked vm_locked, enum vm_state to_state);
bool vm_is_discoverable(struct vm *vm);