/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/addr.h"
#include "hf/ffa.h"
#include "hf/ffa_memory_internal.h"
#include "hf/manifest.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

/**
 * The following enum relates to a state machine that guides the handling of
 * the Schedule Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call
 * to FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, as well as avoiding spurious SRIs. A spurious SRI
 * is one for which, upon handling, the call to FFA_NOTIFICATION_INFO_GET
 * returns the error FFA_NO_DATA, which is plausible in an MP system.
 * The state machine also covers delaying of the SRI, requested by setting the
 * FFA_NOTIFICATIONS_FLAG_DELAY_SRI flag in the arguments of the set call.
 * When delayed, the SRI is sent at the context switch to the primary
 * endpoint. The SPMC is implemented under the assumption that the receiver
 * scheduler is an NWd endpoint, hence the SRI is triggered at the world
 * switch. If, concurrently, another notification is set that requires
 * immediate action, the SRI is triggered immediately within that same
 * execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The
 * following state transitions are possible:
 * * HANDLED => DELAYED: Setting a notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting a notification, and not requesting SRI
 *   delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 *   receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
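
/*
 * A minimal sketch (illustrative only, not part of this interface) of the
 * legal transitions documented above. The helper name is hypothetical and
 * only serves to make the state machine explicit.
 */
static inline bool plat_ffa_sri_state_transition_is_valid(
	enum plat_ffa_sri_state from, enum plat_ffa_sri_state to)
{
	switch (from) {
	case HANDLED:
		/* A new SRI can be sent: it may be delayed or triggered. */
		return to == DELAYED || to == TRIGGERED;
	case DELAYED:
		/*
		 * Either the context switch to the receiver scheduler happens
		 * (TRIGGERED), or the scheduler calls
		 * FFA_NOTIFICATION_INFO_GET (HANDLED).
		 */
		return to == TRIGGERED || to == HANDLED;
	case TRIGGERED:
		/* Cleared once the scheduler calls FFA_NOTIFICATION_INFO_GET. */
		return to == HANDLED;
	}

	return false;
}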

/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);
/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);

void plat_ffa_log_init(void);
void plat_ffa_set_tee_enabled(bool tee_enabled);
void plat_ffa_init(struct mpool *ppool);
bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id,
	uint32_t share_func);

bool plat_ffa_is_direct_request_valid(struct vcpu *current,
	ffa_id_t sender_vm_id,
	ffa_id_t receiver_vm_id);
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
	ffa_id_t sender_vm_id,
	ffa_id_t receiver_vm_id);
bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
	struct vm *receiver_vm,
	uint32_t func);
bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
	struct ffa_value args,
	struct ffa_value *ret);

bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
	struct ffa_value *ret);

bool plat_ffa_acquire_receiver_rx(struct vm_locked locked,
	struct ffa_value *ret);

bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
	struct vm_locked receiver_locked);

bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
	struct ffa_value *ret);

bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
	ffa_id_t vm_id);

bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
	ffa_id_t sender_id,
	ffa_id_t receiver_id);
bool plat_ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
	ffa_id_t sender_id,
	ffa_id_t receiver_id);

bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
	ffa_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap,
	struct ffa_value *ret);

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
	ffa_id_t receiver_id, uint32_t flags);

bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
	ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp,
	struct ffa_value *ret);

bool plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
	uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);

void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);

void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked);

/**
 * Checks whether managed exit is supported by the given SP.
 */
bool plat_ffa_vm_managed_exit_supported(struct vm *vm);

/**
 * Encodes a memory handle according to section 5.10.2 of the FF-A v1.0 spec.
 */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);

/**
 * Checks whether the given handle was allocated by the current world,
 * according to the handle encoding rules.
 */
bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle);
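
/*
 * Illustrative sketch only, not the authoritative encoding: the two helpers
 * above rely on the relayer recording its identity in the handle it
 * allocates, alongside an allocation index. The hypothetical helpers below
 * assume, purely for illustration, that a handle allocated by the SPMC has
 * bit 63 set and one allocated by the hypervisor has it cleared; the real
 * encoding is defined by the FF-A spec and by each world's implementation of
 * the functions above.
 */
static inline ffa_memory_handle_t example_memory_handle_make(
	uint64_t index, bool allocated_by_spmc)
{
	ffa_memory_handle_t allocator =
		allocated_by_spmc ? (UINT64_C(1) << 63) : UINT64_C(0);

	return allocator | (index & ~(UINT64_C(1) << 63));
}

static inline bool example_memory_handle_allocated_by_spmc(
	ffa_memory_handle_t handle)
{
	return (handle & (UINT64_C(1) << 63)) != 0;
}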

/**
 * For non-secure memory, retrieves the NS mode if the partition manager
 * supports it. The SPMC returns MM_MODE_NS, and the hypervisor returns 0 as
 * it only deals with NS accesses by default.
 */
uint32_t plat_ffa_other_world_mode(void);
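
/*
 * Usage sketch (illustrative only; the wrapper name is hypothetical): when
 * mapping memory that belongs to the other world, callers would typically OR
 * the returned bits into the memory mode, so the same code adds MM_MODE_NS
 * in the SPMC and is a no-op in the hypervisor.
 */
static inline uint32_t example_mode_with_other_world_bits(uint32_t mode)
{
	return mode | plat_ffa_other_world_mode();
}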

/**
 * Returns the FF-A partition info VM/SP properties for the given VM ID.
 */
ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_id_t vm_id, const struct vm *target);

/**
 * Gets the NWd VM's structure.
 */
struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id);

struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id);

/**
 * Creates a notifications bitmap for the VM of the given ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count);

/**
 * Issues an FFA_NOTIFICATION_BITMAP_CREATE call.
 * Returns true if the call succeeds, and false if it returns FFA_ERROR_32.
 */
bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
	ffa_vcpu_count_t vcpu_count);

/**
 * Destroys the notifications bitmap for the given VM ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id);

/**
 * Helper to get the struct notifications, depending on the sender's ID.
 */
struct notifications *plat_ffa_vm_get_notifications_senders_world(
	struct vm_locked vm_locked, ffa_id_t sender_id);

/**
 * Forwards normal world calls of the FFA_RUN ABI to the other world.
 */
bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
	struct ffa_value *ret);

bool plat_ffa_notification_info_get_call(struct ffa_value *ret);

bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes,
	uint32_t *lists_count,
	const uint32_t ids_count_max);

/** Helper to set the current SRI state. */
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if there has been
 * a call to FFA_NOTIFICATION_SET and the SRI has been delayed.
 * To be called at a context switch to the NWd.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if it hasn't been
 * delayed in the call to FFA_NOTIFICATION_SET.
 */
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);

/**
 * Initializes the Schedule Receiver Interrupt needed in the context of
 * notifications support.
 */
void plat_ffa_sri_init(struct cpu *cpu);
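
/*
 * Illustrative sketch only (the function below is hypothetical and not part
 * of this interface): how a notification-set path might drive the SRI
 * helpers above, assuming `delay_sri` reflects the
 * FFA_NOTIFICATIONS_FLAG_DELAY_SRI bit of the FFA_NOTIFICATION_SET call and
 * `cpu` is the CPU handling the call. A delayed SRI is later sent by
 * plat_ffa_sri_trigger_if_delayed() on the context switch to the NWd.
 */
static inline void example_sri_on_notification_set(struct cpu *cpu,
	bool delay_sri)
{
	if (delay_sri) {
		/* Sent later, at the next world switch to the NWd. */
		plat_ffa_sri_state_set(DELAYED);
	} else {
		/* Immediate action required: send the SRI right away. */
		plat_ffa_sri_trigger_not_delayed(cpu);
	}
}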

void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes,
	uint32_t *lists_count,
	const uint32_t ids_count_max);

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);

struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
	struct vcpu **next);

/**
 * Checks if the current SP can resume the target VM/SP using the FFA_RUN ABI.
 */
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
	ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
	struct ffa_value *run_ret, struct vcpu **next);

/**
 * Deactivates an interrupt.
 */
int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
	struct vcpu *current);

void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next);
bool plat_ffa_inject_notification_pending_interrupt(
	struct vcpu_locked next_locked, struct vcpu_locked current_locked,
	struct vm_locked receiver_locked);

bool plat_ffa_partition_info_get_regs_forward_allowed(void);

void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
	const uint32_t flags,
	struct ffa_partition_info *partitions,
	ffa_vm_count_t *ret_count);

void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
	paddr_t fdt_addr,
	size_t fdt_allocated_size,
	const struct manifest_vm *manifest_vm,
	const struct boot_params *boot_params,
	struct mpool *ppool);

/**
 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
 * the virtual FF-A instance.
 */
bool plat_ffa_is_secondary_ep_register_supported(void);

/**
 * Performs checks for the state transition being requested by the Partition,
 * based on its runtime model, and returns false if an illegal transition is
 * being requested.
 */
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
	ffa_id_t vm_id,
	ffa_id_t receiver_vm_id,
	struct vcpu_locked locked_vcpu,
	uint32_t func,
	enum vcpu_state *next_state);

struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current);
void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
	struct vcpu_locked target_locked);

void plat_ffa_wind_call_chain_ffa_direct_req(
	struct vcpu_locked current_locked,
	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id);

void plat_ffa_unwind_call_chain_ffa_direct_resp(
	struct vcpu_locked current_locked, struct vcpu_locked next_locked);

void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
	struct vm_locked vm_locked);

bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
	struct vcpu **next,
	struct ffa_value to_ret,
	struct ffa_value *signal_interrupt);
/*
 * Handles FF-A memory share calls with recipients from the other world.
 */
struct ffa_value plat_ffa_other_world_mem_send(
	struct vm *from, uint32_t share_func,
	struct ffa_memory_region **memory_region, uint32_t length,
	uint32_t fragment_length, struct mpool *page_pool);

/**
 * Handles the memory reclaim if a memory handle from the other world is
 * provided.
 */
struct ffa_value plat_ffa_other_world_mem_reclaim(
	struct vm *to, ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags, struct mpool *page_pool);

/**
 * Handles the continuation of the memory send operation in case the memory
 * region descriptor contains multiple segments.
 */
struct ffa_value plat_ffa_other_world_mem_send_continue(
	struct vm *from, void *fragment, uint32_t fragment_length,
	ffa_memory_handle_t handle, struct mpool *page_pool);

/**
 * This is the FF-A v1.0 FFA_MSG_SEND interface.
 * Implemented for the hypervisor, but not in the SPMC.
 */
struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
	ffa_id_t receiver_vm_id, uint32_t size,
	struct vcpu *current, struct vcpu **next);

struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
	struct vcpu **next,
	uint32_t timeout_low,
	uint32_t timeout_high);

ffa_memory_attributes_t plat_ffa_memory_security_mode(
	ffa_memory_attributes_t attributes, uint32_t mode);

/**
 * FF-A v1.2 FFA_ERROR interface.
 * Implemented for the SPMC in the RTM_SP_INIT runtime model.
 */
struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
	uint32_t error_code);

bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id);

struct ffa_value plat_ffa_msg_recv(bool block,
	struct vcpu_locked current_locked,
	struct vcpu **next);

int64_t plat_ffa_mailbox_writable_get(const struct vcpu *current);

int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current);

/**
 * Reconfigures an interrupt belonging to the current partition at runtime.
 */
int64_t plat_ffa_interrupt_reconfigure(uint32_t int_id, uint32_t command,
	uint32_t value, struct vcpu *current);

/**
 * Reclaims all resources belonging to a VM in the aborted state.
 */
void plat_ffa_free_vm_resources(struct vm_locked vm_locked);