/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
#pragma once

#include "hf/ffa.h"
#include "hf/manifest.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +000015
/**
 * The following enum relates to a state machine to guide the handling of the
 * Scheduler Receiver Interrupt.
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call to
 * FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list, and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The use of the following state machine allows for synchronized sending
 * and handling of the SRI, as well as avoiding the occurrence of spurious
 * SRI. A spurious SRI would be one such that upon handling a call to
 * FFA_NOTIFICATION_INFO_GET would return error FFA_NO_DATA, which is plausible
 * in an MP system.
 * The state machine also aims at resolving the delay of the SRI by setting
 * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call. By
 * delaying, the SRI is sent in context switching to the primary endpoint.
 * The SPMC is implemented under the assumption the receiver scheduler is a
 * NWd endpoint, hence the SRI is triggered at the world switch.
 * If concurrently another notification is set that requires immediate action,
 * the SRI is triggered immediately within that same execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The following
 * state transitions are possible:
 * * HANDLED => DELAYED: Setting notification, and requesting SRI delay.
 * * HANDLED => TRIGGERED: Setting notification, and not requesting SRI delay.
 * * DELAYED => TRIGGERED: SRI was delayed, and the context switch to the
 * receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
	HANDLED = 0,
	DELAYED,
	TRIGGERED,
};
53
/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);

/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);
Olivier Deprez55a189e2021-06-09 15:45:27 +020058
59void plat_ffa_log_init(void);
J-Alvesa09ac2d2022-06-07 13:46:59 +010060void plat_ffa_set_tee_enabled(bool tee_enabled);
Raghu Krishnamurthyf5fec202022-09-30 07:25:10 -070061void plat_ffa_init(struct mpool *ppool);
J-Alves19e20cf2023-08-02 12:48:55 +010062bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id,
Maksims Svecovsa3d570c2021-12-08 11:16:32 +000063 uint32_t share_func);
64
Olivier Deprez55a189e2021-06-09 15:45:27 +020065bool plat_ffa_is_direct_request_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +010066 ffa_id_t sender_vm_id,
67 ffa_id_t receiver_vm_id);
Olivier Deprez55a189e2021-06-09 15:45:27 +020068bool plat_ffa_is_direct_response_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +010069 ffa_id_t sender_vm_id,
70 ffa_id_t receiver_vm_id);
J-Alves439ac972021-11-18 17:32:03 +000071bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
72 struct vm *receiver_vm);
J-Alves19e20cf2023-08-02 12:48:55 +010073bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
Olivier Deprez55a189e2021-06-09 15:45:27 +020074 struct ffa_value args,
75 struct ffa_value *ret);
Federico Recanati25053ee2022-03-14 15:01:53 +010076
Federico Recanati7bef0b92022-03-17 14:56:22 +010077bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
78 struct ffa_value *ret);
79
Federico Recanati7bef0b92022-03-17 14:56:22 +010080bool plat_ffa_acquire_receiver_rx(struct vm_locked locked,
Federico Recanati644f0462022-03-17 12:04:00 +010081 struct ffa_value *ret);
82
Federico Recanati25053ee2022-03-14 15:01:53 +010083bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
84 struct vm_locked receiver_locked);
85
J-Alves19e20cf2023-08-02 12:48:55 +010086bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
Federico Recanati25053ee2022-03-14 15:01:53 +010087 struct ffa_value *ret);
88
J-Alvesa0f317d2021-06-09 13:31:59 +010089bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +010090 ffa_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +000091
92bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +010093 ffa_id_t sender_id,
94 ffa_id_t receiver_id);
J-Alvesb15e9402021-09-08 11:44:42 +010095bool plat_ffa_notifications_update_bindings_forward(
J-Alves19e20cf2023-08-02 12:48:55 +010096 ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
J-Alvesb15e9402021-09-08 11:44:42 +010097 ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);
J-Alvesc003a7a2021-03-18 13:06:53 +000098
J-Alvesaa79c012021-07-09 14:29:45 +010099bool plat_ffa_is_notification_set_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +0100100 ffa_id_t sender_id,
101 ffa_id_t receiver_id);
J-Alvesaa79c012021-07-09 14:29:45 +0100102
J-Alves19e20cf2023-08-02 12:48:55 +0100103bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
104 ffa_id_t receiver_vm_id, uint32_t flags,
J-Alvesde7bd2f2021-09-09 19:54:35 +0100105 ffa_notifications_bitmap_t bitmap,
106 struct ffa_value *ret);
107
J-Alvesaa79c012021-07-09 14:29:45 +0100108bool plat_ffa_is_notification_get_valid(struct vcpu *current,
J-Alves19e20cf2023-08-02 12:48:55 +0100109 ffa_id_t receiver_id, uint32_t flags);
J-Alvesaa79c012021-07-09 14:29:45 +0100110
J-Alves98ff9562021-09-09 14:39:41 +0100111bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
112 ffa_vcpu_index_t vcpu_id,
113 ffa_notifications_bitmap_t *from_sp,
114 struct ffa_value *ret);
115
J-Alvesd605a092022-03-28 14:20:48 +0100116bool plat_ffa_notifications_get_framework_notifications(
117 struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
118 uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);
119
/** Forwards FFA_RXTX_MAP to the other world, if applicable. */
void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);

/** Forwards FFA_RXTX_UNMAP to the other world, if applicable. */
void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked);
Federico Recanati8da9e332022-02-10 11:00:17 +0100125
Olivier Deprez55a189e2021-06-09 15:45:27 +0200126/**
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +0100127 * Checks whether managed exit is supported by given SP.
128 */
129bool plat_ffa_vm_managed_exit_supported(struct vm *vm);
130
131/**
Olivier Deprez55a189e2021-06-09 15:45:27 +0200132 * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec.
133 */
134ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);
135
136/**
137 * Checks whether given handle was allocated by current world, according to
138 * handle encoding rules.
139 */
140bool plat_ffa_memory_handle_allocated_by_current_world(
141 ffa_memory_handle_t handle);
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100142
143/**
J-Alves7db32002021-12-14 14:44:50 +0000144 * For non-secure memory, retrieve the NS mode if the partition manager supports
145 * it. The SPMC will return MM_MODE_NS, and the hypervisor 0 as it only deals
146 * with NS accesses by default.
147 */
148uint32_t plat_ffa_other_world_mode(void);
149
150/**
Maksims Svecovsb596eab2021-04-27 00:52:27 +0100151 * Return the FF-A partition info VM/SP properties given the VM id.
152 */
153ffa_partition_properties_t plat_ffa_partition_properties(
J-Alves19e20cf2023-08-02 12:48:55 +0100154 ffa_id_t vm_id, const struct vm *target);
J-Alvesa0f317d2021-06-09 13:31:59 +0100155
156/**
157 * Get NWd VM's structure.
158 */
J-Alves19e20cf2023-08-02 12:48:55 +0100159struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id);
J-Alvesa0f317d2021-06-09 13:31:59 +0100160
J-Alves19e20cf2023-08-02 12:48:55 +0100161struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +0200162
J-Alvesa0f317d2021-06-09 13:31:59 +0100163/**
164 * Creates a bitmap for the VM of the given ID.
165 */
166struct ffa_value plat_ffa_notifications_bitmap_create(
J-Alves19e20cf2023-08-02 12:48:55 +0100167 ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count);
J-Alvesa0f317d2021-06-09 13:31:59 +0100168
169/**
J-Alvesa9c7cba2021-08-25 16:26:11 +0100170 * Issues a FFA_NOTIFICATION_BITMAP_CREATE.
J-Alvesa4730db2021-11-02 10:31:01 +0000171 * Returns true if the call goes well, and false if call returns with
172 * FFA_ERROR_32.
J-Alvesa9c7cba2021-08-25 16:26:11 +0100173 */
J-Alves19e20cf2023-08-02 12:48:55 +0100174bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
J-Alvesa4730db2021-11-02 10:31:01 +0000175 ffa_vcpu_count_t vcpu_count);
J-Alvesa9c7cba2021-08-25 16:26:11 +0100176
177/**
J-Alvesa0f317d2021-06-09 13:31:59 +0100178 * Destroys the notifications bitmap for the given VM ID.
179 */
J-Alves19e20cf2023-08-02 12:48:55 +0100180struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id);
J-Alvesc003a7a2021-03-18 13:06:53 +0000181
182/**
183 * Helper to get the struct notifications, depending on the sender's id.
184 */
185struct notifications *plat_ffa_vm_get_notifications_senders_world(
J-Alves19e20cf2023-08-02 12:48:55 +0100186 struct vm_locked vm_locked, ffa_id_t sender_id);
J-Alvesc003a7a2021-03-18 13:06:53 +0000187
188/**
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -0700189 * Forward normal world calls of FFA_RUN ABI to other world.
190 */
J-Alves19e20cf2023-08-02 12:48:55 +0100191bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
Raghu Krishnamurthy62f97a72021-07-27 02:14:59 -0700192 struct ffa_value *ret);
J-Alvesc8e8a222021-06-08 17:33:52 +0100193
194bool plat_ffa_notification_info_get_call(struct ffa_value *ret);
195
196bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
197 uint32_t *lists_sizes,
198 uint32_t *lists_count,
199 const uint32_t ids_count_max);
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700200
J-Alves13394022021-06-30 13:48:49 +0100201/** Helper to set SRI current state. */
202void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);
203
204/**
205 * Helper to send SRI and safely update `ffa_sri_state`, if there has been
206 * a call to FFA_NOTIFICATION_SET, and the SRI has been delayed.
207 * To be called at a context switch to the NWd.
208 */
209void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);
210
211/**
212 * Helper to send SRI and safely update `ffa_sri_state`, if it hasn't been
213 * delayed in call to FFA_NOTIFICATION_SET.
214 */
215void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
216
217/**
218 * Initialize Schedule Receiver Interrupts needed in the context of
219 * notifications support.
220 */
221void plat_ffa_sri_init(struct cpu *cpu);
222
J-Alvesca058c22021-09-10 14:02:07 +0100223void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
224 uint32_t *lists_sizes,
225 uint32_t *lists_count,
226 const uint32_t ids_count_max);
227
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700228bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
229bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500230
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600231struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
Madhukar Pappireddydd883202022-10-24 16:49:28 -0500232 struct vcpu **next);
Madhukar Pappireddy5522c672021-12-17 16:35:51 -0600233
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -0500234/**
235 * Check if current SP can resume target VM/SP using FFA_RUN ABI.
236 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600237bool plat_ffa_run_checks(struct vcpu_locked current_locked,
J-Alves19e20cf2023-08-02 12:48:55 +0100238 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600239 struct ffa_value *run_ret, struct vcpu **next);
Madhukar Pappireddyf675bb62021-08-03 12:57:10 -0500240
241/**
242 * Deactivate interrupt.
243 */
244int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
245 struct vcpu *current);
Madhukar Pappireddycbecc962021-08-03 13:11:57 -0500246
J-Alvescf0c4712023-08-04 14:41:50 +0100247void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next);
J-Alves6e2abc62021-12-02 14:58:56 +0000248bool plat_ffa_inject_notification_pending_interrupt(
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600249 struct vcpu_locked next_locked, struct vcpu_locked current_locked,
J-Alves6e2abc62021-12-02 14:58:56 +0000250 struct vm_locked receiver_locked);
Olivier Depreze562e542020-06-11 17:31:54 +0200251
Raghu Krishnamurthye74d6532023-06-07 12:21:54 -0700252bool plat_ffa_partition_info_get_regs_forward_allowed(void);
Raghu Krishnamurthyef432cb2022-12-29 06:56:32 -0800253
Olivier Depreze562e542020-06-11 17:31:54 +0200254void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
Daniel Boulbyb46cad12021-12-13 17:47:21 +0000255 const uint32_t flags,
Olivier Depreze562e542020-06-11 17:31:54 +0200256 struct ffa_partition_info *partitions,
257 ffa_vm_count_t *ret_count);
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -0700258
259void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
260 paddr_t fdt_addr,
261 size_t fdt_allocated_size,
262 const struct manifest_vm *manifest_vm,
J-Alves77b6f4f2023-03-15 11:34:49 +0000263 const struct boot_params *boot_params,
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -0700264 struct mpool *ppool);
Olivier Deprezd614d322021-06-18 15:21:00 +0200265
266/**
267 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
268 * the virtual FF-A instance.
269 */
270bool plat_ffa_is_secondary_ep_register_supported(void);
Madhukar Pappireddy0ea239a2022-06-21 17:26:57 -0500271
272/**
273 * Perform checks for the state transition being requested by the Partition
274 * based on it's runtime model and return false if an illegal transition is
275 * being performed.
276 */
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600277bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
J-Alves19e20cf2023-08-02 12:48:55 +0100278 ffa_id_t vm_id,
279 ffa_id_t receiver_vm_id,
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600280 struct vcpu_locked locked_vcpu,
281 uint32_t func,
Madhukar Pappireddy0ea239a2022-06-21 17:26:57 -0500282 enum vcpu_state *next_state);
Madhukar Pappireddy49fe6702022-06-21 17:52:23 -0500283
Madhukar Pappireddyd46c06e2022-06-21 18:14:52 -0500284struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current);
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600285void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
Madhukar Pappireddy1480fce2022-06-21 18:09:25 -0500286 struct vcpu_locked target_locked);
287
Madhukar Pappireddy49fe6702022-06-21 17:52:23 -0500288void plat_ffa_wind_call_chain_ffa_direct_req(
289 struct vcpu_locked current_locked,
J-Alves19e20cf2023-08-02 12:48:55 +0100290 struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id);
Madhukar Pappireddyc0fb87e2022-06-21 17:59:15 -0500291
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600292void plat_ffa_unwind_call_chain_ffa_direct_resp(
293 struct vcpu_locked current_locked, struct vcpu_locked next_locked);
Madhukar Pappireddy046dad02022-06-21 18:43:33 -0500294
Madhukar Pappireddy486360d2022-09-06 15:32:24 -0500295void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
296 struct vm_locked vm_locked);
J-Alves66652252022-07-06 09:49:51 +0100297
J-Alvesb5084cf2022-07-06 14:20:12 +0100298bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
299 struct vcpu **next,
300 struct ffa_value to_ret,
301 struct ffa_value *signal_interrupt);
J-Alves66652252022-07-06 09:49:51 +0100302/*
303 * Handles FF-A memory share calls with recipients from the other world.
304 */
305struct ffa_value plat_ffa_other_world_mem_send(
306 struct vm *from, uint32_t share_func,
307 struct ffa_memory_region **memory_region, uint32_t length,
308 uint32_t fragment_length, struct mpool *page_pool);
J-Alvesfc19b372022-07-06 12:17:35 +0100309
310/**
311 * Handles the memory reclaim if a memory handle from the other world is
312 * provided.
313 */
314struct ffa_value plat_ffa_other_world_mem_reclaim(
315 struct vm *to, ffa_memory_handle_t handle,
316 ffa_memory_region_flags_t flags, struct mpool *page_pool);
Madhukar Pappireddy2f76e492022-09-06 15:21:59 -0500317
J-Alvesb5084cf2022-07-06 14:20:12 +0100318/**
319 * Handles the memory retrieve request if the specified memory handle belongs
320 * to the other world.
321 */
322struct ffa_value plat_ffa_other_world_mem_retrieve(
323 struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
324 uint32_t length, struct mpool *page_pool);
J-Alvesfdd29272022-07-19 13:16:31 +0100325
326/**
327 * Handles the continuation of the memory send operation in case the memory
328 * region descriptor contains multiple segments.
329 */
330struct ffa_value plat_ffa_other_world_mem_send_continue(
331 struct vm *from, void *fragment, uint32_t fragment_length,
332 ffa_memory_handle_t handle, struct mpool *page_pool);
Madhukar Pappireddy5fd32482022-01-07 14:53:26 -0600333
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600334bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked);
J-Alves27b71962022-12-12 15:29:58 +0000335
336/**
337 * This FF-A v1.0 FFA_MSG_SEND interface.
338 * Implemented for the Hypervisor, but not in the SPMC.
339 */
J-Alves19e20cf2023-08-02 12:48:55 +0100340struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
341 ffa_id_t receiver_vm_id, uint32_t size,
J-Alves27b71962022-12-12 15:29:58 +0000342 struct vcpu *current, struct vcpu **next);
Madhukar Pappireddy1f2f2132023-02-14 17:48:44 -0600343
Madhukar Pappireddybd10e572023-03-06 16:39:49 -0600344struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
Madhukar Pappireddy184501c2023-05-23 17:24:06 -0500345 struct vcpu **next,
346 uint32_t timeout_low,
347 uint32_t timeout_high);
Olivier Deprez878bd5b2021-04-15 19:05:10 +0200348
349ffa_memory_attributes_t plat_ffa_memory_security_mode(
350 ffa_memory_attributes_t attributes, uint32_t mode);
Kathleen Capella6ab05132023-05-10 12:27:35 -0400351
352/**
353 * FF-A v1.2 FFA_ERROR interface.
354 * Implemented for SPMC in RTM_SP_INIT runtime model.
355 */
356struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
357 uint32_t error_code);
Raghu Krishnamurthya9ccf122023-03-27 20:42:01 -0700358
J-Alves19e20cf2023-08-02 12:48:55 +0100359bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id);
J-Alves2ced1672022-12-12 14:35:38 +0000360
361struct ffa_value plat_ffa_msg_recv(bool block,
362 struct vcpu_locked current_locked,
363 struct vcpu **next);
J-Alvesbc7ab4f2022-12-13 12:09:25 +0000364
365int64_t plat_ffa_mailbox_writable_get(const struct vcpu *current);
366
J-Alves19e20cf2023-08-02 12:48:55 +0100367int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current);
Madhukar Pappireddy72d23932023-07-24 15:57:28 -0500368
369/**
370 * Reconfigure the interrupt belonging to the current partition at runtime.
371 */
372int64_t plat_ffa_interrupt_reconfigure(uint32_t int_id, uint32_t command,
373 uint32_t value, struct vcpu *current);