/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/ffa.h"
#include "hf/manifest.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

/**
 * The following enum defines a state machine that guides the handling of the
 * Schedule Receiver Interrupt (SRI).
 * The SRI is used to signal the receiver scheduler that there are pending
 * notifications for the receiver, and it is sent when there is a valid call
 * to FFA_NOTIFICATION_SET.
 * The FFA_NOTIFICATION_INFO_GET interface must be called in the SRI handler,
 * after which the FF-A driver should process the returned list and request
 * the receiver scheduler to give the receiver CPU cycles to process the
 * notification.
 * The state machine allows for synchronized sending and handling of the SRI,
 * and avoids spurious SRIs. A spurious SRI is one for which the handler's
 * call to FFA_NOTIFICATION_INFO_GET would return the error FFA_NO_DATA,
 * which is plausible in an MP system.
 * The state machine also covers delaying the SRI, requested by setting the
 * flag FFA_NOTIFICATIONS_FLAG_DELAY_SRI in the arguments of the set call.
 * When delayed, the SRI is sent on the context switch to the primary
 * endpoint. The SPMC is implemented under the assumption that the receiver
 * scheduler is a NWd endpoint, hence the SRI is triggered at the world
 * switch. If, concurrently, another notification is set that requires
 * immediate action, the SRI is triggered immediately within that same
 * execution context.
 *
 * HANDLED is the initial state, and means a new SRI can be sent. The
 * following state transitions are possible:
 * * HANDLED => DELAYED: a notification was set, requesting the SRI to be
 *   delayed.
 * * HANDLED => TRIGGERED: a notification was set, not requesting any SRI
 *   delay.
 * * DELAYED => TRIGGERED: the SRI was delayed, and the context switch to the
 *   receiver scheduler is being done.
 * * DELAYED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 * * TRIGGERED => HANDLED: the scheduler called FFA_NOTIFICATION_INFO_GET.
 */
enum plat_ffa_sri_state {
        HANDLED = 0,
        DELAYED,
        TRIGGERED,
};
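
/*
 * Illustrative walk-through of the state machine above for a delayed SRI.
 * This is a sketch, not part of the interface: the exact call sites are
 * implementation specific, and the helpers used here are only declared
 * further down in this header.
 *
 *        // FFA_NOTIFICATION_SET with FFA_NOTIFICATIONS_FLAG_DELAY_SRI set:
 *        plat_ffa_sri_state_set(DELAYED);        // HANDLED => DELAYED
 *
 *        // Later, on the context switch to the receiver scheduler (NWd):
 *        plat_ffa_sri_trigger_if_delayed(cpu);   // DELAYED => TRIGGERED
 *
 *        // The scheduler handles the SRI and calls FFA_NOTIFICATION_INFO_GET:
 *        plat_ffa_sri_state_set(HANDLED);        // TRIGGERED => HANDLED
 */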

/** Returns information on features that are specific to the platform. */
struct ffa_value plat_ffa_features(uint32_t function_feature_id);
/** Returns the SPMC ID. */
struct ffa_value plat_ffa_spmc_id_get(void);

void plat_ffa_log_init(void);
void plat_ffa_set_tee_enabled(bool tee_enabled);
void plat_ffa_init(struct mpool *ppool);
bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id,
                                   uint32_t share_func);

bool plat_ffa_is_direct_request_valid(struct vcpu *current,
                                      ffa_id_t sender_vm_id,
                                      ffa_id_t receiver_vm_id);
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
                                       ffa_id_t sender_vm_id,
                                       ffa_id_t receiver_vm_id);
bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
                                          struct vm *receiver_vm);
bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
                                     struct ffa_value args,
                                     struct ffa_value *ret);

bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
                                 struct ffa_value *ret);

bool plat_ffa_acquire_receiver_rx(struct vm_locked locked,
                                  struct ffa_value *ret);

bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
                                        struct vm_locked receiver_locked);

bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
                                struct ffa_value *ret);

bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
                                            ffa_id_t vm_id);

bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
                                          ffa_id_t sender_id,
                                          ffa_id_t receiver_id);
bool plat_ffa_notifications_update_bindings_forward(
        ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
        ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
                                        ffa_id_t sender_id,
                                        ffa_id_t receiver_id);

bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
                                       ffa_id_t receiver_vm_id, uint32_t flags,
                                       ffa_notifications_bitmap_t bitmap,
                                       struct ffa_value *ret);

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
                                        ffa_id_t receiver_id, uint32_t flags);

bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
                                        ffa_vcpu_index_t vcpu_id,
                                        ffa_notifications_bitmap_t *from_sp,
                                        struct ffa_value *ret);

bool plat_ffa_notifications_get_framework_notifications(
        struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
        uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret);

void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked);

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked);

void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked);

/**
 * Checks whether managed exit is supported by the given SP.
 */
bool plat_ffa_vm_managed_exit_supported(struct vm *vm);

/**
 * Encodes memory handle according to section 5.10.2 of the FF-A v1.0 spec.
 */
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index);

/**
 * Checks whether the given handle was allocated by the current world,
 * according to the handle encoding rules.
 */
bool plat_ffa_memory_handle_allocated_by_current_world(
        ffa_memory_handle_t handle);
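
/*
 * Illustrative sketch of the handle encoding (an assumption for exposition,
 * not necessarily the exact scheme used): one bit of the handle identifies
 * the allocating world and the remaining bits carry the allocator's index.
 * The constant below is hypothetical.
 *
 *        #define HANDLE_ALLOCATOR_BIT (UINT64_C(1) << 63)
 *
 *        ffa_memory_handle_t handle = plat_ffa_memory_handle_make(index);
 *
 *        // One world sets the bit and the other leaves it clear, so
 *        // plat_ffa_memory_handle_allocated_by_current_world() only needs to
 *        // test the handle against HANDLE_ALLOCATOR_BIT.
 */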

/**
 * For non-secure memory, retrieve the NS mode if the partition manager
 * supports it. The SPMC returns MM_MODE_NS, and the hypervisor returns 0 as
 * it only deals with NS accesses by default.
 */
uint32_t plat_ffa_other_world_mode(void);

/**
 * For memory management operations between the SWd and the NWd, the SPMC
 * might need to operate on NS memory. The function below returns the mode to
 * use in the mm.h library, depending on the memory owner's id.
 */
uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id);
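
/*
 * Illustrative sketch (hypothetical caller): when mapping memory as part of
 * a memory management operation, the extra mode bits depend on which world
 * owns the memory, e.g.:
 *
 *        uint32_t mode = MM_MODE_R | MM_MODE_W;
 *
 *        mode |= plat_ffa_owner_world_mode(owner_id);
 *
 * On the SPMC this may add MM_MODE_NS for NWd-owned memory; on the
 * hypervisor the helpers above return 0.
 */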

/**
 * Return the FF-A partition info VM/SP properties given the VM id.
 */
ffa_partition_properties_t plat_ffa_partition_properties(
        ffa_id_t vm_id, const struct vm *target);

/**
 * Get the NWd VM's structure.
 */
struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id);

struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id);

/**
 * Creates a bitmap for the VM of the given ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_create(
        ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count);

/**
 * Issues an FFA_NOTIFICATION_BITMAP_CREATE call.
 * Returns true if the call succeeds, and false if it returns FFA_ERROR_32.
 */
bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
                                               ffa_vcpu_count_t vcpu_count);
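
/*
 * Illustrative sketch (hypothetical caller): the hypervisor forwarding the
 * bitmap creation for a newly created VM and aborting its notification
 * setup on failure. `vm_id` and `vcpu_count` are the parameters declared
 * above.
 *
 *        if (!plat_ffa_notifications_bitmap_create_call(vm_id, vcpu_count)) {
 *                // The SPMC returned FFA_ERROR_32; notifications are
 *                // unavailable for this VM.
 *        }
 */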

/**
 * Destroys the notifications bitmap for the given VM ID.
 */
struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id);

/**
 * Helper to get the struct notifications, depending on the sender's id.
 */
struct notifications *plat_ffa_vm_get_notifications_senders_world(
        struct vm_locked vm_locked, ffa_id_t sender_id);

/**
 * Helper to check if FF-A ID is a VM ID.
 */
bool plat_ffa_is_vm_id(ffa_id_t vm_id);

/**
 * Forward normal world calls of the FFA_RUN ABI to the other world.
 */
bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
                          struct ffa_value *ret);

bool plat_ffa_notification_info_get_call(struct ffa_value *ret);

bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
                                        uint32_t *lists_sizes,
                                        uint32_t *lists_count,
                                        const uint32_t ids_count_max);

/** Helper to set the SRI current state. */
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if there has been
 * a call to FFA_NOTIFICATION_SET and the SRI has been delayed.
 * To be called at a context switch to the NWd.
 */
void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);

/**
 * Helper to send the SRI and safely update `ffa_sri_state`, if it hasn't been
 * delayed in a call to FFA_NOTIFICATION_SET.
 */
void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);

/**
 * Initialize the Schedule Receiver Interrupts needed in the context of
 * notifications support.
 */
void plat_ffa_sri_init(struct cpu *cpu);

void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
                                            uint32_t *lists_sizes,
                                            uint32_t *lists_count,
                                            const uint32_t ids_count_max);

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);

struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
                                           struct vcpu **next);

/**
 * Check if current SP can resume target VM/SP using FFA_RUN ABI.
 */
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
                         ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
                         struct ffa_value *run_ret, struct vcpu **next);

/**
 * Deactivate interrupt.
 */
int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
                                      struct vcpu *current);

void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next);
bool plat_ffa_inject_notification_pending_interrupt(
        struct vcpu_locked next_locked, struct vcpu_locked current_locked,
        struct vm_locked receiver_locked);

bool plat_ffa_partition_info_get_regs_forward_allowed(void);

void plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
                                         const uint32_t flags,
                                         struct ffa_partition_info *partitions,
                                         ffa_vm_count_t *ret_count);

void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
                                       paddr_t fdt_addr,
                                       size_t fdt_allocated_size,
                                       const struct manifest_vm *manifest_vm,
                                       const struct boot_params *boot_params,
                                       struct mpool *ppool);

/**
 * Returns true if the FFA_SECONDARY_EP_REGISTER interface is supported at
 * the virtual FF-A instance.
 */
bool plat_ffa_is_secondary_ep_register_supported(void);

/**
 * Perform checks for the state transition being requested by the Partition
 * based on its runtime model and return false if an illegal transition is
 * being performed.
 */
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
                                             ffa_id_t vm_id,
                                             ffa_id_t receiver_vm_id,
                                             struct vcpu_locked locked_vcpu,
                                             uint32_t func,
                                             enum vcpu_state *next_state);

struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current);
void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
                                         struct vcpu_locked target_locked);

void plat_ffa_wind_call_chain_ffa_direct_req(
        struct vcpu_locked current_locked,
        struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id);

void plat_ffa_unwind_call_chain_ffa_direct_resp(
        struct vcpu_locked current_locked, struct vcpu_locked next_locked);

void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
                                        struct vm_locked vm_locked);

bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
                                        struct vcpu **next,
                                        struct ffa_value to_ret,
                                        struct ffa_value *signal_interrupt);
/*
 * Handles FF-A memory share calls with recipients from the other world.
 */
struct ffa_value plat_ffa_other_world_mem_send(
        struct vm *from, uint32_t share_func,
        struct ffa_memory_region **memory_region, uint32_t length,
        uint32_t fragment_length, struct mpool *page_pool);

/**
 * Handles the memory reclaim if a memory handle from the other world is
 * provided.
 */
struct ffa_value plat_ffa_other_world_mem_reclaim(
        struct vm *to, ffa_memory_handle_t handle,
        ffa_memory_region_flags_t flags, struct mpool *page_pool);

/**
 * Handles the memory retrieve request if the specified memory handle belongs
 * to the other world.
 */
struct ffa_value plat_ffa_other_world_mem_retrieve(
        struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
        uint32_t length, struct mpool *page_pool);

/**
 * Handles the continuation of the memory send operation in case the memory
 * region descriptor contains multiple segments.
 */
struct ffa_value plat_ffa_other_world_mem_send_continue(
        struct vm *from, void *fragment, uint32_t fragment_length,
        ffa_memory_handle_t handle, struct mpool *page_pool);

bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked);

/**
 * FF-A v1.0 FFA_MSG_SEND interface.
 * Implemented for the Hypervisor, but not in the SPMC.
 */
struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
                                   ffa_id_t receiver_vm_id, uint32_t size,
                                   struct vcpu *current, struct vcpu **next);

struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
                                        struct vcpu **next,
                                        uint32_t timeout_low,
                                        uint32_t timeout_high);

ffa_memory_attributes_t plat_ffa_memory_security_mode(
        ffa_memory_attributes_t attributes, uint32_t mode);

/**
 * FF-A v1.2 FFA_ERROR interface.
 * Implemented for SPMC in RTM_SP_INIT runtime model.
 */
struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
                                   uint32_t error_code);

bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id);

struct ffa_value plat_ffa_msg_recv(bool block,
                                   struct vcpu_locked current_locked,
                                   struct vcpu **next);

int64_t plat_ffa_mailbox_writable_get(const struct vcpu *current);

int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current);