/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/abi.h"
#include "hf/ffa.h"
#include "hf/types.h"

/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism to call to the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);
struct ffa_value ffa_call(struct ffa_value args);
struct ffa_value ffa_call_ext(struct ffa_value args);
void memcpy_s(void *dest, size_t destsz, const void *src, size_t count);

/**
 * Returns the VM's own ID.
 */
static inline struct ffa_value ffa_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
static inline struct ffa_value ffa_spm_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_SPM_ID_GET_32});
}

/**
 * Requests information for partitions instantiated in the system via
 * registers (as opposed to the RX buffer in the case of
 * ffa_partition_info_get).
 */
static inline struct ffa_value ffa_partition_info_get_regs(
	const struct ffa_uuid *uuid, const uint16_t start_index,
	const uint16_t tag)
{
	uint64_t arg1 = (uint64_t)uuid->uuid[1] << 32 | uuid->uuid[0];
	uint64_t arg2 = (uint64_t)uuid->uuid[3] << 32 | uuid->uuid[2];
	uint64_t arg3 = start_index | (uint64_t)tag << 16;

	return ffa_call_ext((struct ffa_value){
		.func = FFA_PARTITION_INFO_GET_REGS_64,
		.arg1 = arg1,
		.arg2 = arg2,
		.arg3 = arg3,
	});
}
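
/*
 * Example: a minimal usage sketch (an illustration, not part of the original
 * API) querying information for all partitions, starting from index 0 with
 * tag 0. The all-zero initializer for the Null UUID and the FFA_ERROR_32
 * check are assumptions consistent with the rest of this header.
 *
 *	struct ffa_uuid uuid = {{0, 0, 0, 0}};
 *	struct ffa_value ret = ffa_partition_info_get_regs(&uuid, 0, 0);
 *
 *	if (ret.func == FFA_ERROR_32) {
 *		// Inspect the error code reported by the callee in ret.arg2.
 *	}
 */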

/**
 * Requests information for partitions instantiated in the system. If the
 * FFA_PARTITION_COUNT_FLAG is not set, the information is returned
 * in the RX buffer of the caller as an array of partition information
 * descriptors (struct ffa_partition_info).
 *
 * A Null UUID (UUID that is all zeros) returns information for all partitions,
 * whereas a non-Null UUID returns information only for partitions that match.
 *
 * Flags may include:
 * - FFA_PARTITION_COUNT_FLAG, which specifies whether the partition info
 *   descriptors are returned in the RX buffer or just the count in arg2:
 *   1 returns just the count.
 *   0 returns the count along with the partition info descriptors.
 *
 * Returns:
 * - FFA_SUCCESS on success. The count of partition information descriptors
 *   populated in the RX buffer is returned in arg2 (register w2).
 * - FFA_BUSY if the caller's RX buffer is not free.
 * - FFA_NO_MEMORY if the results do not fit in the caller's RX buffer.
 * - FFA_INVALID_PARAMETERS for an unrecognized UUID.
 */
static inline struct ffa_value ffa_partition_info_get(
	const struct ffa_uuid *uuid, const uint32_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
					   .arg1 = uuid->uuid[0],
					   .arg2 = uuid->uuid[1],
					   .arg3 = uuid->uuid[2],
					   .arg4 = uuid->uuid[3],
					   .arg5 = flags});
}
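
/*
 * Example: a minimal usage sketch counting all partitions without touching
 * the RX buffer, by passing the FFA_PARTITION_COUNT_FLAG described above.
 * The FFA_SUCCESS_32 comparison is an assumption consistent with the Returns
 * section.
 *
 *	struct ffa_uuid uuid = {{0, 0, 0, 0}};	// Null UUID: all partitions.
 *	struct ffa_value ret =
 *		ffa_partition_info_get(&uuid, FFA_PARTITION_COUNT_FLAG);
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		uint32_t vm_count = ret.arg2;
 *	}
 */
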
/**
 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1
 * Registers vCPU secondary entry point for the caller VM.
 * Called from secure virtual FF-A instance.
 */
static inline struct ffa_value ffa_secondary_ep_register(uintptr_t address)
{
	return ffa_call((struct ffa_value){.func = FFA_SECONDARY_EP_REGISTER_64,
					   .arg1 = address});
}

/**
 * Returns the VM's own ID.
 */
static inline ffa_id_t hf_vm_get_id(void)
{
	return ffa_id_get().arg2;
}

/**
 * Runs the given vCPU of the given VM.
 */
static inline struct ffa_value ffa_run(ffa_id_t vm_id,
				       ffa_vcpu_index_t vcpu_idx)
{
	return ffa_call((struct ffa_value){.func = FFA_RUN_32,
					   ffa_vm_vcpu(vm_id, vcpu_idx)});
}

/**
 * Hints that the vCPU is willing to yield its current use of the physical CPU
 * and intends to be resumed at the expiration of the timeout.
 */
static inline struct ffa_value ffa_yield_timeout(uint32_t timeout_low,
						 uint32_t timeout_high)
{
	return ffa_call((struct ffa_value){.func = FFA_YIELD_32,
					   .arg2 = timeout_low,
					   .arg3 = timeout_high});
}

/**
 * Relinquishes the physical CPU cycles currently allocated to the caller.
 */
static inline struct ffa_value ffa_yield(void)
{
	return ffa_yield_timeout(0, 0);
}

/**
 * Configures the pages to send/receive data through. The pages must not be
 * shared.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned or are the same.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *   the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters.
 */
static inline struct ffa_value ffa_rxtx_map(hf_ipaddr_t send, hf_ipaddr_t recv)
{
	return ffa_call(
		(struct ffa_value){.func = FFA_RXTX_MAP_64,
				   .arg1 = send,
				   .arg2 = recv,
				   .arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
}
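
/*
 * Example: a minimal usage sketch mapping a pair of statically allocated,
 * page-aligned buffers. PAGE_SIZE and the use of alignas() are assumptions
 * about the environment; any page-aligned memory of HF_MAILBOX_SIZE bytes
 * per buffer works.
 *
 *	alignas(PAGE_SIZE) static uint8_t send_buf[HF_MAILBOX_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_buf[HF_MAILBOX_SIZE];
 *
 *	struct ffa_value ret =
 *		ffa_rxtx_map((hf_ipaddr_t)send_buf, (hf_ipaddr_t)recv_buf);
 */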

/**
 * Unmaps the RX/TX buffer pair of an endpoint or Hypervisor from the
 * translation regime of the callee.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
 *   behalf of the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 */
static inline struct ffa_value ffa_rxtx_unmap(void)
{
	/* Note that allocator ID MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_RXTX_UNMAP_32});
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer.
 *
 * If the recipient's receive buffer is busy, the caller can optionally
 * register to be notified when the recipient's receive buffer becomes
 * available.
 *
 * Attributes may include:
 * - FFA_MSG_SEND_NOTIFY, to notify the caller when it should try again.
 * - FFA_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
 *   message.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
 * - BUSY: the message could not be delivered either because the mailbox
 *   was full or the target VM is not yet set up.
 */
static inline struct ffa_value ffa_msg_send(ffa_id_t sender_vm_id,
					    ffa_id_t target_vm_id,
					    uint32_t size, uint32_t attributes)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = size,
		.arg4 = attributes});
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the receiver.
 *
 * `flags` may include a 'Delay Schedule Receiver interrupt'.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
 * - BUSY: receiver's mailbox was full.
 * - DENIED: receiver is not in a state to handle the request or doesn't
 *   support indirect messages.
 */
static inline struct ffa_value ffa_msg_send2(uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND2_32, .arg1 = 0, .arg2 = flags});
}

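/*
 * The memory management calls below (donate/lend/share/retrieve) operate on a
 * memory transaction descriptor that the caller is expected to have written to
 * its TX buffer beforehand: length is the total size of that descriptor in
 * bytes and fragment_length the number of bytes sent in this call (the two are
 * equal when the descriptor fits in a single fragment). This note is a summary
 * of the FF-A memory sharing flow, not a normative statement.
 */
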
static inline struct ffa_value ffa_mem_donate(uint32_t length,
					      uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_DONATE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_lend(uint32_t length,
					    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_LEND_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_share(uint32_t length,
					     uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_SHARE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_retrieve_req(uint32_t length,
						    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_relinquish(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RELINQUISH_32});
}

static inline struct ffa_value ffa_mem_reclaim(ffa_memory_handle_t handle,
					       ffa_memory_region_flags_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = flags});
}

static inline struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
					       uint32_t fragment_offset)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_offset});
}

static inline struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
					       uint32_t fragment_length)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_length});
}

/**
 * Called by secondary VMs to receive a message. This will block until a
 * message is received.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * If no message is immediately available and there are no enabled and pending
 * interrupts (irrespective of whether interrupts are enabled globally), then
 * this will block until a message is available or an enabled interrupt becomes
 * pending. This matches the behaviour of the WFI instruction on AArch64, except
 * that a message becoming available is also treated like a wake-up event.
 *
 * Returns:
 * - FFA_MSG_SEND if a message is successfully received.
 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 */
static inline struct ffa_value ffa_msg_wait(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MSG_WAIT_32});
}
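
/*
 * Example: a minimal receive-loop sketch for a secondary VM, based on the
 * return values documented above. FFA_MSG_SEND_32 is assumed to be the
 * function id behind the FFA_MSG_SEND return value; message parsing and error
 * handling are elided.
 *
 *	for (;;) {
 *		struct ffa_value ret = ffa_msg_wait();
 *
 *		if (ret.func == FFA_MSG_SEND_32) {
 *			// Read the message from the RX buffer here.
 *			ffa_rx_release();
 *		}
 *	}
 */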

/**
 * Called by secondary VMs to receive a message. The call returns immediately,
 * whether or not a message is available.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * Returns:
 * - FFA_MSG_SEND if a message is successfully received.
 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 * - FFA_ERROR FFA_RETRY if there was no pending message.
 */
static inline struct ffa_value ffa_msg_poll(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MSG_POLL_32});
}

/**
 * Releases the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 *
 * Returns:
 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters. Waiters should be retrieved by calling
 *   hf_mailbox_waiter_get.
 */
static inline struct ffa_value ffa_rx_release(void)
{
	return ffa_call((struct ffa_value){.func = FFA_RX_RELEASE_32});
}

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
static inline int64_t hf_mailbox_writable_get(void)
{
	return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
static inline int64_t hf_mailbox_waiter_get(ffa_id_t vm_id)
{
	return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
}

/**
 * Enables or disables a given interrupt ID.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable,
					  uint32_t type)
{
	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, type);
}

/**
 * Gets the ID of the pending interrupt (if any) and acknowledges it.
 *
 * Returns HF_INVALID_INTID if there are no pending interrupts.
 */
static inline uint32_t hf_interrupt_get(void)
{
	return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
}

/**
 * Deactivates the physical interrupt.
 *
 * Returns 0 on success, or -1 otherwise.
 */
static inline int64_t hf_interrupt_deactivate(uint32_t intid)
{
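	/*
	 * intid is passed as both the physical and virtual interrupt ID,
	 * assuming the identity mapping described for
	 * hf_interrupt_reconfigure below.
	 */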
	return hf_call(HF_INTERRUPT_DEACTIVATE, intid, intid, 0);
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *   ID is invalid, or the current VM is not allowed to inject interrupts to
 *   the target VM.
 * - 0 on success if no further action is needed.
 * - 1 if it was called by the primary VM and the primary VM now needs to wake
 *   up or kick the target vCPU.
 */
static inline int64_t hf_interrupt_inject(ffa_id_t target_vm_id,
					  ffa_vcpu_index_t target_vcpu_idx,
					  uint32_t intid)
{
	return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
		       intid);
}

/**
 * Reconfigures the virtual interrupt belonging to the current SP. Note that
 * the virtual interrupt is identity mapped to the physical interrupt id.
 *
 * Returns -1 on failure, or 0 on success.
 */
static inline int64_t hf_interrupt_reconfigure(uint32_t intid, uint32_t command,
					       uint32_t value)
{
	return hf_call(HF_INTERRUPT_RECONFIGURE, intid, command, value);
}

static inline int64_t hf_interrupt_reconfigure_target_cpu(
	uint32_t intid, ffa_vcpu_index_t target_cpu_index)
{
	return hf_interrupt_reconfigure(intid, INT_RECONFIGURE_TARGET_PE,
					(uint32_t)target_cpu_index);
}

/** Obtains Hafnium's version of the implemented FF-A specification. */
static inline int32_t ffa_version(uint32_t requested_version)
{
	return ffa_call((struct ffa_value){.func = FFA_VERSION_32,
					   .arg1 = requested_version})
		.func;
}

/**
 * Discovery function returning information about the implementation of
 * optional FF-A interfaces.
 *
 * Returns:
 * - FFA_SUCCESS in .func if the optional interface with function_id is
 *   implemented.
 * - FFA_ERROR in .func if the optional interface with function_id is not
 *   implemented.
 */
static inline struct ffa_value ffa_features(uint64_t function_id)
{
	return ffa_call((struct ffa_value){
		.func = FFA_FEATURES_32,
		.arg1 = function_id,
	});
}
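
/*
 * Example: a minimal sketch probing for an optional interface before using
 * it. The FFA_SUCCESS_32 comparison is an assumption consistent with the
 * Returns section above.
 *
 *	bool indirect_msg_supported =
 *		ffa_features(FFA_MSG_SEND2_32).func == FFA_SUCCESS_32;
 */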

/**
 * Discovery function returning information about the implementation of
 * optional FF-A interfaces which require an extra input property.
 *
 * Returns:
 * - FFA_SUCCESS in .func if the optional interface with function_id is
 *   implemented.
 * - FFA_ERROR in .func if the optional interface with function_id is not
 *   implemented.
 */
static inline struct ffa_value ffa_features_with_input_property(
	uint64_t function_id, uint64_t param)
{
	return ffa_call((struct ffa_value){
		.func = FFA_FEATURES_32, .arg1 = function_id, .arg2 = param});
}

static inline struct ffa_value ffa_msg_send_direct_req(
	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_REQ_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}
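
/*
 * Example: a minimal sketch sending a direct request carrying a single payload
 * word and checking that the callee answered with a direct response. target_id
 * and the 0x42 payload are placeholders, not values defined by this header.
 *
 *	struct ffa_value ret = ffa_msg_send_direct_req(
 *		hf_vm_get_id(), target_id, 0x42, 0, 0, 0, 0);
 *
 *	if (ret.func == FFA_MSG_SEND_DIRECT_RESP_32) {
 *		// ret.arg3..arg7 hold the response payload.
 *	}
 */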

static inline struct ffa_value ffa_msg_send_direct_resp(
	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_RESP_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}

static inline struct ffa_value ffa_notification_bind(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_BIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_unbind(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_UNBIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_set(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_get(ffa_id_t receiver_vm_id,
						    ffa_vcpu_index_t vcpu_id,
						    uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | (receiver_vm_id),
		.arg2 = flags,
	});
}

static inline struct ffa_value ffa_notification_info_get(void)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});
}
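
/*
 * Example: a minimal notification flow sketch. The receiver binds a
 * notification bit to the sender, the sender sets it, and the receiver later
 * retrieves it. sender_id, receiver_id and flags are placeholders; the flag
 * encodings and the layout of the pending-notification result are defined in
 * hf/ffa.h and the FF-A specification, not here.
 *
 *	ffa_notifications_bitmap_t bit0 = 1U;
 *
 *	// Receiver side:
 *	ffa_notification_bind(sender_id, hf_vm_get_id(), 0, bit0);
 *
 *	// Sender side:
 *	ffa_notification_set(hf_vm_get_id(), receiver_id, 0, bit0);
 *
 *	// Receiver side, when scheduled to handle the notification:
 *	struct ffa_value ret = ffa_notification_get(hf_vm_get_id(), 0, flags);
 */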

static inline struct ffa_value ffa_mem_perm_get(uint64_t base_va)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_GET_32,
					   .arg1 = base_va});
}

static inline struct ffa_value ffa_mem_perm_set(uint64_t base_va,
						uint32_t page_count,
						uint32_t mem_perm)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_SET_32,
					   .arg1 = base_va,
					   .arg2 = page_count,
					   .arg3 = mem_perm});
}

static inline struct ffa_value ffa_console_log_32(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_32,
		.arg1 = size,
	};

	uint64_t *arg_addrs[] = {&req.arg2, &req.arg3, &req.arg4,
				 &req.arg5, &req.arg6, &req.arg7};

	uint32_t src_index = 0;
	uint32_t arg_idx = 0;

	while (size > 0 && arg_idx < 6) {
		size_t arg_size =
			size < sizeof(uint32_t) ? size : sizeof(uint32_t);
		memcpy_s(arg_addrs[arg_idx++], sizeof(uint32_t),
			 &src[src_index], arg_size);
		src_index += arg_size;
		size -= arg_size;
	}

	return ffa_call(req);
}
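
/*
 * Example: a minimal sketch logging a short string. The packing loop above
 * fills at most 6 * 4 = 24 bytes into arg2..arg7, so longer strings must be
 * split across calls by the caller (an observation about this wrapper, not a
 * specification quote).
 *
 *	static const char msg[] = "hello\n";
 *
 *	ffa_console_log_32(msg, sizeof(msg) - 1);
 */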

static inline struct ffa_value ffa_console_log_64(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_64,
		.arg1 = size,
	};
	memcpy_s(&req.arg2, sizeof(uint64_t) * 6, src, size);

	return ffa_call(req);
}