/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/abi.h"
#include "hf/ffa.h"
#include "hf/types.h"

/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism used to call into the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);
struct ffa_value ffa_call(struct ffa_value args);
struct ffa_value ffa_call_ext(struct ffa_value args);
/* NOLINTNEXTLINE(readability-redundant-declaration) */
void memcpy_s(void *dest, size_t destsz, const void *src, size_t count);
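
/*
 * Usage sketch (illustrative, not part of this API): every FF-A wrapper below
 * packs the function ID into `.func` and the parameter registers into
 * `.arg1`..`.arg7` of a `struct ffa_value`, then traps via ffa_call() or, for
 * ABIs that may use the extended register set, ffa_call_ext(). For example,
 * FFA_ID_GET could be issued directly as:
 *
 *	struct ffa_value ret =
 *		ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
 *	// On success, the caller's FF-A ID is returned in ret.arg2.
 */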

/**
 * Returns the VM's own ID.
 */
static inline struct ffa_value ffa_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
static inline struct ffa_value ffa_spm_id_get(void)
{
	return ffa_call((struct ffa_value){.func = FFA_SPM_ID_GET_32});
}

/**
 * Requests information for partitions instantiated in the system via registers
 * (as opposed to the RX buffer, which ffa_partition_info_get uses).
 */
static inline struct ffa_value ffa_partition_info_get_regs(
	const struct ffa_uuid *uuid, const uint16_t start_index,
	const uint16_t tag)
{
	uint64_t arg1;
	uint64_t arg2;
	uint64_t arg3 = start_index | (uint64_t)tag << 16;

	ffa_uuid_to_u64x2(&arg1, &arg2, uuid);
	return ffa_call_ext((struct ffa_value){
		.func = FFA_PARTITION_INFO_GET_REGS_64,
		.arg1 = arg1,
		.arg2 = arg2,
		.arg3 = arg3,
	});
}
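
/*
 * Usage sketch (illustrative): querying all partitions through registers,
 * starting at index 0 with tag 0. The layout of the returned registers is
 * defined by FFA_PARTITION_INFO_GET_REGS in the FF-A specification; only the
 * call itself is shown here.
 *
 *	struct ffa_uuid uuid = {0};	// Null UUID: match all partitions.
 *	struct ffa_value ret = ffa_partition_info_get_regs(&uuid, 0, 0);
 *	// ret.func indicates FFA_SUCCESS or FFA_ERROR; on success the
 *	// descriptors are carried in the remaining registers.
 */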

/**
 * Requests information for partitions instantiated in the system. If the
 * FFA_PARTITION_COUNT_FLAG is not set, the information is returned
 * in the RX buffer of the caller as an array of partition information
 * descriptors (struct ffa_partition_info).
 *
 * A Null UUID (UUID that is all zeros) returns information for all partitions,
 * whereas a non-Null UUID returns information only for partitions that match.
 *
 * Flags may include:
 *  - FFA_PARTITION_COUNT_FLAG, which specifies whether the partition info
 *    descriptors are returned in the RX buffer or just the count in arg2:
 *    1 returns just the count;
 *    0 returns the count along with the partition info descriptors.
 *
 * Returns:
 *  - FFA_SUCCESS on success. The count of partition information descriptors
 *    populated in the RX buffer is returned in arg2 (register w2).
 *  - FFA_BUSY if the caller's RX buffer is not free.
 *  - FFA_NO_MEMORY if the results do not fit in the caller's RX buffer.
 *  - FFA_INVALID_PARAMETERS for an unrecognized UUID.
 */
static inline struct ffa_value ffa_partition_info_get(
	const struct ffa_uuid *uuid, const uint32_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
					   .arg1 = uuid->uuid[0],
					   .arg2 = uuid->uuid[1],
					   .arg3 = uuid->uuid[2],
					   .arg4 = uuid->uuid[3],
					   .arg5 = flags});
}
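
/*
 * Usage sketch (illustrative): a count-only query does not touch the RX
 * buffer, so it can be issued before the mailbox is set up. A full query
 * fills the RX buffer, which must then be handed back with ffa_rx_release().
 *
 *	struct ffa_uuid uuid = {0};	// Null UUID: all partitions.
 *	struct ffa_value ret =
 *		ffa_partition_info_get(&uuid, FFA_PARTITION_COUNT_FLAG);
 *	// On success ret.arg2 holds the number of partitions; no descriptors
 *	// are written to the RX buffer.
 */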

/**
 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1
 * Registers vCPU secondary entry point for the caller VM.
 * Called from secure virtual FF-A instance.
 */
static inline struct ffa_value ffa_secondary_ep_register(uintptr_t address)
{
	return ffa_call((struct ffa_value){.func = FFA_SECONDARY_EP_REGISTER_64,
					   .arg1 = address});
}

/**
 * Returns the VM's own ID.
 */
static inline ffa_id_t hf_vm_get_id(void)
{
	return ffa_id_get().arg2;
}

/**
 * Runs the given vCPU of the given VM.
 */
static inline struct ffa_value ffa_run(ffa_id_t vm_id,
				       ffa_vcpu_index_t vcpu_idx)
{
	return ffa_call_ext((struct ffa_value){.func = FFA_RUN_32,
					       ffa_vm_vcpu(vm_id, vcpu_idx)});
}
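
/*
 * Usage sketch (illustrative): a primary scheduler giving CPU cycles to vCPU 0
 * of a secondary VM. `secondary_id` is a hypothetical ID obtained elsewhere,
 * e.g. from partition discovery.
 *
 *	struct ffa_value ret = ffa_run(secondary_id, 0);
 *	// ret.func tells the scheduler why the vCPU stopped running, e.g.
 *	// FFA_MSG_WAIT_32 when it blocked waiting for a message.
 */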

/**
 * Hints that the vCPU is willing to yield its current use of the physical CPU
 * and intends to be resumed at the expiration of the timeout.
 */
static inline struct ffa_value ffa_yield_timeout(uint32_t timeout_low,
						 uint32_t timeout_high)
{
	return ffa_call((struct ffa_value){.func = FFA_YIELD_32,
					   .arg2 = timeout_low,
					   .arg3 = timeout_high});
}

/**
 * Relinquishes the vCPU's current use of the physical CPU back to the
 * scheduler.
 */
static inline struct ffa_value ffa_yield(void)
{
	return ffa_yield_timeout(0, 0);
}

/**
 * Configures the pages to send/receive data through. The pages must not be
 * shared.
 *
 * Returns:
 *  - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *    aligned or are the same.
 *  - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *    due to insufficient page table memory.
 *  - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *    the caller.
 *  - FFA_SUCCESS on success if no further action is needed.
 *  - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *    needs to wake up or kick waiters.
 */
static inline struct ffa_value ffa_rxtx_map(hf_ipaddr_t send, hf_ipaddr_t recv)
{
	return ffa_call(
		(struct ffa_value){.func = FFA_RXTX_MAP_64,
				   .arg1 = send,
				   .arg2 = recv,
				   .arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
}
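
/*
 * Usage sketch (illustrative): mapping a statically allocated, page-aligned
 * send/receive buffer pair. `send_page`/`recv_page` are hypothetical buffers
 * owned exclusively by the caller; addresses are passed as IPAs, assumed here
 * to equal the VAs.
 *
 *	static alignas(FFA_PAGE_SIZE) uint8_t send_page[HF_MAILBOX_SIZE];
 *	static alignas(FFA_PAGE_SIZE) uint8_t recv_page[HF_MAILBOX_SIZE];
 *
 *	struct ffa_value ret = ffa_rxtx_map((hf_ipaddr_t)send_page,
 *					    (hf_ipaddr_t)recv_page);
 */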

/**
 * Unmaps the RX/TX buffer pair of an endpoint or Hypervisor from the
 * translation regime of the callee.
 *
 * Returns:
 *  - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
 *    behalf of the caller.
 *  - FFA_SUCCESS on success if no further action is needed.
 */
static inline struct ffa_value ffa_rxtx_unmap(void)
{
	/* Note that allocator ID MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_RXTX_UNMAP_32});
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 *
 * Attributes may include:
 *  - FFA_MSG_SEND_NOTIFY, to notify the caller when it should try again.
 *  - FFA_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
 *    message.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 *  - INVALID_PARAMETERS: one or more of the parameters do not conform.
 *  - BUSY: the message could not be delivered either because the mailbox
 *    was full or the target VM is not yet set up.
 */
static inline struct ffa_value ffa_msg_send(ffa_id_t sender_vm_id,
					    ffa_id_t target_vm_id,
					    uint32_t size, uint32_t attributes)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = size,
		.arg4 = attributes});
}
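
/*
 * Usage sketch (illustrative): sending a small payload. The payload must
 * already have been copied into the caller's TX buffer (the `send` page given
 * to ffa_rxtx_map()); `tx_buffer`, `payload` and the IDs below are
 * hypothetical.
 *
 *	memcpy_s(tx_buffer, HF_MAILBOX_SIZE, payload, payload_size);
 *	struct ffa_value ret =
 *		ffa_msg_send(own_id, receiver_id, payload_size, 0);
 *	// A BUSY error means the receiver's mailbox is full; retry later or
 *	// pass FFA_MSG_SEND_NOTIFY to be told when to try again.
 */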

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the receiver.
 *
 * `flags` may include a 'Delay Schedule Receiver interrupt'.
 *
 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
 *  - INVALID_PARAMETERS: one or more of the parameters do not conform.
 *  - BUSY: receiver's mailbox was full.
 *  - DENIED: receiver is not in a state to handle the request or doesn't
 *    support indirect messages.
 */
static inline struct ffa_value ffa_msg_send2(uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND2_32, .arg1 = 0, .arg2 = flags});
}

/*
 * Wrappers for the FF-A memory management ABIs. For the sharing calls below,
 * `length` is the total size of the memory transaction descriptor written to
 * the sender's TX buffer and `fragment_length` is the size of the fragment
 * carried by this call.
 */

static inline struct ffa_value ffa_mem_donate(uint32_t length,
					      uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_DONATE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_lend(uint32_t length,
					    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_LEND_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_share(uint32_t length,
					     uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_SHARE_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_retrieve_req(uint32_t length,
						    uint32_t fragment_length)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
					   .arg1 = length,
					   .arg2 = fragment_length});
}

static inline struct ffa_value ffa_mem_relinquish(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RELINQUISH_32});
}

static inline struct ffa_value ffa_mem_reclaim(ffa_memory_handle_t handle,
					       ffa_memory_region_flags_t flags)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = flags});
}

static inline struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
					       uint32_t fragment_offset)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_offset});
}

static inline struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
					       uint32_t fragment_length)
{
	/* Note that sender MBZ at virtual instance. */
	return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
					   .arg1 = (uint32_t)handle,
					   .arg2 = (uint32_t)(handle >> 32),
					   .arg3 = fragment_length});
}
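
/*
 * Usage sketch (illustrative): a single-fragment share as seen from the
 * sender. The sender first writes a memory transaction descriptor (struct
 * ffa_memory_region from "hf/ffa.h") to its TX buffer; `descriptor_length`
 * below is the hypothetical size of that descriptor.
 *
 *	struct ffa_value ret =
 *		ffa_mem_share(descriptor_length, descriptor_length);
 *	// On success the memory handle is returned split across ret.arg2 and
 *	// ret.arg3, and is later passed to ffa_mem_reclaim() once the
 *	// borrower has relinquished the region.
 */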

/**
 * Called by secondary VMs to receive a message. This will block until a message
 * is received.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * By default, FFA_MSG_WAIT will release the mailbox back to the SPMC. The
 * FFA_MSG_WAIT_FLAG_RETAIN_RX flag can be used with the
 * `ffa_msg_wait_with_flags` function to override this default and allow the VM
 * to retain the RX buffer.
 *
 * If no message is immediately available and there are no enabled and pending
 * interrupts (irrespective of whether interrupts are enabled globally), then
 * this will block until a message is available or an enabled interrupt becomes
 * pending. This matches the behaviour of the WFI instruction on AArch64, except
 * that a message becoming available is also treated like a wake-up event.
 *
 * Returns:
 *  - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 *  - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 */
static inline struct ffa_value ffa_msg_wait_with_flags(uint32_t flags)
{
	return ffa_call_ext(
		(struct ffa_value){.func = FFA_MSG_WAIT_32, .arg2 = flags});
}

static inline struct ffa_value ffa_msg_wait(void)
{
	return ffa_msg_wait_with_flags(0);
}
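
/*
 * Usage sketch (illustrative): the classic secondary-VM message loop. The
 * vCPU blocks in ffa_msg_wait() until woken, handles whatever arrived in its
 * RX buffer, releases the buffer and waits again.
 *
 *	struct ffa_value ret = ffa_msg_wait();
 *	for (;;) {
 *		// ... read and handle the message in the RX buffer ...
 *		ffa_rx_release();
 *		ret = ffa_msg_wait();
 *	}
 */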

/**
 * Called by secondary VMs to receive a message. The call will return whether or
 * not a message is available.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * Returns:
 *  - FFA_MSG_SEND if a message is successfully received.
 *  - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
 *  - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
 *  - FFA_ERROR FFA_RETRY if there was no pending message.
 */
static inline struct ffa_value ffa_msg_poll(void)
{
	return ffa_call((struct ffa_value){.func = FFA_MSG_POLL_32});
}

/**
 * Releases the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 *
 * Returns:
 *  - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
 *  - FFA_SUCCESS on success if no further action is needed.
 *  - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *    needs to wake up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
static inline struct ffa_value ffa_rx_release(void)
{
	return ffa_call((struct ffa_value){.func = FFA_RX_RELEASE_32});
}

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
static inline int64_t hf_mailbox_writable_get(void)
{
	return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
static inline int64_t hf_mailbox_waiter_get(ffa_id_t vm_id)
{
	return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
}

/**
 * Enables or disables a given interrupt ID.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable,
					  uint32_t type)
{
	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, type);
}

/**
 * Gets the ID of the pending interrupt (if any) and acknowledges it.
 *
 * Returns HF_INVALID_INTID if there are no pending interrupts.
 */
static inline uint32_t hf_interrupt_get(void)
{
	return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
}

/**
 * Deactivates the physical interrupt.
 *
 * Returns 0 on success, or -1 otherwise.
 */
static inline int64_t hf_interrupt_deactivate(uint32_t intid)
{
	return hf_call(HF_INTERRUPT_DEACTIVATE, intid, intid, 0);
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *    ID is invalid, or the current VM is not allowed to inject interrupts to
 *    the target VM.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
static inline int64_t hf_interrupt_inject(ffa_id_t target_vm_id,
					  ffa_vcpu_index_t target_vcpu_idx,
					  uint32_t intid)
{
	return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
		       intid);
}
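
/*
 * Usage sketch (illustrative): a minimal interrupt handling path. The
 * interrupt is first enabled, then, when it fires, acknowledged via
 * hf_interrupt_get() and finally deactivated. `MY_INTID` and the interrupt
 * type value are hypothetical; see the HF_INTERRUPT_ENABLE ABI for the
 * accepted type values.
 *
 *	hf_interrupt_enable(MY_INTID, true, 0);
 *	...
 *	uint32_t intid = hf_interrupt_get();
 *	if (intid != HF_INVALID_INTID) {
 *		// ... service the interrupt ...
 *		hf_interrupt_deactivate(intid);
 *	}
 */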

/**
 * Reconfigures the virtual interrupt belonging to the current SP. Note that the
 * virtual interrupt is identity mapped to the physical interrupt id.
 *
 * Returns -1 on failure, or 0 on success.
 */
static inline int64_t hf_interrupt_reconfigure(uint32_t intid, uint32_t command,
					       uint32_t value)
{
	return hf_call(HF_INTERRUPT_RECONFIGURE, intid, command, value);
}

static inline int64_t hf_interrupt_reconfigure_target_cpu(
	uint32_t intid, ffa_vcpu_index_t target_cpu_index)
{
	return hf_interrupt_reconfigure(intid, INT_RECONFIGURE_TARGET_PE,
					(uint32_t)target_cpu_index);
}

/** Obtains Hafnium's version of the implemented FF-A specification. */
static inline enum ffa_version ffa_version(enum ffa_version requested_version)
{
	return ffa_call((struct ffa_value){
			.func = FFA_VERSION_32,
			.arg1 = (uint32_t)requested_version,
		})
		.func;
}
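
/*
 * Usage sketch (illustrative): version negotiation. The caller states the
 * highest FF-A version it supports and the implementation answers with the
 * version it will actually speak (or an error code for an incompatible
 * request). `FFA_VERSION_1_1` is assumed here to be one of the
 * `enum ffa_version` values defined in "hf/ffa.h".
 *
 *	enum ffa_version negotiated = ffa_version(FFA_VERSION_1_1);
 */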

/**
 * Discovery function returning information about the implementation of optional
 * FF-A interfaces.
 *
 * Returns:
 *  - FFA_SUCCESS in .func if the optional interface with function_id is
 *    implemented.
 *  - FFA_ERROR in .func if the optional interface with function_id is not
 *    implemented.
 */
static inline struct ffa_value ffa_features(uint64_t function_id)
{
	return ffa_call((struct ffa_value){
		.func = FFA_FEATURES_32,
		.arg1 = function_id,
	});
}

/**
 * Discovery function returning information about the implementation of optional
 * FF-A interfaces which require an extra input property.
 *
 * Returns:
 *  - FFA_SUCCESS in .func if the optional interface with function_id is
 *    implemented.
 *  - FFA_ERROR in .func if the optional interface with function_id is not
 *    implemented.
 */
static inline struct ffa_value ffa_features_with_input_property(
	uint64_t function_id, uint64_t param)
{
	return ffa_call((struct ffa_value){
		.func = FFA_FEATURES_32, .arg1 = function_id, .arg2 = param});
}
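
/*
 * Usage sketch (illustrative): probing for an optional interface before using
 * it, here the 64-bit console log ABI declared at the bottom of this file.
 * FFA_SUCCESS_32 is assumed to be the FFA_SUCCESS function ID from "hf/ffa.h".
 *
 *	struct ffa_value ret = ffa_features(FFA_CONSOLE_LOG_64);
 *	bool console_log_64_supported = (ret.func == FFA_SUCCESS_32);
 */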

static inline struct ffa_value ffa_msg_send_direct_req(
	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_REQ_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}
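
/*
 * Usage sketch (illustrative): a direct request/response exchange as seen from
 * the sender. The five payload words are application defined; the receiver
 * answers with ffa_msg_send_direct_resp() and the response payload comes back
 * in arg3..arg7 of the returned value. `own_id`/`receiver_id` are
 * hypothetical.
 *
 *	struct ffa_value ret = ffa_msg_send_direct_req(own_id, receiver_id,
 *						       0x1, 0x2, 0x3, 0x4, 0x5);
 *	// ret.func is FFA_MSG_SEND_DIRECT_RESP_32 on a normal reply.
 */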

static inline struct ffa_value ffa_msg_send_direct_req2(
	ffa_id_t sender_vm_id, ffa_id_t target_vm_id,
	const struct ffa_uuid *uuid, const uint64_t *msg, size_t count)
{
	struct ffa_value args;
	size_t arg_idx = 0;
	size_t total_args;
	size_t msg_idx = 0;
	uint64_t *arg_ptrs[] = {
		&args.arg4,
		&args.arg5,
		&args.arg6,
		&args.arg7,
		&args.extended_val.arg8,
		&args.extended_val.arg9,
		&args.extended_val.arg10,
		&args.extended_val.arg11,
		&args.extended_val.arg12,
		&args.extended_val.arg13,
		&args.extended_val.arg14,
		&args.extended_val.arg15,
		&args.extended_val.arg16,
		&args.extended_val.arg17,
	};

	args.func = FFA_MSG_SEND_DIRECT_REQ2_64;
	args.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id;
	ffa_uuid_to_u64x2(&args.arg2, &args.arg3, uuid);

	total_args = sizeof(arg_ptrs) / sizeof(uint64_t *);

	/* Copy the message payload into the argument registers... */
	while (arg_idx < total_args && msg_idx < count) {
		*arg_ptrs[arg_idx++] = msg[msg_idx++];
	}

	/* ...and zero whatever remains. */
	while (arg_idx < total_args) {
		*arg_ptrs[arg_idx++] = 0;
	}

	return ffa_call_ext(args);
}

static inline struct ffa_value ffa_msg_send_direct_resp(
	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	return ffa_call_ext((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_RESP_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = arg3,
		.arg4 = arg4,
		.arg5 = arg5,
		.arg6 = arg6,
		.arg7 = arg7,
	});
}

static inline struct ffa_value ffa_msg_send_direct_resp2(ffa_id_t sender_vm_id,
							  ffa_id_t target_vm_id,
							  const uint64_t *msg,
							  size_t count)
{
	struct ffa_value args;
	size_t arg_idx = 0;
	size_t total_args;
	size_t msg_idx = 0;
	uint64_t *arg_ptrs[] = {
		&args.arg4,
		&args.arg5,
		&args.arg6,
		&args.arg7,
		&args.extended_val.arg8,
		&args.extended_val.arg9,
		&args.extended_val.arg10,
		&args.extended_val.arg11,
		&args.extended_val.arg12,
		&args.extended_val.arg13,
		&args.extended_val.arg14,
		&args.extended_val.arg15,
		&args.extended_val.arg16,
		&args.extended_val.arg17,
	};

	args.func = FFA_MSG_SEND_DIRECT_RESP2_64;
	args.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id;
	args.arg2 = 0;
	args.arg3 = 0;

	total_args = sizeof(arg_ptrs) / sizeof(uint64_t *);

	/* Copy the message payload into the argument registers... */
	while (arg_idx < total_args && msg_idx < count) {
		*arg_ptrs[arg_idx++] = msg[msg_idx++];
	}

	/* ...and zero whatever remains. */
	while (arg_idx < total_args) {
		*arg_ptrs[arg_idx++] = 0;
	}

	return ffa_call_ext(args);
}

static inline struct ffa_value ffa_notification_bind(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_BIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_unbind(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_UNBIND_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_set(
	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_SET_32,
		.arg1 = (sender_vm_id << 16) | (receiver_vm_id),
		.arg2 = flags,
		.arg3 = (uint32_t)(bitmap),
		.arg4 = (uint32_t)(bitmap >> 32),
	});
}

static inline struct ffa_value ffa_notification_get(ffa_id_t receiver_vm_id,
						    ffa_vcpu_index_t vcpu_id,
						    uint32_t flags)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_GET_32,
		.arg1 = (vcpu_id << 16) | (receiver_vm_id),
		.arg2 = flags,
	});
}

static inline struct ffa_value ffa_notification_info_get(void)
{
	return ffa_call((struct ffa_value){
		.func = FFA_NOTIFICATION_INFO_GET_64,
	});
}
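
/*
 * Usage sketch (illustrative): the receiver first binds a notification ID to
 * a sender, the sender then signals it, and the receiver finally retrieves
 * the pending bitmaps. `sender_id`/`receiver_id` are hypothetical; flags of 0
 * request the default (global) behaviour, and `get_flags` (also hypothetical)
 * selects which bitmaps ffa_notification_get() should return, as defined in
 * "hf/ffa.h".
 *
 *	ffa_notifications_bitmap_t bitmap = UINT64_C(1) << 3;
 *
 *	ffa_notification_bind(sender_id, receiver_id, 0, bitmap);  // Receiver.
 *	ffa_notification_set(sender_id, receiver_id, 0, bitmap);   // Sender.
 *	struct ffa_value ret =
 *		ffa_notification_get(receiver_id, 0, get_flags);    // Receiver.
 *	// Pending notifications come back split across the return registers.
 */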

static inline struct ffa_value ffa_mem_perm_get(uint64_t base_va)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_GET_32,
					   .arg1 = base_va});
}

static inline struct ffa_value ffa_mem_perm_set(uint64_t base_va,
						uint32_t page_count,
						uint32_t mem_perm)
{
	return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_SET_32,
					   .arg1 = base_va,
					   .arg2 = page_count,
					   .arg3 = mem_perm});
}

static inline struct ffa_value ffa_console_log_32(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_32,
		.arg1 = size,
	};

	uint64_t *arg_addrs[] = {&req.arg2, &req.arg3, &req.arg4,
				 &req.arg5, &req.arg6, &req.arg7};

	uint32_t src_index = 0;
	uint32_t arg_idx = 0;

	/* Pack the string four bytes at a time into the argument registers. */
	while (size > 0 && arg_idx < 6) {
		size_t arg_size =
			size < sizeof(uint32_t) ? size : sizeof(uint32_t);
		memcpy_s(arg_addrs[arg_idx++], sizeof(uint32_t),
			 &src[src_index], arg_size);
		src_index += arg_size;
		size -= arg_size;
	}

	return ffa_call(req);
}

static inline struct ffa_value ffa_console_log_64(const char *src, size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_64,
		.arg1 = size,
	};
	const size_t destsz = sizeof(uint64_t) * 6;
	const size_t count = size > destsz ? destsz : size;

	memcpy_s(&req.arg2, destsz, src, count);

	return ffa_call(req);
}
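
/*
 * Usage sketch (illustrative): emitting a short string on the debug console.
 * ffa_console_log_64() packs up to 48 characters (six 64-bit registers) per
 * call, so longer strings need to be chunked by the caller.
 *
 *	const char msg[] = "hello from the VM\n";
 *	ffa_console_log_64(msg, sizeof(msg) - 1);
 */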

static inline struct ffa_value ffa_console_log_64_extended(const char *src,
							    size_t size)
{
	struct ffa_value req = {
		.func = FFA_CONSOLE_LOG_64,
		.arg1 = size,
	};
	const size_t destsz = sizeof(uint64_t) * 16;
	const size_t count = size > destsz ? destsz : size;

	memcpy_s(&req.arg2, destsz, src, count);

	return ffa_call_ext(req);
}