/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
Andrew Scullfbc938a2018-08-20 14:09:28 +01009#pragma once
Andrew Scullf35a5c92018-08-07 18:09:46 +010010
Andrew Scull6d2db332018-10-10 15:28:17 +010011#include "hf/abi.h"
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010012#include "hf/ffa.h"
Andrew Scull6d2db332018-10-10 15:28:17 +010013#include "hf/types.h"
Andrew Scullf35a5c92018-08-07 18:09:46 +010014
/**
 * Architecture-specific trap into the hypervisor. This function must be
 * implemented by each supported architecture.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);

/** Performs an FF-A call, passing and returning arguments in registers. */
struct ffa_value ffa_call(struct ffa_value args);

/*
 * Bounds-checked copy of `count` bytes from `src` into `dest`.
 * NOTE(review): assumed to follow memcpy_s-style semantics (`destsz` is the
 * destination capacity) — confirm against its definition.
 */
void memcpy_s(void *dest, size_t destsz, const void *src, size_t count);
Andrew Scullf35a5c92018-08-07 18:09:46 +010022
Andrew Scull5ac05f02018-08-10 17:23:22 +010023/**
Andrew Scull55c4d8b2018-12-18 18:50:18 +000024 * Returns the VM's own ID.
Andrew Scull5ac05f02018-08-10 17:23:22 +010025 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010026static inline struct ffa_value ffa_id_get(void)
Andrew Walbrand230f662019-10-07 18:03:36 +010027{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010028 return ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
Andrew Walbrand230f662019-10-07 18:03:36 +010029}
30
31/**
Olivier Deprez421677d2021-06-18 12:18:53 +020032 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
33 * FF-A instances.
34 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
35 */
36static inline struct ffa_value ffa_spm_id_get(void)
37{
38 return ffa_call((struct ffa_value){.func = FFA_SPM_ID_GET_32});
39}
40
41/**
Raghu Krishnamurthy2957b922022-12-27 10:29:12 -080042 * Requests information for partitions instantiated in the system via registers
43 * (as opposed to rx buffer in the case of ffa_partition_info_get).
44 */
45static inline struct ffa_value ffa_partition_info_get_regs(
46 const struct ffa_uuid *uuid, const uint16_t start_index,
47 const uint16_t tag)
48{
49 uint64_t arg1 = (uint64_t)uuid->uuid[1] << 32 | uuid->uuid[0];
50 uint64_t arg2 = (uint64_t)uuid->uuid[3] << 32 | uuid->uuid[2];
51 uint64_t arg3 = start_index | (uint64_t)tag << 16;
52
53 return ffa_call((struct ffa_value){
54 .func = FFA_PARTITION_INFO_GET_REGS_64,
55 .arg1 = arg1,
56 .arg2 = arg2,
57 .arg3 = arg3,
58 });
59}
60
61/**
Daniel Boulby8373fcf2021-12-13 18:00:30 +000062 * Requests information for partitions instantiated in the system. If the
63 * FFA_PARTITION_COUNT_FLAG is not set, the information is returned
64 * in the RX buffer of the caller as an array of partition information
65 * descriptors (struct ffa_partition_info).
Fuad Tabbae4efcc32020-07-16 15:37:27 +010066 *
67 * A Null UUID (UUID that is all zeros) returns information for all partitions,
68 * whereas a non-Null UUID returns information only for partitions that match.
69 *
Daniel Boulby8373fcf2021-12-13 18:00:30 +000070 * Flags may include:
71 * - FFA_PARTITION_COUNT_FLAG, which specifes if the partition info descriptors
72 * are returned in RX buffer or just the count in arg2.
73 * 1 returns just the count.
74 * 0 returns the count with the partition info descriptors.
75 *
Fuad Tabbae4efcc32020-07-16 15:37:27 +010076 * Returns:
77 * - FFA_SUCCESS on success. The count of partition information descriptors
78 * populated in the RX buffer is returned in arg2 (register w2).
79 * - FFA_BUSY if the caller's RX buffer is not free.
80 * - FFA_NO_MEMORY if the results do not fit in the callers RX buffer.
81 * - FFA_INVALID_PARAMETERS for an unrecognized UUID.
82 */
83static inline struct ffa_value ffa_partition_info_get(
Daniel Boulby8373fcf2021-12-13 18:00:30 +000084 const struct ffa_uuid *uuid, const uint32_t flags)
Fuad Tabbae4efcc32020-07-16 15:37:27 +010085{
86 return ffa_call((struct ffa_value){.func = FFA_PARTITION_INFO_GET_32,
87 .arg1 = uuid->uuid[0],
88 .arg2 = uuid->uuid[1],
89 .arg3 = uuid->uuid[2],
Daniel Boulby8373fcf2021-12-13 18:00:30 +000090 .arg4 = uuid->uuid[3],
91 .arg5 = flags});
Fuad Tabbae4efcc32020-07-16 15:37:27 +010092}
Fuad Tabbae4efcc32020-07-16 15:37:27 +010093/**
Olivier Deprez175161a2021-11-23 18:41:09 +010094 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1
95 * Registers vCPU secondary entry point for the caller VM.
96 * Called from secure virtual FF-A instance.
97 */
98static inline struct ffa_value ffa_secondary_ep_register(uintptr_t address)
99{
100 return ffa_call((struct ffa_value){.func = FFA_SECONDARY_EP_REGISTER_64,
101 .arg1 = address});
102}
103
104/**
Andrew Walbrand230f662019-10-07 18:03:36 +0100105 * Returns the VM's own ID.
106 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100107static inline ffa_vm_id_t hf_vm_get_id(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100108{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100109 return ffa_id_get().arg2;
Andrew Scull5ac05f02018-08-10 17:23:22 +0100110}
111
112/**
Andrew Walbran27faff32019-10-02 18:20:57 +0100113 * Runs the given vCPU of the given VM.
114 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100115static inline struct ffa_value ffa_run(ffa_vm_id_t vm_id,
116 ffa_vcpu_index_t vcpu_idx)
Andrew Walbran27faff32019-10-02 18:20:57 +0100117{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100118 return ffa_call((struct ffa_value){.func = FFA_RUN_32,
119 ffa_vm_vcpu(vm_id, vcpu_idx)});
Andrew Walbran27faff32019-10-02 18:20:57 +0100120}
121
122/**
Madhukar Pappireddy9fb96ae2023-05-23 17:40:29 -0500123 * Hints that the vCPU is willing to yield its current use of the physical CPU
124 * and intends to be resumed at the expiration of the timeout.
125 */
126static inline struct ffa_value ffa_yield_timeout(uint32_t timeout_low,
127 uint32_t timeout_high)
128{
129 return ffa_call((struct ffa_value){.func = FFA_YIELD_32,
130 .arg2 = timeout_low,
131 .arg3 = timeout_high});
132}
133
134/**
135 * Relinquish the current physical CPU cycles back.
Andrew Scull55c4d8b2018-12-18 18:50:18 +0000136 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100137static inline struct ffa_value ffa_yield(void)
Andrew Scull55c4d8b2018-12-18 18:50:18 +0000138{
Madhukar Pappireddy9fb96ae2023-05-23 17:40:29 -0500139 return ffa_yield_timeout(0, 0);
Andrew Scull55c4d8b2018-12-18 18:50:18 +0000140}
141
142/**
Andrew Scull5ac05f02018-08-10 17:23:22 +0100143 * Configures the pages to send/receive data through. The pages must not be
144 * shared.
Andrew Walbran54afb502018-11-26 16:01:11 +0000145 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000146 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100147 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000148 * aligned or are the same.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100149 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
Fuad Tabba9dc276f2020-07-16 09:29:32 +0100150 * due to insufficient page table memory.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100151 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000152 * the caller.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100153 * - FFA_SUCCESS on success if no further action is needed.
154 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000155 * needs to wake up or kick waiters.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100156 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100157static inline struct ffa_value ffa_rxtx_map(hf_ipaddr_t send, hf_ipaddr_t recv)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100158{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100159 return ffa_call(
160 (struct ffa_value){.func = FFA_RXTX_MAP_64,
161 .arg1 = send,
162 .arg2 = recv,
163 .arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100164}
165
166/**
Daniel Boulby9e420ca2021-07-07 15:03:49 +0100167 * Unmaps the RX/TX buffer pair of an endpoint or Hypervisor from the
168 * translation regime of the callee.
169 *
170 * Returns:
171 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
172 * behalf of the caller.
173 * - FFA_SUCCESS on success if no further action is needed.
174 */
175static inline struct ffa_value ffa_rxtx_unmap(void)
176{
177 /* Note that allocator ID MBZ at virtual instance. */
178 return ffa_call((struct ffa_value){.func = FFA_RXTX_UNMAP_32});
179}
180
181/**
Andrew Scullaa039b32018-10-04 15:02:26 +0100182 * Copies data from the sender's send buffer to the recipient's receive buffer.
Andrew Walbran54afb502018-11-26 16:01:11 +0000183 *
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +0000184 * If the recipient's receive buffer is busy, it can optionally register the
185 * caller to be notified when the recipient's receive buffer becomes available.
186 *
Andrew Walbran70bc8622019-10-07 14:15:58 +0100187 * Attributes may include:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100188 * - FFA_MSG_SEND_NOTIFY, to notify the caller when it should try again.
189 * - FFA_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
Andrew Walbran70bc8622019-10-07 14:15:58 +0100190 * message.
191 *
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100192 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
Andrew Walbran70bc8622019-10-07 14:15:58 +0100193 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +0000194 * - BUSY: the message could not be delivered either because the mailbox
Andrew Walbran70bc8622019-10-07 14:15:58 +0100195 * was full or the target VM is not yet set up.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100196 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100197static inline struct ffa_value ffa_msg_send(ffa_vm_id_t sender_vm_id,
198 ffa_vm_id_t target_vm_id,
199 uint32_t size, uint32_t attributes)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100200{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100201 return ffa_call((struct ffa_value){
202 .func = FFA_MSG_SEND_32,
Andrew Walbran70bc8622019-10-07 14:15:58 +0100203 .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
204 .arg3 = size,
205 .arg4 = attributes});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100206}
207
Federico Recanati25053ee2022-03-14 15:01:53 +0100208/**
209 * Copies data from the sender's send buffer to the recipient's receive buffer
210 * and notifies the receiver.
211 *
212 * `flags` may include a 'Delay Schedule Receiver interrupt'.
213 *
214 * Returns FFA_SUCCESS if the message is sent, or an error code otherwise:
215 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
216 * - BUSY: receiver's mailbox was full.
217 * - DENIED: receiver is not in a state to handle the request or doesn't
218 * support indirect messages.
219 */
220static inline struct ffa_value ffa_msg_send2(uint32_t flags)
221{
222 return ffa_call((struct ffa_value){
223 .func = FFA_MSG_SEND2_32, .arg1 = 0, .arg2 = flags});
224}
225
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100226static inline struct ffa_value ffa_mem_donate(uint32_t length,
Andrew Walbrana65a1322020-04-06 19:32:32 +0100227 uint32_t fragment_length)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000228{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100229 return ffa_call((struct ffa_value){.func = FFA_MEM_DONATE_32,
230 .arg1 = length,
231 .arg2 = fragment_length});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000232}
233
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100234static inline struct ffa_value ffa_mem_lend(uint32_t length,
235 uint32_t fragment_length)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000236{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100237 return ffa_call((struct ffa_value){.func = FFA_MEM_LEND_32,
238 .arg1 = length,
239 .arg2 = fragment_length});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000240}
241
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100242static inline struct ffa_value ffa_mem_share(uint32_t length,
243 uint32_t fragment_length)
Andrew Walbran82d6d152019-12-24 15:02:06 +0000244{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100245 return ffa_call((struct ffa_value){.func = FFA_MEM_SHARE_32,
246 .arg1 = length,
247 .arg2 = fragment_length});
Andrew Walbran82d6d152019-12-24 15:02:06 +0000248}
249
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100250static inline struct ffa_value ffa_mem_retrieve_req(uint32_t length,
251 uint32_t fragment_length)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000252{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100253 return ffa_call((struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
254 .arg1 = length,
255 .arg2 = fragment_length});
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000256}
257
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100258static inline struct ffa_value ffa_mem_relinquish(void)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000259{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100260 return ffa_call((struct ffa_value){.func = FFA_MEM_RELINQUISH_32});
261}
262
263static inline struct ffa_value ffa_mem_reclaim(ffa_memory_handle_t handle,
264 ffa_memory_region_flags_t flags)
265{
266 return ffa_call((struct ffa_value){.func = FFA_MEM_RECLAIM_32,
267 .arg1 = (uint32_t)handle,
268 .arg2 = (uint32_t)(handle >> 32),
269 .arg3 = flags});
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000270}
271
Andrew Walbranca808b12020-05-15 17:22:28 +0100272static inline struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
273 uint32_t fragment_offset)
274{
275 /* Note that sender MBZ at virtual instance. */
276 return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
277 .arg1 = (uint32_t)handle,
278 .arg2 = (uint32_t)(handle >> 32),
279 .arg3 = fragment_offset});
280}
281
282static inline struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
283 uint32_t fragment_length)
284{
285 /* Note that sender MBZ at virtual instance. */
286 return ffa_call((struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
287 .arg1 = (uint32_t)handle,
288 .arg2 = (uint32_t)(handle >> 32),
289 .arg3 = fragment_length});
290}
291
Andrew Scull5ac05f02018-08-10 17:23:22 +0100292/**
Andrew Walbran0de4f162019-09-03 16:44:20 +0100293 * Called by secondary VMs to receive a message. This will block until a message
294 * is received.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100295 *
Andrew Scullaa039b32018-10-04 15:02:26 +0100296 * The mailbox must be cleared before a new message can be received.
Andrew Walbran9311c9a2019-03-12 16:59:04 +0000297 *
Andrew Walbran0de4f162019-09-03 16:44:20 +0100298 * If no message is immediately available and there are no enabled and pending
299 * interrupts (irrespective of whether interrupts are enabled globally), then
300 * this will block until a message is available or an enabled interrupt becomes
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000301 * pending. This matches the behaviour of the WFI instruction on AArch64, except
Andrew Walbran0de4f162019-09-03 16:44:20 +0100302 * that a message becoming available is also treated like a wake-up event.
Andrew Walbranc8500812019-06-26 10:36:48 +0100303 *
304 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100305 * - FFA_MSG_SEND if a message is successfully received.
306 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
307 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100308 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100309static inline struct ffa_value ffa_msg_wait(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100310{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100311 return ffa_call((struct ffa_value){.func = FFA_MSG_WAIT_32});
Andrew Walbran0de4f162019-09-03 16:44:20 +0100312}
313
314/**
315 * Called by secondary VMs to receive a message. The call will return whether or
316 * not a message is available.
317 *
318 * The mailbox must be cleared before a new message can be received.
319 *
320 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100321 * - FFA_MSG_SEND if a message is successfully received.
322 * - FFA_ERROR FFA_NOT_SUPPORTED if called from the primary VM.
323 * - FFA_ERROR FFA_INTERRUPTED if an interrupt happened during the call.
324 * - FFA_ERROR FFA_RETRY if there was no pending message.
Andrew Walbran0de4f162019-09-03 16:44:20 +0100325 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100326static inline struct ffa_value ffa_msg_poll(void)
Andrew Walbran0de4f162019-09-03 16:44:20 +0100327{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100328 return ffa_call((struct ffa_value){.func = FFA_MSG_POLL_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100329}
330
331/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000332 * Releases the caller's mailbox so that a new message can be received. The
333 * caller must have copied out all data they wish to preserve as new messages
334 * will overwrite the old and will arrive asynchronously.
Andrew Walbran54afb502018-11-26 16:01:11 +0000335 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000336 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100337 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
338 * - FFA_SUCCESS on success if no further action is needed.
339 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000340 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000341 * hf_mailbox_waiter_get.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100342 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100343static inline struct ffa_value ffa_rx_release(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100344{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100345 return ffa_call((struct ffa_value){.func = FFA_RX_RELEASE_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100346}
Andrew Walbran318f5732018-11-20 16:23:42 +0000347
348/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000349 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
350 * by this function, the caller must have called api_mailbox_send before with
351 * the notify argument set to true, and this call must have failed because the
352 * mailbox was not available.
353 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000354 * It should be called repeatedly to retrieve a list of VMs.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000355 *
356 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
357 * became writable.
358 */
359static inline int64_t hf_mailbox_writable_get(void)
360{
361 return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
362}
363
364/**
365 * Retrieves the next VM waiting to be notified that the mailbox of the
366 * specified VM became writable. Only primary VMs are allowed to call this.
367 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000368 * Returns -1 on failure or if there are no waiters; the VM id of the next
369 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000370 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100371static inline int64_t hf_mailbox_waiter_get(ffa_vm_id_t vm_id)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000372{
373 return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
374}
375
376/**
Andrew Walbran318f5732018-11-20 16:23:42 +0000377 * Enables or disables a given interrupt ID.
378 *
379 * Returns 0 on success, or -1 if the intid is invalid.
380 */
Manish Pandey35e452f2021-02-18 21:36:34 +0000381static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable,
Olivier Deprezc5203fb2022-09-29 13:49:24 +0200382 uint32_t type)
Andrew Walbran318f5732018-11-20 16:23:42 +0000383{
Manish Pandey35e452f2021-02-18 21:36:34 +0000384 return hf_call(HF_INTERRUPT_ENABLE, intid, enable, type);
Andrew Walbran318f5732018-11-20 16:23:42 +0000385}
386
387/**
388 * Gets the ID of the pending interrupt (if any) and acknowledge it.
389 *
390 * Returns HF_INVALID_INTID if there are no pending interrupts.
391 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000392static inline uint32_t hf_interrupt_get(void)
Andrew Walbran318f5732018-11-20 16:23:42 +0000393{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000394 return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
Andrew Walbran318f5732018-11-20 16:23:42 +0000395}
396
397/**
Madhukar Pappireddy94cc33f2022-12-22 10:06:30 -0600398 * Deactivate the physical interrupt.
399 *
400 * Returns 0 on success, or -1 otherwise.
401 */
402static inline int64_t hf_interrupt_deactivate(uint32_t intid)
403{
404 return hf_call(HF_INTERRUPT_DEACTIVATE, intid, intid, 0);
405}
406
407/**
Andrew Walbran318f5732018-11-20 16:23:42 +0000408 * Injects a virtual interrupt of the given ID into the given target vCPU.
409 * This doesn't cause the vCPU to actually be run immediately; it will be taken
410 * when the vCPU is next run, which is up to the scheduler.
411 *
Andrew Walbran3d84a262018-12-13 14:41:19 +0000412 * Returns:
413 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
414 * ID is invalid, or the current VM is not allowed to inject interrupts to
415 * the target VM.
416 * - 0 on success if no further action is needed.
417 * - 1 if it was called by the primary VM and the primary VM now needs to wake
418 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +0000419 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100420static inline int64_t hf_interrupt_inject(ffa_vm_id_t target_vm_id,
421 ffa_vcpu_index_t target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000422 uint32_t intid)
423{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000424 return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000425 intid);
426}
Andrew Scull6386f252018-12-06 13:29:10 +0000427
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100428/** Obtains the Hafnium's version of the implemented FF-A specification. */
429static inline int32_t ffa_version(uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100430{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100431 return ffa_call((struct ffa_value){.func = FFA_VERSION_32,
432 .arg1 = requested_version})
Andrew Walbran9fd29072020-04-22 12:12:14 +0100433 .func;
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100434}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100435
436/**
437 * Discovery function returning information about the implementation of optional
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100438 * FF-A interfaces.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100439 *
440 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100441 * - FFA_SUCCESS in .func if the optional interface with function_id is
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100442 * implemented.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +0100443 * - FFA_ERROR in .func if the optional interface with function_id is not
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100444 * implemented.
445 */
Karl Meakin34b8ae92023-01-13 13:33:07 +0000446static inline struct ffa_value ffa_features(uint64_t function_id)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100447{
Karl Meakin34b8ae92023-01-13 13:33:07 +0000448 return ffa_call((struct ffa_value){
449 .func = FFA_FEATURES_32,
450 .arg1 = function_id,
451 });
452}
453
454/**
455 * Discovery function returning information about the implementation of optional
456 * FF-A interfaces which require an extra input property
457 *
458 * Returns:
459 * - FFA_SUCCESS in .func if the optional interface with function_id is
460 * implemented.
461 * - FFA_ERROR in .func if the optional interface with function_id is not
462 * implemented.
463 */
464static inline struct ffa_value ffa_features_with_input_property(
465 uint64_t function_id, uint64_t param)
466{
467 return ffa_call((struct ffa_value){
468 .func = FFA_FEATURES_32, .arg1 = function_id, .arg2 = param});
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100469}
Olivier Deprezee9d6a92019-11-26 09:14:11 +0000470
471static inline struct ffa_value ffa_msg_send_direct_req(
472 ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
473 uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
474{
475 return ffa_call((struct ffa_value){
476 .func = FFA_MSG_SEND_DIRECT_REQ_32,
477 .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
478 .arg3 = arg3,
479 .arg4 = arg4,
480 .arg5 = arg5,
481 .arg6 = arg6,
482 .arg7 = arg7,
483 });
484}
485
486static inline struct ffa_value ffa_msg_send_direct_resp(
487 ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
488 uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
489{
490 return ffa_call((struct ffa_value){
491 .func = FFA_MSG_SEND_DIRECT_RESP_32,
492 .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
493 .arg3 = arg3,
494 .arg4 = arg4,
495 .arg5 = arg5,
496 .arg6 = arg6,
497 .arg7 = arg7,
498 });
499}
J-Alvesef69ac92021-08-26 09:21:27 +0100500
501static inline struct ffa_value ffa_notification_bind(
502 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
503 ffa_notifications_bitmap_t bitmap)
504{
505 return ffa_call((struct ffa_value){
506 .func = FFA_NOTIFICATION_BIND_32,
507 .arg1 = (sender_vm_id << 16) | (receiver_vm_id),
508 .arg2 = flags,
509 .arg3 = (uint32_t)(bitmap),
510 .arg4 = (uint32_t)(bitmap >> 32),
511 });
512}
513
514static inline struct ffa_value ffa_notification_unbind(
515 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id,
516 ffa_notifications_bitmap_t bitmap)
517{
518 return ffa_call((struct ffa_value){
519 .func = FFA_NOTIFICATION_UNBIND_32,
520 .arg1 = (sender_vm_id << 16) | (receiver_vm_id),
521 .arg3 = (uint32_t)(bitmap),
522 .arg4 = (uint32_t)(bitmap >> 32),
523 });
524}
525
526static inline struct ffa_value ffa_notification_set(
527 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
528 ffa_notifications_bitmap_t bitmap)
529{
530 return ffa_call((struct ffa_value){
531 .func = FFA_NOTIFICATION_SET_32,
532 .arg1 = (sender_vm_id << 16) | (receiver_vm_id),
533 .arg2 = flags,
534 .arg3 = (uint32_t)(bitmap),
535 .arg4 = (uint32_t)(bitmap >> 32),
536 });
537}
538
539static inline struct ffa_value ffa_notification_get(ffa_vm_id_t receiver_vm_id,
540 ffa_vcpu_index_t vcpu_id,
541 uint32_t flags)
542{
543 return ffa_call((struct ffa_value){
544 .func = FFA_NOTIFICATION_GET_32,
J-Alvesbe6e3032021-11-30 14:54:12 +0000545 .arg1 = (vcpu_id << 16) | (receiver_vm_id),
J-Alvesef69ac92021-08-26 09:21:27 +0100546 .arg2 = flags,
547 });
548}
549
550static inline struct ffa_value ffa_notification_info_get(void)
551{
552 return ffa_call((struct ffa_value){
553 .func = FFA_NOTIFICATION_INFO_GET_64,
554 });
555}
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -0700556
557static inline struct ffa_value ffa_mem_perm_get(uint64_t base_va)
558{
559 return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_GET_32,
560 .arg1 = base_va});
561}
562
563static inline struct ffa_value ffa_mem_perm_set(uint64_t base_va,
564 uint32_t page_count,
565 uint32_t mem_perm)
566{
567 return ffa_call((struct ffa_value){.func = FFA_MEM_PERM_SET_32,
568 .arg1 = base_va,
569 .arg2 = page_count,
570 .arg3 = mem_perm});
571}
Maksims Svecovse4700b42022-06-23 16:35:09 +0100572
573static inline struct ffa_value ffa_console_log_32(const char *src, size_t size)
574{
575 struct ffa_value req = {
576 .func = FFA_CONSOLE_LOG_32,
577 .arg1 = size,
578 };
Kathleen Capella7928b922023-02-08 14:14:30 -0500579
580 uint64_t *arg_addrs[] = {&req.arg2, &req.arg3, &req.arg4,
581 &req.arg5, &req.arg6, &req.arg7};
582
583 uint32_t src_index = 0;
584 uint32_t arg_idx = 0;
585
586 while (size > 0 && arg_idx < 6) {
587 size_t arg_size =
588 size < sizeof(uint32_t) ? size : sizeof(uint32_t);
589 memcpy_s(arg_addrs[arg_idx++], sizeof(uint32_t),
590 &src[src_index], arg_size);
591 src_index += arg_size;
592 size -= arg_size;
593 }
Maksims Svecovse4700b42022-06-23 16:35:09 +0100594
595 return ffa_call(req);
596}
597
598static inline struct ffa_value ffa_console_log_64(const char *src, size_t size)
599{
600 struct ffa_value req = {
601 .func = FFA_CONSOLE_LOG_64,
602 .arg1 = size,
603 };
604 memcpy_s(&req.arg2, sizeof(uint64_t) * 6, src, size);
605
606 return ffa_call(req);
607}