blob: 5a1e097379a8c1a239cdbcbe1442995d0e33cdd2 [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Andrew Scullfbc938a2018-08-20 14:09:28 +010017#pragma once
Andrew Scullf35a5c92018-08-07 18:09:46 +010018
Andrew Scull6d2db332018-10-10 15:28:17 +010019#include "hf/abi.h"
Jose Marinhoa1dfeda2019-02-27 16:46:03 +000020#include "hf/spci.h"
Andrew Scull6d2db332018-10-10 15:28:17 +010021#include "hf/types.h"
Andrew Scullf35a5c92018-08-07 18:09:46 +010022
/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism to call to the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);

/**
 * Architecture-specific SPCI call entry point; must also be implemented per
 * architecture. Takes the SPCI argument registers packed in |args| and
 * returns the result registers as a struct spci_value.
 */
struct spci_value spci_call(struct spci_value args);
Andrew Scullf35a5c92018-08-07 18:09:46 +010029
Andrew Scull5ac05f02018-08-10 17:23:22 +010030/**
Andrew Scull55c4d8b2018-12-18 18:50:18 +000031 * Returns the VM's own ID.
Andrew Scull5ac05f02018-08-10 17:23:22 +010032 */
Andrew Walbrand230f662019-10-07 18:03:36 +010033static inline struct spci_value spci_id_get(void)
34{
35 return spci_call((struct spci_value){.func = SPCI_ID_GET_32});
36}
37
38/**
39 * Returns the VM's own ID.
40 */
Andrew Walbran95534922019-06-19 11:32:54 +010041static inline spci_vm_id_t hf_vm_get_id(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +010042{
Andrew Walbrand230f662019-10-07 18:03:36 +010043 return spci_id_get().arg2;
Andrew Scull5ac05f02018-08-10 17:23:22 +010044}
45
46/**
Andrew Scullf35a5c92018-08-07 18:09:46 +010047 * Returns the number of secondary VMs.
48 */
Andrew Walbran52d99672019-06-25 15:51:11 +010049static inline spci_vm_count_t hf_vm_get_count(void)
Andrew Scullf35a5c92018-08-07 18:09:46 +010050{
51 return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
52}
53
Andrew Scull5ac05f02018-08-10 17:23:22 +010054/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +000055 * Returns the number of vCPUs configured in the given secondary VM.
Andrew Scullf35a5c92018-08-07 18:09:46 +010056 */
Andrew Walbranc6d23c42019-06-26 13:30:42 +010057static inline spci_vcpu_count_t hf_vcpu_get_count(spci_vm_id_t vm_id)
Andrew Scullf35a5c92018-08-07 18:09:46 +010058{
Andrew Scull19503262018-09-20 14:48:39 +010059 return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
Andrew Scullf35a5c92018-08-07 18:09:46 +010060}
61
Andrew Scull5ac05f02018-08-10 17:23:22 +010062/**
Andrew Walbran27faff32019-10-02 18:20:57 +010063 * Runs the given vCPU of the given VM.
64 */
65static inline struct spci_value spci_run(spci_vm_id_t vm_id,
66 spci_vcpu_index_t vcpu_idx)
67{
68 return spci_call((struct spci_value){.func = SPCI_RUN_32,
Andrew Walbran4db5f3a2019-11-04 11:42:42 +000069 spci_vm_vcpu(vm_id, vcpu_idx)});
Andrew Walbran27faff32019-10-02 18:20:57 +010070}
71
72/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +000073 * Hints that the vCPU is willing to yield its current use of the physical CPU.
Jose Marinho135dff32019-02-28 10:25:57 +000074 * This call always returns SPCI_SUCCESS.
Andrew Scull55c4d8b2018-12-18 18:50:18 +000075 */
Andrew Walbran16075b62019-09-03 17:11:07 +010076static inline struct spci_value spci_yield(void)
Andrew Scull55c4d8b2018-12-18 18:50:18 +000077{
Andrew Walbran16075b62019-09-03 17:11:07 +010078 return spci_call((struct spci_value){.func = SPCI_YIELD_32});
Andrew Scull55c4d8b2018-12-18 18:50:18 +000079}
80
81/**
Andrew Scull5ac05f02018-08-10 17:23:22 +010082 * Configures the pages to send/receive data through. The pages must not be
83 * shared.
Andrew Walbran54afb502018-11-26 16:01:11 +000084 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +000085 * Returns:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000086 * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
87 * aligned or are the same.
88 * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
89 * due to insuffient page table memory.
90 * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
91 * the caller.
92 * - SPCI_SUCCESS on success if no further action is needed.
93 * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
94 * needs to wake up or kick waiters.
Andrew Scull5ac05f02018-08-10 17:23:22 +010095 */
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000096static inline struct spci_value spci_rxtx_map(hf_ipaddr_t send,
97 hf_ipaddr_t recv)
Andrew Scull5ac05f02018-08-10 17:23:22 +010098{
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000099 return spci_call(
Andrew Walbran3abf29e2020-01-21 17:34:15 +0000100 (struct spci_value){.func = SPCI_RXTX_MAP_64,
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000101 .arg1 = send,
102 .arg2 = recv,
103 .arg3 = HF_MAILBOX_SIZE / SPCI_PAGE_SIZE});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100104}
105
106/**
Andrew Scullaa039b32018-10-04 15:02:26 +0100107 * Copies data from the sender's send buffer to the recipient's receive buffer.
Andrew Walbran54afb502018-11-26 16:01:11 +0000108 *
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +0000109 * If the recipient's receive buffer is busy, it can optionally register the
110 * caller to be notified when the recipient's receive buffer becomes available.
111 *
Andrew Walbran70bc8622019-10-07 14:15:58 +0100112 * Attributes may include:
113 * - SPCI_MSG_SEND_NOTIFY, to notify the caller when it should try again.
Andrew Walbran85aabe92019-12-03 12:03:03 +0000114 * - SPCI_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
Andrew Walbran70bc8622019-10-07 14:15:58 +0100115 * message.
116 *
117 * Returns SPCI_SUCCESS if the message is sent, or an error code otherwise:
118 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +0000119 * - BUSY: the message could not be delivered either because the mailbox
Andrew Walbran70bc8622019-10-07 14:15:58 +0100120 * was full or the target VM is not yet set up.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100121 */
Andrew Walbran70bc8622019-10-07 14:15:58 +0100122static inline struct spci_value spci_msg_send(spci_vm_id_t sender_vm_id,
123 spci_vm_id_t target_vm_id,
124 uint32_t size,
125 uint32_t attributes)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100126{
Andrew Walbran70bc8622019-10-07 14:15:58 +0100127 return spci_call((struct spci_value){
128 .func = SPCI_MSG_SEND_32,
129 .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
130 .arg3 = size,
131 .arg4 = attributes});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100132}
133
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000134static inline struct spci_value spci_mem_donate(uint32_t fragment_length,
135 uint32_t length,
136 uint32_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000137{
138 return spci_call((struct spci_value){.func = SPCI_MEM_DONATE_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000139 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000140 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000141 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000142}
143
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000144static inline struct spci_value spci_mem_lend(uint32_t fragment_length,
145 uint32_t length, uint32_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000146{
147 return spci_call((struct spci_value){.func = SPCI_MEM_LEND_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000148 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000149 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000150 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000151}
152
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000153static inline struct spci_value spci_mem_share(uint32_t fragment_length,
154 uint32_t length, uint32_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000155{
156 return spci_call((struct spci_value){.func = SPCI_MEM_SHARE_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000157 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000158 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000159 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000160}
161
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000162static inline struct spci_value hf_spci_mem_relinquish(uint32_t fragment_length,
163 uint32_t length,
164 uint32_t cookie)
Andrew Walbran82d6d152019-12-24 15:02:06 +0000165{
166 return spci_call((struct spci_value){.func = HF_SPCI_MEM_RELINQUISH,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000167 .arg3 = fragment_length,
Andrew Walbran82d6d152019-12-24 15:02:06 +0000168 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000169 .arg5 = cookie});
Andrew Walbran82d6d152019-12-24 15:02:06 +0000170}
171
Andrew Scull5ac05f02018-08-10 17:23:22 +0100172/**
Andrew Walbran0de4f162019-09-03 16:44:20 +0100173 * Called by secondary VMs to receive a message. This will block until a message
174 * is received.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100175 *
Andrew Scullaa039b32018-10-04 15:02:26 +0100176 * The mailbox must be cleared before a new message can be received.
Andrew Walbran9311c9a2019-03-12 16:59:04 +0000177 *
Andrew Walbran0de4f162019-09-03 16:44:20 +0100178 * If no message is immediately available and there are no enabled and pending
179 * interrupts (irrespective of whether interrupts are enabled globally), then
180 * this will block until a message is available or an enabled interrupt becomes
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000181 * pending. This matches the behaviour of the WFI instruction on AArch64, except
Andrew Walbran0de4f162019-09-03 16:44:20 +0100182 * that a message becoming available is also treated like a wake-up event.
Andrew Walbranc8500812019-06-26 10:36:48 +0100183 *
184 * Returns:
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100185 * - SPCI_MSG_SEND if a message is successfully received.
186 * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
187 * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100188 */
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100189static inline struct spci_value spci_msg_wait(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100190{
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100191 return spci_call((struct spci_value){.func = SPCI_MSG_WAIT_32});
Andrew Walbran0de4f162019-09-03 16:44:20 +0100192}
193
194/**
195 * Called by secondary VMs to receive a message. The call will return whether or
196 * not a message is available.
197 *
198 * The mailbox must be cleared before a new message can be received.
199 *
200 * Returns:
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100201 * - SPCI_MSG_SEND if a message is successfully received.
202 * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
203 * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
204 * - SPCI_ERROR SPCI_RETRY if there was no pending message.
Andrew Walbran0de4f162019-09-03 16:44:20 +0100205 */
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100206static inline struct spci_value spci_msg_poll(void)
Andrew Walbran0de4f162019-09-03 16:44:20 +0100207{
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100208 return spci_call((struct spci_value){.func = SPCI_MSG_POLL_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100209}
210
211/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000212 * Releases the caller's mailbox so that a new message can be received. The
213 * caller must have copied out all data they wish to preserve as new messages
214 * will overwrite the old and will arrive asynchronously.
Andrew Walbran54afb502018-11-26 16:01:11 +0000215 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000216 * Returns:
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000217 * - SPCI_ERROR SPCI_DENIED on failure, if the mailbox hasn't been read.
218 * - SPCI_SUCCESS on success if no further action is needed.
219 * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
220 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000221 * hf_mailbox_waiter_get.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100222 */
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000223static inline struct spci_value spci_rx_release(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100224{
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000225 return spci_call((struct spci_value){.func = SPCI_RX_RELEASE_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100226}
Andrew Walbran318f5732018-11-20 16:23:42 +0000227
228/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000229 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
230 * by this function, the caller must have called api_mailbox_send before with
231 * the notify argument set to true, and this call must have failed because the
232 * mailbox was not available.
233 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000234 * It should be called repeatedly to retrieve a list of VMs.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000235 *
236 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
237 * became writable.
238 */
239static inline int64_t hf_mailbox_writable_get(void)
240{
241 return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
242}
243
244/**
245 * Retrieves the next VM waiting to be notified that the mailbox of the
246 * specified VM became writable. Only primary VMs are allowed to call this.
247 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000248 * Returns -1 on failure or if there are no waiters; the VM id of the next
249 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000250 */
Andrew Walbran95534922019-06-19 11:32:54 +0100251static inline int64_t hf_mailbox_waiter_get(spci_vm_id_t vm_id)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000252{
253 return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
254}
255
256/**
Andrew Walbran318f5732018-11-20 16:23:42 +0000257 * Enables or disables a given interrupt ID.
258 *
259 * Returns 0 on success, or -1 if the intid is invalid.
260 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000261static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable)
Andrew Walbran318f5732018-11-20 16:23:42 +0000262{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000263 return hf_call(HF_INTERRUPT_ENABLE, intid, enable, 0);
Andrew Walbran318f5732018-11-20 16:23:42 +0000264}
265
266/**
267 * Gets the ID of the pending interrupt (if any) and acknowledge it.
268 *
269 * Returns HF_INVALID_INTID if there are no pending interrupts.
270 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000271static inline uint32_t hf_interrupt_get(void)
Andrew Walbran318f5732018-11-20 16:23:42 +0000272{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000273 return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
Andrew Walbran318f5732018-11-20 16:23:42 +0000274}
275
276/**
277 * Injects a virtual interrupt of the given ID into the given target vCPU.
278 * This doesn't cause the vCPU to actually be run immediately; it will be taken
279 * when the vCPU is next run, which is up to the scheduler.
280 *
Andrew Walbran3d84a262018-12-13 14:41:19 +0000281 * Returns:
282 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
283 * ID is invalid, or the current VM is not allowed to inject interrupts to
284 * the target VM.
285 * - 0 on success if no further action is needed.
286 * - 1 if it was called by the primary VM and the primary VM now needs to wake
287 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +0000288 */
Andrew Walbran95534922019-06-19 11:32:54 +0100289static inline int64_t hf_interrupt_inject(spci_vm_id_t target_vm_id,
Andrew Walbranb037d5b2019-06-25 17:19:41 +0100290 spci_vcpu_index_t target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000291 uint32_t intid)
292{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000293 return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000294 intid);
295}
Andrew Scull6386f252018-12-06 13:29:10 +0000296
297/**
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +0100298 * Sends a character to the debug log for the VM.
299 *
300 * Returns 0 on success, or -1 if it failed for some reason.
301 */
302static inline int64_t hf_debug_log(char c)
303{
304 return hf_call(HF_DEBUG_LOG, c, 0, 0);
305}
306
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100307/** Obtains the Hafnium's version of the implemented SPCI specification. */
Andrew Walbran7f920af2019-09-03 17:09:30 +0100308static inline struct spci_value spci_version(void)
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100309{
Andrew Walbran7f920af2019-09-03 17:09:30 +0100310 return spci_call((struct spci_value){.func = SPCI_VERSION_32});
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100311}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100312
313/**
314 * Discovery function returning information about the implementation of optional
315 * SPCI interfaces.
316 *
317 * Returns:
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000318 * - SPCI_SUCCESS in .func if the optional interface with function_id is
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100319 * implemented.
320 * - SPCI_ERROR in .func if the optional interface with function_id is not
321 * implemented.
322 */
323static inline struct spci_value spci_features(uint32_t function_id)
324{
325 return spci_call((struct spci_value){.func = SPCI_FEATURES_32,
326 .arg1 = function_id});
327}