blob: d4119d6720257d186230d8afc9415461d9488158 [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Andrew Scullfbc938a2018-08-20 14:09:28 +010017#pragma once
Andrew Scullf35a5c92018-08-07 18:09:46 +010018
Andrew Scull6d2db332018-10-10 15:28:17 +010019#include "hf/abi.h"
Jose Marinhoa1dfeda2019-02-27 16:46:03 +000020#include "hf/spci.h"
Andrew Scull6d2db332018-10-10 15:28:17 +010021#include "hf/types.h"
Andrew Scullf35a5c92018-08-07 18:09:46 +010022
/**
 * This function must be implemented to trigger the architecture-specific
 * mechanism to call to the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);

/**
 * Architecture-specific SPCI call into the hypervisor: passes the given
 * register values and returns the values the hypervisor placed in them.
 */
struct spci_value spci_call(struct spci_value args);
Andrew Scullf35a5c92018-08-07 18:09:46 +010029
Andrew Scull5ac05f02018-08-10 17:23:22 +010030/**
Andrew Scull55c4d8b2018-12-18 18:50:18 +000031 * Returns the VM's own ID.
Andrew Scull5ac05f02018-08-10 17:23:22 +010032 */
Andrew Walbrand230f662019-10-07 18:03:36 +010033static inline struct spci_value spci_id_get(void)
34{
35 return spci_call((struct spci_value){.func = SPCI_ID_GET_32});
36}
37
38/**
39 * Returns the VM's own ID.
40 */
Andrew Walbran95534922019-06-19 11:32:54 +010041static inline spci_vm_id_t hf_vm_get_id(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +010042{
Andrew Walbrand230f662019-10-07 18:03:36 +010043 return spci_id_get().arg2;
Andrew Scull5ac05f02018-08-10 17:23:22 +010044}
45
46/**
Andrew Scullf35a5c92018-08-07 18:09:46 +010047 * Returns the number of secondary VMs.
48 */
Andrew Walbran52d99672019-06-25 15:51:11 +010049static inline spci_vm_count_t hf_vm_get_count(void)
Andrew Scullf35a5c92018-08-07 18:09:46 +010050{
51 return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
52}
53
Andrew Scull5ac05f02018-08-10 17:23:22 +010054/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +000055 * Returns the number of vCPUs configured in the given secondary VM.
Andrew Scullf35a5c92018-08-07 18:09:46 +010056 */
Andrew Walbranc6d23c42019-06-26 13:30:42 +010057static inline spci_vcpu_count_t hf_vcpu_get_count(spci_vm_id_t vm_id)
Andrew Scullf35a5c92018-08-07 18:09:46 +010058{
Andrew Scull19503262018-09-20 14:48:39 +010059 return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
Andrew Scullf35a5c92018-08-07 18:09:46 +010060}
61
Andrew Scull5ac05f02018-08-10 17:23:22 +010062/**
Andrew Walbran27faff32019-10-02 18:20:57 +010063 * Runs the given vCPU of the given VM.
64 */
65static inline struct spci_value spci_run(spci_vm_id_t vm_id,
66 spci_vcpu_index_t vcpu_idx)
67{
68 return spci_call((struct spci_value){.func = SPCI_RUN_32,
Andrew Walbran4db5f3a2019-11-04 11:42:42 +000069 spci_vm_vcpu(vm_id, vcpu_idx)});
Andrew Walbran27faff32019-10-02 18:20:57 +010070}
71
72/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +000073 * Hints that the vCPU is willing to yield its current use of the physical CPU.
Jose Marinho135dff32019-02-28 10:25:57 +000074 * This call always returns SPCI_SUCCESS.
Andrew Scull55c4d8b2018-12-18 18:50:18 +000075 */
Andrew Walbran16075b62019-09-03 17:11:07 +010076static inline struct spci_value spci_yield(void)
Andrew Scull55c4d8b2018-12-18 18:50:18 +000077{
Andrew Walbran16075b62019-09-03 17:11:07 +010078 return spci_call((struct spci_value){.func = SPCI_YIELD_32});
Andrew Scull55c4d8b2018-12-18 18:50:18 +000079}
80
81/**
Andrew Scull5ac05f02018-08-10 17:23:22 +010082 * Configures the pages to send/receive data through. The pages must not be
83 * shared.
Andrew Walbran54afb502018-11-26 16:01:11 +000084 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +000085 * Returns:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000086 * - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
87 * aligned or are the same.
88 * - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
89 * due to insuffient page table memory.
90 * - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
91 * the caller.
92 * - SPCI_SUCCESS on success if no further action is needed.
93 * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
94 * needs to wake up or kick waiters.
Andrew Scull5ac05f02018-08-10 17:23:22 +010095 */
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000096static inline struct spci_value spci_rxtx_map(hf_ipaddr_t send,
97 hf_ipaddr_t recv)
Andrew Scull5ac05f02018-08-10 17:23:22 +010098{
Andrew Walbranbfffb0f2019-11-05 14:02:34 +000099 return spci_call(
Andrew Walbran3abf29e2020-01-21 17:34:15 +0000100 (struct spci_value){.func = SPCI_RXTX_MAP_64,
Andrew Walbranbfffb0f2019-11-05 14:02:34 +0000101 .arg1 = send,
102 .arg2 = recv,
103 .arg3 = HF_MAILBOX_SIZE / SPCI_PAGE_SIZE});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100104}
105
106/**
Andrew Scullaa039b32018-10-04 15:02:26 +0100107 * Copies data from the sender's send buffer to the recipient's receive buffer.
Andrew Walbran54afb502018-11-26 16:01:11 +0000108 *
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +0000109 * If the recipient's receive buffer is busy, it can optionally register the
110 * caller to be notified when the recipient's receive buffer becomes available.
111 *
Andrew Walbran70bc8622019-10-07 14:15:58 +0100112 * Attributes may include:
113 * - SPCI_MSG_SEND_NOTIFY, to notify the caller when it should try again.
Andrew Walbran85aabe92019-12-03 12:03:03 +0000114 * - SPCI_MSG_SEND_LEGACY_MEMORY_*, to send a legacy architected memory sharing
Andrew Walbran70bc8622019-10-07 14:15:58 +0100115 * message.
116 *
117 * Returns SPCI_SUCCESS if the message is sent, or an error code otherwise:
118 * - INVALID_PARAMETERS: one or more of the parameters do not conform.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +0000119 * - BUSY: the message could not be delivered either because the mailbox
Andrew Walbran70bc8622019-10-07 14:15:58 +0100120 * was full or the target VM is not yet set up.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100121 */
Andrew Walbran70bc8622019-10-07 14:15:58 +0100122static inline struct spci_value spci_msg_send(spci_vm_id_t sender_vm_id,
123 spci_vm_id_t target_vm_id,
124 uint32_t size,
125 uint32_t attributes)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100126{
Andrew Walbran70bc8622019-10-07 14:15:58 +0100127 return spci_call((struct spci_value){
128 .func = SPCI_MSG_SEND_32,
129 .arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
130 .arg3 = size,
131 .arg4 = attributes});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100132}
133
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000134static inline struct spci_value spci_mem_donate(uint32_t fragment_length,
135 uint32_t length,
Andrew Walbran382e9182020-03-04 11:27:27 +0000136 spci_cookie_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000137{
138 return spci_call((struct spci_value){.func = SPCI_MEM_DONATE_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000139 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000140 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000141 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000142}
143
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000144static inline struct spci_value spci_mem_lend(uint32_t fragment_length,
Andrew Walbran382e9182020-03-04 11:27:27 +0000145 uint32_t length,
146 spci_cookie_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000147{
148 return spci_call((struct spci_value){.func = SPCI_MEM_LEND_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000149 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000150 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000151 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000152}
153
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000154static inline struct spci_value spci_mem_share(uint32_t fragment_length,
Andrew Walbran382e9182020-03-04 11:27:27 +0000155 uint32_t length,
156 spci_cookie_t cookie)
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000157{
158 return spci_call((struct spci_value){.func = SPCI_MEM_SHARE_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000159 .arg3 = fragment_length,
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000160 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000161 .arg5 = cookie});
Andrew Walbrane908c4a2019-12-02 17:13:47 +0000162}
163
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000164static inline struct spci_value spci_mem_retrieve_req(uint32_t fragment_length,
165 uint32_t length,
Andrew Walbran382e9182020-03-04 11:27:27 +0000166 spci_cookie_t cookie)
Andrew Walbran82d6d152019-12-24 15:02:06 +0000167{
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000168 return spci_call((struct spci_value){.func = SPCI_MEM_RETRIEVE_REQ_32,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000169 .arg3 = fragment_length,
Andrew Walbran82d6d152019-12-24 15:02:06 +0000170 .arg4 = length,
Andrew Walbran0f6cede2020-01-10 15:38:09 +0000171 .arg5 = cookie});
Andrew Walbran82d6d152019-12-24 15:02:06 +0000172}
173
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000174static inline struct spci_value spci_mem_relinquish(void)
175{
176 return spci_call((struct spci_value){.func = SPCI_MEM_RELINQUISH_32});
177}
178
179static inline struct spci_value spci_mem_reclaim(uint32_t handle,
180 uint32_t flags)
181{
182 return spci_call((struct spci_value){
183 .func = SPCI_MEM_RECLAIM_32, .arg1 = handle, .arg2 = flags});
184}
185
Andrew Scull5ac05f02018-08-10 17:23:22 +0100186/**
Andrew Walbran0de4f162019-09-03 16:44:20 +0100187 * Called by secondary VMs to receive a message. This will block until a message
188 * is received.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100189 *
Andrew Scullaa039b32018-10-04 15:02:26 +0100190 * The mailbox must be cleared before a new message can be received.
Andrew Walbran9311c9a2019-03-12 16:59:04 +0000191 *
Andrew Walbran0de4f162019-09-03 16:44:20 +0100192 * If no message is immediately available and there are no enabled and pending
193 * interrupts (irrespective of whether interrupts are enabled globally), then
194 * this will block until a message is available or an enabled interrupt becomes
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000195 * pending. This matches the behaviour of the WFI instruction on AArch64, except
Andrew Walbran0de4f162019-09-03 16:44:20 +0100196 * that a message becoming available is also treated like a wake-up event.
Andrew Walbranc8500812019-06-26 10:36:48 +0100197 *
198 * Returns:
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100199 * - SPCI_MSG_SEND if a message is successfully received.
200 * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
201 * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100202 */
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100203static inline struct spci_value spci_msg_wait(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100204{
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100205 return spci_call((struct spci_value){.func = SPCI_MSG_WAIT_32});
Andrew Walbran0de4f162019-09-03 16:44:20 +0100206}
207
208/**
209 * Called by secondary VMs to receive a message. The call will return whether or
210 * not a message is available.
211 *
212 * The mailbox must be cleared before a new message can be received.
213 *
214 * Returns:
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100215 * - SPCI_MSG_SEND if a message is successfully received.
216 * - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
217 * - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
218 * - SPCI_ERROR SPCI_RETRY if there was no pending message.
Andrew Walbran0de4f162019-09-03 16:44:20 +0100219 */
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100220static inline struct spci_value spci_msg_poll(void)
Andrew Walbran0de4f162019-09-03 16:44:20 +0100221{
Andrew Walbrand4d2fa12019-10-01 16:47:25 +0100222 return spci_call((struct spci_value){.func = SPCI_MSG_POLL_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100223}
224
225/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000226 * Releases the caller's mailbox so that a new message can be received. The
227 * caller must have copied out all data they wish to preserve as new messages
228 * will overwrite the old and will arrive asynchronously.
Andrew Walbran54afb502018-11-26 16:01:11 +0000229 *
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000230 * Returns:
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000231 * - SPCI_ERROR SPCI_DENIED on failure, if the mailbox hasn't been read.
232 * - SPCI_SUCCESS on success if no further action is needed.
233 * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
234 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000235 * hf_mailbox_waiter_get.
Andrew Scull5ac05f02018-08-10 17:23:22 +0100236 */
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000237static inline struct spci_value spci_rx_release(void)
Andrew Scull5ac05f02018-08-10 17:23:22 +0100238{
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +0000239 return spci_call((struct spci_value){.func = SPCI_RX_RELEASE_32});
Andrew Scull5ac05f02018-08-10 17:23:22 +0100240}
Andrew Walbran318f5732018-11-20 16:23:42 +0000241
242/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000243 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
244 * by this function, the caller must have called api_mailbox_send before with
245 * the notify argument set to true, and this call must have failed because the
246 * mailbox was not available.
247 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000248 * It should be called repeatedly to retrieve a list of VMs.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000249 *
250 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
251 * became writable.
252 */
253static inline int64_t hf_mailbox_writable_get(void)
254{
255 return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
256}
257
258/**
259 * Retrieves the next VM waiting to be notified that the mailbox of the
260 * specified VM became writable. Only primary VMs are allowed to call this.
261 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +0000262 * Returns -1 on failure or if there are no waiters; the VM id of the next
263 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000264 */
Andrew Walbran95534922019-06-19 11:32:54 +0100265static inline int64_t hf_mailbox_waiter_get(spci_vm_id_t vm_id)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +0000266{
267 return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
268}
269
270/**
Andrew Walbran318f5732018-11-20 16:23:42 +0000271 * Enables or disables a given interrupt ID.
272 *
273 * Returns 0 on success, or -1 if the intid is invalid.
274 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000275static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable)
Andrew Walbran318f5732018-11-20 16:23:42 +0000276{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000277 return hf_call(HF_INTERRUPT_ENABLE, intid, enable, 0);
Andrew Walbran318f5732018-11-20 16:23:42 +0000278}
279
280/**
281 * Gets the ID of the pending interrupt (if any) and acknowledge it.
282 *
283 * Returns HF_INVALID_INTID if there are no pending interrupts.
284 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000285static inline uint32_t hf_interrupt_get(void)
Andrew Walbran318f5732018-11-20 16:23:42 +0000286{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000287 return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
Andrew Walbran318f5732018-11-20 16:23:42 +0000288}
289
290/**
291 * Injects a virtual interrupt of the given ID into the given target vCPU.
292 * This doesn't cause the vCPU to actually be run immediately; it will be taken
293 * when the vCPU is next run, which is up to the scheduler.
294 *
Andrew Walbran3d84a262018-12-13 14:41:19 +0000295 * Returns:
296 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
297 * ID is invalid, or the current VM is not allowed to inject interrupts to
298 * the target VM.
299 * - 0 on success if no further action is needed.
300 * - 1 if it was called by the primary VM and the primary VM now needs to wake
301 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +0000302 */
Andrew Walbran95534922019-06-19 11:32:54 +0100303static inline int64_t hf_interrupt_inject(spci_vm_id_t target_vm_id,
Andrew Walbranb037d5b2019-06-25 17:19:41 +0100304 spci_vcpu_index_t target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000305 uint32_t intid)
306{
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +0000307 return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
Andrew Walbran318f5732018-11-20 16:23:42 +0000308 intid);
309}
Andrew Scull6386f252018-12-06 13:29:10 +0000310
311/**
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +0100312 * Sends a character to the debug log for the VM.
313 *
314 * Returns 0 on success, or -1 if it failed for some reason.
315 */
316static inline int64_t hf_debug_log(char c)
317{
318 return hf_call(HF_DEBUG_LOG, c, 0, 0);
319}
320
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100321/** Obtains the Hafnium's version of the implemented SPCI specification. */
Andrew Walbran9fd29072020-04-22 12:12:14 +0100322static inline int32_t spci_version(uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100323{
Andrew Walbran9fd29072020-04-22 12:12:14 +0100324 return spci_call((struct spci_value){.func = SPCI_VERSION_32,
325 .arg1 = requested_version})
326 .func;
Jose Marinhofc0b2b62019-06-06 11:18:45 +0100327}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100328
329/**
330 * Discovery function returning information about the implementation of optional
331 * SPCI interfaces.
332 *
333 * Returns:
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000334 * - SPCI_SUCCESS in .func if the optional interface with function_id is
Jose Marinhoc0f4ff22019-10-09 10:37:42 +0100335 * implemented.
336 * - SPCI_ERROR in .func if the optional interface with function_id is not
337 * implemented.
338 */
339static inline struct spci_value spci_features(uint32_t function_id)
340{
341 return spci_call((struct spci_value){.func = SPCI_FEATURES_32,
342 .arg1 = function_id});
343}