/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hf/abi.h"
#include "hf/spci.h"
#include "hf/types.h"

/**
 * These functions must be implemented to trigger the architecture-specific
 * mechanism to call into the hypervisor.
 */
int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3);
struct spci_value spci_call(struct spci_value args);

/**
 * Returns the VM's own ID.
 */
static inline struct spci_value spci_id_get(void)
{
	return spci_call((struct spci_value){.func = SPCI_ID_GET_32});
}

/**
 * Returns the VM's own ID, extracted from the SPCI_ID_GET result.
 */
static inline spci_vm_id_t hf_vm_get_id(void)
{
	return spci_id_get().arg2;
}

/**
 * Returns the number of secondary VMs.
 */
static inline spci_vm_count_t hf_vm_get_count(void)
{
	return hf_call(HF_VM_GET_COUNT, 0, 0, 0);
}

/**
 * Returns the number of vCPUs configured in the given secondary VM.
 */
static inline spci_vcpu_count_t hf_vcpu_get_count(spci_vm_id_t vm_id)
{
	return hf_call(HF_VCPU_GET_COUNT, vm_id, 0, 0);
}

/**
 * Runs the given vCPU of the given VM.
 */
static inline struct spci_value spci_run(spci_vm_id_t vm_id,
					 spci_vcpu_index_t vcpu_idx)
{
	return spci_call((struct spci_value){
		.func = SPCI_RUN_32,
		.arg1 = ((uint32_t)vm_id << 16) | vcpu_idx});
}

/**
 * Runs the given vCPU of the given VM.
 *
 * Returns an hf_vcpu_run_return struct telling the scheduler what to do next.
 */
static inline struct hf_vcpu_run_return hf_vcpu_run(spci_vm_id_t vm_id,
						    spci_vcpu_index_t vcpu_idx)
{
	return hf_vcpu_run_return_decode(spci_run(vm_id, vcpu_idx));
}
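
/*
 * Example usage (an illustrative sketch only, not part of this API): the
 * primary VM's scheduler running vCPU 0 of a secondary VM and reacting to the
 * result. The `code` field and the HF_VCPU_RUN_* enumerators are assumed to
 * come from "hf/abi.h".
 *
 *	struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, 0);
 *
 *	switch (ret.code) {
 *	case HF_VCPU_RUN_YIELD:
 *		// Re-queue the vCPU and schedule something else.
 *		break;
 *	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *		// Don't run the vCPU again until an interrupt is injected.
 *		break;
 *	default:
 *		break;
 *	}
 */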

/**
 * Hints that the vCPU is willing to yield its current use of the physical CPU.
 * This call always returns SPCI_SUCCESS.
 */
static inline struct spci_value spci_yield(void)
{
	return spci_call((struct spci_value){.func = SPCI_YIELD_32});
}

/**
 * Configures the pages to send/receive data through. The pages must not be
 * shared.
 *
 * Returns:
 *  - -1 on failure.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters.
 */
static inline int64_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
{
	return hf_call(HF_VM_CONFIGURE, send, recv, 0);
}
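
/*
 * Example usage (an illustrative sketch, assuming an identity-mapped VM where
 * a pointer value can be used directly as an hf_ipaddr_t, and C11 alignas from
 * <stdalign.h>; `send_page` and `recv_page` are hypothetical buffers, not part
 * of this API): setting up the mailbox once before any messages are exchanged.
 *
 *	alignas(4096) static uint8_t send_page[4096];
 *	alignas(4096) static uint8_t recv_page[4096];
 *
 *	if (hf_vm_configure((hf_ipaddr_t)send_page,
 *			    (hf_ipaddr_t)recv_page) == -1) {
 *		// Mailbox setup failed; send/receive calls will not work.
 *	}
 */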

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 *
 * Attributes may include:
 *  - SPCI_MSG_SEND_NOTIFY, to notify the caller when it should try again.
 *  - SPCI_MSG_SEND_LEGACY_MEMORY, to send a legacy architected memory sharing
 *    message.
 *
 * Returns SPCI_SUCCESS if the message is sent, or an error code otherwise:
 *  - INVALID_PARAMETERS: one or more of the parameters do not conform.
 *  - BUSY: the message could not be delivered, either because the mailbox
 *    was full or the target VM is not yet set up.
 */
static inline struct spci_value spci_msg_send(spci_vm_id_t sender_vm_id,
					      spci_vm_id_t target_vm_id,
					      uint32_t size,
					      uint32_t attributes)
{
	return spci_call((struct spci_value){
		.func = SPCI_MSG_SEND_32,
		.arg1 = ((uint64_t)sender_vm_id << 16) | target_vm_id,
		.arg3 = size,
		.arg4 = attributes});
}
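
/*
 * Example usage (an illustrative sketch; HF_PRIMARY_VM_ID as the recipient,
 * the `send_page` buffer from the hf_vm_configure() sketch above, and the
 * assumption that the error code is reported in .arg2 are illustrative, not
 * guaranteed by this header): sending a small payload to the primary VM.
 *
 *	const char msg[] = "ping";
 *
 *	memcpy(send_page, msg, sizeof(msg));
 *	struct spci_value ret = spci_msg_send(hf_vm_get_id(), HF_PRIMARY_VM_ID,
 *					      sizeof(msg), 0);
 *	if (ret.func == SPCI_ERROR_32) {
 *		// e.g. SPCI_BUSY in ret.arg2: the recipient's mailbox is full,
 *		// so try again later (or pass SPCI_MSG_SEND_NOTIFY and wait).
 *	}
 */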

/**
 * Called by secondary VMs to receive a message. This will block until a
 * message is received.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * If no message is immediately available and there are no enabled and pending
 * interrupts (irrespective of whether interrupts are enabled globally), then
 * this will block until a message is available or an enabled interrupt becomes
 * pending. This matches the behaviour of the WFI instruction on aarch64,
 * except that a message becoming available is also treated like a wake-up
 * event.
 *
 * Returns:
 *  - SPCI_MSG_SEND if a message is successfully received.
 *  - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
 *  - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
 */
static inline struct spci_value spci_msg_wait(void)
{
	return spci_call((struct spci_value){.func = SPCI_MSG_WAIT_32});
}

/**
 * Called by secondary VMs to receive a message. The call will return whether
 * or not a message is available.
 *
 * The mailbox must be cleared before a new message can be received.
 *
 * Returns:
 *  - SPCI_MSG_SEND if a message is successfully received.
 *  - SPCI_ERROR SPCI_NOT_SUPPORTED if called from the primary VM.
 *  - SPCI_ERROR SPCI_INTERRUPTED if an interrupt happened during the call.
 *  - SPCI_ERROR SPCI_RETRY if there was no pending message.
 */
static inline struct spci_value spci_msg_poll(void)
{
	return spci_call((struct spci_value){.func = SPCI_MSG_POLL_32});
}
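
/*
 * Example usage (an illustrative sketch; the message-handling step is left
 * abstract): a secondary VM blocking for each incoming message and then
 * releasing its mailbox so that the next message can be delivered.
 *
 *	for (;;) {
 *		struct spci_value ret = spci_msg_wait();
 *
 *		if (ret.func != SPCI_MSG_SEND_32) {
 *			// e.g. interrupted; there is no message to read.
 *			continue;
 *		}
 *		// Process the message now sitting in the receive buffer.
 *		hf_mailbox_clear();
 *	}
 */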

/**
 * Clears the caller's mailbox so a new message can be received.
 *
 * Returns:
 *  - -1 on failure, if the mailbox hasn't been read.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
static inline int64_t hf_mailbox_clear(void)
{
	return hf_call(HF_MAILBOX_CLEAR, 0, 0, 0);
}

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the ID of the VM whose mailbox
 * became writable.
 */
static inline int64_t hf_mailbox_writable_get(void)
{
	return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM ID of the next
 * waiter otherwise.
 */
static inline int64_t hf_mailbox_waiter_get(spci_vm_id_t vm_id)
{
	return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
}
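
/*
 * Example usage (an illustrative sketch; how the primary VM notifies each
 * waiter is scheduler-specific and left abstract): when hf_mailbox_clear()
 * returns 1 in the primary VM, drain the list of VMs waiting on its mailbox.
 *
 *	if (hf_mailbox_clear() == 1) {
 *		int64_t waiter;
 *
 *		while ((waiter = hf_mailbox_waiter_get(hf_vm_get_id())) != -1) {
 *			// Notify VM `waiter` that the mailbox is writable.
 *		}
 *	}
 */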

/**
 * Enables or disables a given interrupt ID.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
static inline int64_t hf_interrupt_enable(uint32_t intid, bool enable)
{
	return hf_call(HF_INTERRUPT_ENABLE, intid, enable, 0);
}

/**
 * Gets the ID of the pending interrupt (if any) and acknowledges it.
 *
 * Returns HF_INVALID_INTID if there are no pending interrupts.
 */
static inline uint32_t hf_interrupt_get(void)
{
	return hf_call(HF_INTERRUPT_GET, 0, 0, 0);
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *    ID is invalid, or the current VM is not allowed to inject interrupts to
 *    the target VM.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
static inline int64_t hf_interrupt_inject(spci_vm_id_t target_vm_id,
					  spci_vcpu_index_t target_vcpu_idx,
					  uint32_t intid)
{
	return hf_call(HF_INTERRUPT_INJECT, target_vm_id, target_vcpu_idx,
		       intid);
}
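
/*
 * Example usage (an illustrative sketch; the interrupt ID 33 and the choice to
 * run vCPU 0 immediately are arbitrary assumptions): the primary VM injecting
 * a virtual interrupt and waking the target vCPU when asked to.
 *
 *	if (hf_interrupt_inject(target_vm_id, 0, 33) == 1) {
 *		// The target vCPU needs to be woken up, e.g. by running it.
 *		hf_vcpu_run(target_vm_id, 0);
 *	}
 */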

/**
 * Shares a region of memory with another VM.
 *
 * Returns 0 on success or -1 if the sharing was not allowed or failed.
 *
 * TODO: replace this with a better API once we have decided what that should
 * look like.
 */
static inline int64_t hf_share_memory(spci_vm_id_t vm_id, hf_ipaddr_t addr,
				      size_t size, enum hf_share share)
{
	return hf_call(HF_SHARE_MEMORY, (((uint64_t)vm_id) << 32) | share, addr,
		       size);
}

/**
 * Sends a character to the debug log for the VM.
 *
 * Returns 0 on success, or -1 if it failed for some reason.
 */
static inline int64_t hf_debug_log(char c)
{
	return hf_call(HF_DEBUG_LOG, c, 0, 0);
}

/** Returns Hafnium's version of the implemented SPCI specification. */
static inline struct spci_value spci_version(void)
{
	return spci_call((struct spci_value){.func = SPCI_VERSION_32});
}

/**
 * Discovery function returning information about the implementation of
 * optional SPCI interfaces.
 *
 * Returns:
 *  - SPCI_SUCCESS in .func if the optional interface with function_id is
 *    implemented.
 *  - SPCI_ERROR in .func if the optional interface with function_id is not
 *    implemented.
 */
static inline struct spci_value spci_features(uint32_t function_id)
{
	return spci_call((struct spci_value){.func = SPCI_FEATURES_32,
					     .arg1 = function_id});
}
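
/*
 * Example usage (an illustrative sketch; SPCI_SUCCESS_32 is assumed to be the
 * .func value reported for implemented interfaces, per "hf/spci.h"): probing
 * once at start of day whether an optional interface may be relied upon.
 *
 *	struct spci_value ret = spci_features(SPCI_MSG_SEND_32);
 *	bool msg_send_supported = (ret.func == SPCI_SUCCESS_32);
 */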