/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given VM.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->cpu = current->cpu;
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}
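
/*
 * Example: how the primary VM's kernel might drive api_vcpu_run(). This is
 * an illustrative, guest-side sketch rather than hypervisor code: it assumes
 * the hf_vcpu_run() and hf_vcpu_get_count() hypercall wrappers declared in
 * "vmapi/hf/call.h".
 *
 *	void run_secondary_vm(uint32_t vm_id)
 *	{
 *		int64_t count = hf_vcpu_get_count(vm_id);
 *		int64_t i;
 *
 *		for (i = 0; i < count; ++i) {
 *			struct hf_vcpu_run_return ret =
 *				hf_vcpu_run(vm_id, (uint32_t)i);
 *
 *			switch (ret.code) {
 *			case HF_VCPU_RUN_YIELD:
 *				// The vcpu ran; schedule it again later.
 *				break;
 *			case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *				// Don't run the vcpu until it's interrupted.
 *				break;
 *			case HF_VCPU_RUN_WAKE_UP:
 *				// Run ret.wake_up.vcpu of ret.wake_up.vm_id.
 *				break;
 *			case HF_VCPU_RUN_MESSAGE:
 *				// ret.message.size bytes await in the mailbox.
 *				break;
 *			}
 *		}
 *	}
 */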

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM, which ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
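
/*
 * Example: mailbox setup from a guest's point of view. A minimal sketch, not
 * part of this file: it assumes the hf_vm_configure() wrapper declared in
 * "vmapi/hf/call.h" and an integer IPA type hf_ipaddr_t. The buffers must be
 * distinct, page-aligned pages that are not shared with any other VM.
 *
 *	static uint8_t send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
 *	static uint8_t recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
 *
 *	bool mailbox_setup(void)
 *	{
 *		// Returns 0 on success; setup is allowed only once per VM.
 *		return hf_vm_configure((hf_ipaddr_t)send_page,
 *				       (hf_ipaddr_t)recv_page) == 0;
 *	}
 */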

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;
	struct hf_vcpu_run_return primary_ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		primary_ret.code = HF_VCPU_RUN_MESSAGE;
		primary_ret.message.size = size;
		ret = 0;
		/*
		 * clang-tidy isn't able to prove that
		 * `from->id != HF_PRIMARY_VM_ID` so cover that specific case
		 * explicitly so as not to hide other possible bugs. clang-check
		 * is more clever and finds that this is dead code so we also
		 * pretend to use the new value.
		 */
		if (from->id == HF_PRIMARY_VM_ID) {
			vcpu = 0;
			(void)vcpu;
		}
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark as ready
		 * to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode((
					     struct hf_mailbox_receive_return){
					     .vm_id = to->mailbox.recv_from_id,
					     .size = size,
				     }));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	primary_ret.code = HF_VCPU_RUN_WAKE_UP;
	primary_ret.wake_up.vm_id = to->id;
	primary_ret.wake_up.vcpu = vcpu;
	ret = 0;

out:
	/*
	 * Unlock before routing the return values as switching to the primary
	 * will acquire more locks and nesting the locks is avoidable.
	 */
	sl_unlock(&to->lock);

	/* Report errors to the sender. */
	if (ret != 0) {
		return ret;
	}

	/* If the sender is the primary, return the vcpu to schedule. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return primary_ret.wake_up.vcpu;
	}

	/* Switch to primary for scheduling and return success to the sender. */
	*next = api_switch_to_primary(current, primary_ret, vcpu_state_ready);
	return 0;
}
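
/*
 * Example: sending a message from a guest. A minimal sketch assuming the
 * hf_mailbox_send() wrapper declared in "vmapi/hf/call.h" and the send_page
 * buffer registered with hf_vm_configure() in the earlier sketch.
 *
 *	int64_t send_string(uint32_t to_id, const char *str)
 *	{
 *		size_t size = strlen(str) + 1;
 *
 *		if (size > HF_MAILBOX_SIZE) {
 *			return -1;
 *		}
 *		memcpy(send_page, str, size);
 *		// Fails with -1 if the recipient's mailbox isn't empty.
 *		return hf_mailbox_send(to_id, size);
 *	}
 */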

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);
	current->state = vcpu_state_blocked_mailbox;

	/* Push vcpu into waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to the primary VM to block. */
	*next = api_wait_for_interrupt(current);
out:
	sl_unlock(&vm->lock);

	return ret;
}
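
/*
 * Example: a secondary VM's message loop. A minimal sketch assuming the
 * hf_mailbox_receive() and hf_mailbox_clear() wrappers declared in
 * "vmapi/hf/call.h", the recv_page buffer registered in the earlier sketch,
 * and a hypothetical handle_message() helper.
 *
 *	void message_loop(void)
 *	{
 *		for (;;) {
 *			// Block until a message arrives.
 *			struct hf_mailbox_receive_return res =
 *				hf_mailbox_receive(true);
 *
 *			if (res.vm_id == HF_INVALID_VM_ID) {
 *				continue;
 *			}
 *			handle_message(res.vm_id, recv_page, res.size);
 *			// Clear the mailbox to allow the next delivery.
 *			hf_mailbox_clear();
 *		}
 *	}
 */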

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}