/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret)
{
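	/*
	 * The primary VM is assumed to have one vcpu per physical CPU, so the
	 * one matching the current physical CPU is the one to resume.
	 */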
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Mark the current vcpu as waiting for an interrupt. */
	sl_lock(&current->lock);
	current->state = vcpu_state_blocked_interrupt;
	sl_unlock(&current->lock);

	return api_switch_to_primary(current, ret);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

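/*
 * Illustrative sketch, not code from this file: the primary VM's scheduler is
 * assumed to drive secondary vcpus via a wrapper such as hf_vcpu_run() from
 * "vmapi/hf/call.h", e.g.:
 *
 *	struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, vcpu_idx);
 *	if (ret.code == HF_VCPU_RUN_WAIT_FOR_INTERRUPT) {
 *		... deschedule the vcpu until an interrupt targets it ...
 *	}
 */
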
/**
 * Runs the given vcpu of the given VM.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

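	/*
	 * Only a vcpu in the ready state may be handed to the physical CPU;
	 * any other state (running, blocked) is reported back to the
	 * scheduler as wait-for-interrupt so it can retry later.
	 */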
	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->cpu = current->cpu;
		vcpu->state = vcpu_state_running;
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}

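/*
 * Illustrative sketch, an assumption rather than code from this file: a VM is
 * expected to configure its mailbox once at boot, passing the intermediate
 * physical addresses of two distinct, page-aligned pages it owns, e.g. via a
 * wrapper such as hf_vm_configure() from "vmapi/hf/call.h":
 *
 *	if (hf_vm_configure(send_page_ipa, recv_page_ipa) != 0) {
 *		... pages invalid or mailbox already configured ...
 *	}
 */
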
/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM, which ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

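	/*
	 * The mailbox pages are identity-mapped: the hypervisor maps them at
	 * their physical addresses in its own page table so that it can
	 * access the buffers directly when copying messages.
	 */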
	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}

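/*
 * Illustrative sketch, an assumption rather than code from this file: a sender
 * places its payload in the configured send buffer and then invokes a wrapper
 * such as hf_mailbox_send() from "vmapi/hf/call.h":
 *
 *	memcpy(send_buf, message, len);
 *	if (hf_mailbox_send(target_vm_id, len) < 0) {
 *		... recipient not ready; retry later ...
 *	}
 */
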
/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

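	/*
	 * Note: the mailbox state is set to "read" rather than "received"
	 * below, on the assumption that a waiting or directly-notified
	 * recipient consumes the message immediately; it is changed to
	 * "received" if no vcpu turns out to be waiting.
	 */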
	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_MESSAGE,
			.message.size = size,
		};
		*next = api_switch_to_primary(current, primary_ret);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take the target vcpu out of the waiter list and mark it as
		 * ready to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode((
					     struct hf_mailbox_receive_return){
					     .vm_id = to->mailbox.recv_from_id,
					     .size = size,
				     }));

		sl_unlock(&to_vcpu->lock);

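		/* Convert the vcpu pointer back to an index for the caller. */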
		vcpu = to_vcpu - to->vcpus;
	}

	/*
	 * Return to the primary VM directly or with a switch: if the sender is
	 * the primary VM, return the index of the vcpu to wake; otherwise,
	 * switch to the primary VM so that it can schedule the recipient.
	 */
	if (from->id == HF_PRIMARY_VM_ID) {
		ret = vcpu;
	} else {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = to->id,
			.wake_up.vcpu = vcpu,
		};
		*next = api_switch_to_primary(current, primary_ret);
		ret = 0;
	}

out:
	sl_unlock(&to->lock);

	return ret;
}

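/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * secondary VM blocks for a message with a wrapper such as
 * hf_mailbox_receive() from "vmapi/hf/call.h" and clears the mailbox once the
 * payload has been copied out of the receive buffer:
 *
 *	struct hf_mailbox_receive_return r = hf_mailbox_receive(true);
 *	memcpy(local_buf, recv_buf, r.size);
 *	hf_mailbox_clear();
 */
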
/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message, so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

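	/*
	 * Block the caller. The vcpu lock is taken while the vm lock is still
	 * held so that a concurrent sender, which also takes the vm lock,
	 * cannot miss the new waiter.
	 */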
	sl_lock(&current->lock);
	current->state = vcpu_state_blocked_mailbox;

	/* Push the vcpu onto the head of the waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to the primary VM to block. */
	*next = api_wait_for_interrupt(current);
out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve, as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}