/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value for the primary VM's call to HF_VCPU_RUN.
	 */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the vcpu as waiting. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns to the primary vm, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait for interrupt mode, and returns to the primary
 * vm.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(ret, vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided they were accessible from the VM; this ensures that the
	 * caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
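
/*
 * Illustrative sketch (assumption, not part of the original file): a guest VM
 * would typically reserve two page-aligned pages and register their
 * intermediate physical addresses once, before any messaging. The wrapper
 * name hf_vm_configure() and the way the IPAs are derived from the buffer
 * addresses are assumptions for illustration only.
 *
 *	// Two page-aligned buffers owned by the calling VM (C11 alignas).
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	// With an identity-mapped guest, the buffer addresses are the IPAs.
 *	if (hf_vm_configure((uintptr_t)send_page, (uintptr_t)recv_page) != 0) {
 *		// Already configured, unaligned, same page, or not accessible.
 *	}
 */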

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;
	struct hf_vcpu_run_return primary_ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Limit the size of transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		primary_ret.code = HF_VCPU_RUN_MESSAGE;
		primary_ret.message.size = size;
		ret = 0;
		/*
		 * clang-tidy isn't able to prove that
		 * `from->id != HF_PRIMARY_VM_ID` so cover that specific case
		 * explicitly so as not to hide other possible bugs. clang-check
		 * is more clever and finds that this is dead code so we also
		 * pretend to use the new value.
		 */
		if (from->id == HF_PRIMARY_VM_ID) {
			vcpu = 0;
			(void)vcpu;
		}
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark as ready
		 * to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode((
					     struct hf_mailbox_receive_return){
					     .vm_id = to->mailbox.recv_from_id,
					     .size = size,
				     }));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	primary_ret.code = HF_VCPU_RUN_WAKE_UP;
	primary_ret.wake_up.vm_id = to->id;
	primary_ret.wake_up.vcpu = vcpu;
	ret = 0;

out:
	/*
	 * Unlock before routing the return values as switching to the primary
	 * will acquire more locks and nesting the locks is avoidable.
	 */
	sl_unlock(&to->lock);

	/* Report errors to the sender. */
	if (ret != 0) {
		return ret;
	}

	/* If the sender is the primary, return the vcpu to schedule. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return primary_ret.wake_up.vcpu;
	}

	/* Switch to primary for scheduling and return success to the sender. */
	*next = api_switch_to_primary(primary_ret, vcpu_state_ready);
	return 0;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&vcpu->lock);
	vcpu->state = vcpu_state_blocked_mailbox;

	/* Push vcpu into waiter list. */
	vcpu->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = vcpu;
	sl_unlock(&vcpu->lock);

	/* Switch back to primary vm to block. */
	*next = api_wait_for_interrupt();

out:
	sl_unlock(&vm->lock);

	return ret;
}
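
/*
 * Illustrative sketch (assumption, not part of the original file): a blocking
 * receive loop in a secondary VM. The wrapper names hf_mailbox_receive() and
 * hf_mailbox_clear(), and the recv_page/handle_message helpers, are
 * assumptions for illustration only.
 *
 *	for (;;) {
 *		struct hf_mailbox_receive_return msg = hf_mailbox_receive(true);
 *		// msg.vm_id identifies the sender and msg.size gives the
 *		// number of bytes available in the configured recv page.
 *		handle_message(recv_page, msg.size, msg.vm_id);
 *		// Clear the mailbox so the next message can be delivered.
 *		hf_mailbox_clear();
 *	}
 */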

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(void)
{
	struct vm *vm = cpu()->current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}