/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock
 */
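
/*
 * Illustrative sketch (not part of the build): a path that needs both locks
 * must acquire them in the order above. This mirrors what api_mailbox_send()
 * below does when it wakes a waiting vcpu:
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	vcpu->state = vcpu_state_ready;
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 * Taking the locks in the opposite order on another physical CPU could
 * deadlock.
 */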

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause HF_VCPU_RUN to return and the primary VM to regain control of
 * the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}
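
/*
 * Illustrative sketch: from the primary VM these calls are assumed to be
 * reachable through wrappers in vmapi/hf/call.h whose names mirror the api_*
 * functions here (an assumption, not verified against that header). A
 * scheduler might size its run queues like so:
 *
 *	int64_t vm_count = hf_vm_get_count();
 *	uint32_t vm_id;
 *
 *	for (vm_id = 0; vm_id < vm_count; ++vm_id) {
 *		if (vm_id == HF_PRIMARY_VM_ID) {
 *			continue;
 *		}
 *		alloc_vcpu_state(vm_id, hf_vcpu_get_count(vm_id));
 *	}
 *
 * alloc_vcpu_state() is a hypothetical helper; iterating contiguous IDs is
 * also an assumption about how VM IDs are assigned.
 */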

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->cpu = current->cpu;
		vcpu->state = vcpu_state_running;
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}
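
/*
 * Illustrative sketch: a minimal dispatch loop in the primary VM, assuming a
 * hf_vcpu_run() wrapper that encodes this hypercall and decodes the result
 * into struct hf_vcpu_run_return (the wrapper name is an assumption):
 *
 *	for (;;) {
 *		struct hf_vcpu_run_return run = hf_vcpu_run(vm_id, vcpu_idx);
 *
 *		switch (run.code) {
 *		case HF_VCPU_RUN_YIELD:
 *			break;	// ran until it yielded; pick the next vcpu
 *		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *			idle_until_irq();	// hypothetical helper
 *			break;
 *		case HF_VCPU_RUN_MESSAGE:
 *			handle_message(run.message.size);	// hypothetical
 *			break;
 *		case HF_VCPU_RUN_WAKE_UP:
 *			mark_runnable(run.wake_up.vm_id, run.wake_up.vcpu);
 *			break;	// mark_runnable() is hypothetical
 *		}
 *	}
 */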

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses.
	 * This only succeeds if the addresses are accessible from the VM,
	 * which ensures that the caller isn't trying to use another VM's
	 * memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
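
/*
 * Illustrative sketch: a VM setting up its mailbox, assuming a
 * hf_vm_configure() wrapper and an hf_ipaddr_t address type in
 * vmapi/hf/call.h (both assumptions). The buffers must be page-aligned and
 * owned by the caller:
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	if (hf_vm_configure((hf_ipaddr_t)send_page,
 *			    (hf_ipaddr_t)recv_page) != 0) {
 *		// Mailbox calls will fail until this succeeds.
 *	}
 */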

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_MESSAGE,
			.message.size = size,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take the target vcpu out of the waiter list and mark it as
		 * ready to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode((
					     struct hf_mailbox_receive_return){
						     .vm_id = to->mailbox.recv_from_id,
						     .size = size,
				     }));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from->id == HF_PRIMARY_VM_ID) {
		ret = vcpu;
	} else {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = to->id,
			.wake_up.vcpu = vcpu,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
	}

out:
	sl_unlock(&to->lock);

	return ret;
}
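
/*
 * Illustrative sketch: sending a message, assuming a hf_mailbox_send()
 * wrapper mirroring this function and the send_page configured above. The
 * payload is staged in the send buffer before the hypercall:
 *
 *	memcpy(send_page, "ping", 4);
 *	if (hf_mailbox_send(target_vm_id, 4) < 0) {
 *		// The recipient's mailbox was full or unconfigured; retry
 *		// later, e.g. after the recipient clears its mailbox.
 *	}
 */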

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message, so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);
	current->state = vcpu_state_blocked_mailbox;

	/* Push the vcpu onto the waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to the primary VM to block. */
	*next = api_wait_for_interrupt(current);
out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve, as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
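
/*
 * Illustrative sketch: a secondary VM's message loop, assuming
 * hf_mailbox_receive() and hf_mailbox_clear() wrappers mirroring the two
 * functions above. A new message can only arrive once the previous one has
 * been cleared:
 *
 *	for (;;) {
 *		struct hf_mailbox_receive_return msg =
 *			hf_mailbox_receive(true);
 *
 *		if (msg.vm_id == HF_INVALID_VM_ID) {
 *			continue;
 *		}
 *		handle_message(recv_page, msg.size);	// hypothetical handler
 *		hf_mailbox_clear();
 *	}
 */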