/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers "
	      "so the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}
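
/*
 * Illustrative sketch (not part of the original file): from the primary VM,
 * the two counts above are reachable through the wrappers assumed to be
 * declared in "vmapi/hf/call.h":
 *
 *	int64_t vms = hf_vm_get_count();
 *	int64_t vcpus = hf_vcpu_get_count(vm_id);
 *	if (vcpus == -1) {
 *		// Unknown vm_id, or the caller isn't the primary VM.
 *	}
 *
 * hf_vm_get_count()/hf_vcpu_get_count() are assumed names for those
 * wrappers; vm_id is a hypothetical variable.
 */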

/**
 * Runs the given vcpu of the given VM.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->cpu = current->cpu;
		vcpu->state = vcpu_state_running;
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}
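
/*
 * Illustrative sketch (not part of the original file): a minimal run loop in
 * the primary VM built on the hf_vcpu_run() wrapper assumed to be declared
 * in "vmapi/hf/call.h", dispatching on the return codes this file produces:
 *
 *	for (;;) {
 *		struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, vcpu_idx);
 *
 *		if (ret.code == HF_VCPU_RUN_WAIT_FOR_INTERRUPT) {
 *			deschedule_until_interrupt(vm_id, vcpu_idx);
 *		} else if (ret.code == HF_VCPU_RUN_WAKE_UP) {
 *			wake(ret.wake_up.vm_id, ret.wake_up.vcpu);
 *		} else if (ret.code == HF_VCPU_RUN_MESSAGE) {
 *			handle_message(ret.message.size);
 *		}
 *	}
 *
 * deschedule_until_interrupt(), wake() and handle_message() are hypothetical
 * scheduler hooks; only the HF_* names and return structure come from this
 * file.
 */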

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM; this ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
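
/*
 * Illustrative sketch (not part of the original file): a VM sets its mailbox
 * up once at boot with two distinct, page-aligned pages of its own memory.
 * This assumes an hf_vm_configure() wrapper and hf_ipaddr_t type in
 * "vmapi/hf/call.h"; both names are assumptions here:
 *
 *	static uint8_t send_page[PAGE_SIZE]
 *		__attribute__((aligned(PAGE_SIZE)));
 *	static uint8_t recv_page[PAGE_SIZE]
 *		__attribute__((aligned(PAGE_SIZE)));
 *
 *	if (hf_vm_configure((hf_ipaddr_t)send_page,
 *			    (hf_ipaddr_t)recv_page) != 0) {
 *		// -1 covers misaligned, unmapped, identical or
 *		// already-configured pages.
 *	}
 *
 * send_page/recv_page are hypothetical buffers; the failure modes listed
 * are the ones checked by api_vm_configure() above.
 */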

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;
	struct hf_vcpu_run_return primary_ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		primary_ret.code = HF_VCPU_RUN_MESSAGE;
		primary_ret.message.size = size;
		ret = 0;
		/*
		 * clang-tidy isn't able to prove that
		 * `from->id != HF_PRIMARY_VM_ID` so cover that specific case
		 * explicitly so as not to hide other possible bugs. clang-check
		 * is more clever and finds that this is dead code so we also
		 * pretend to use the new value.
		 */
		if (from->id == HF_PRIMARY_VM_ID) {
			vcpu = 0;
			(void)vcpu;
		}
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take the target vcpu out of the waiter list and mark it
		 * ready to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode(
					     (struct hf_mailbox_receive_return){
						     .vm_id = to->mailbox.recv_from_id,
						     .size = size,
					     }));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	primary_ret.code = HF_VCPU_RUN_WAKE_UP;
	primary_ret.wake_up.vm_id = to->id;
	primary_ret.wake_up.vcpu = vcpu;
	ret = 0;

out:
	/*
	 * Unlock before routing the return values as switching to the primary
	 * will acquire more locks and nesting the locks is avoidable.
	 */
	sl_unlock(&to->lock);

	/* Report errors to the sender. */
	if (ret != 0) {
		return ret;
	}

	/* If the sender is the primary, return the vcpu to schedule. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return primary_ret.wake_up.vcpu;
	}

	/* Switch to primary for scheduling and return success to the sender. */
	*next = api_switch_to_primary(current, primary_ret, vcpu_state_ready);
	return 0;
}
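
/*
 * Illustrative sketch (not part of the original file): sending a message,
 * assuming an hf_mailbox_send() wrapper in "vmapi/hf/call.h" maps onto
 * api_mailbox_send() above:
 *
 *	memcpy(send_page, "ping", 5);
 *	int64_t ret = hf_mailbox_send(target_vm_id, 5);
 *
 * On success a secondary sender gets 0 back and is switched out so the
 * primary can schedule the recipient; a primary sender gets the index of
 * the vcpu to wake (or HF_INVALID_VCPU if the scheduler must pick one).
 * send_page and target_vm_id are hypothetical names.
 */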

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);
	current->state = vcpu_state_blocked_mailbox;

	/* Push vcpu into waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to the primary VM to block. */
	*next = api_wait_for_interrupt(current);
out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
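
/*
 * Illustrative sketch (not part of the original file): the receive/clear
 * protocol from a secondary VM, assuming hf_mailbox_receive() and
 * hf_mailbox_clear() wrappers in "vmapi/hf/call.h":
 *
 *	struct hf_mailbox_receive_return recv = hf_mailbox_receive(true);
 *	if (recv.vm_id != HF_INVALID_VM_ID) {
 *		// Copy the message out before freeing the mailbox.
 *		memcpy(local_buf, recv_page, recv.size);
 *		hf_mailbox_clear();
 *	}
 *
 * local_buf and recv_page are hypothetical; until the clear succeeds the
 * mailbox stays in mailbox_state_read and further sends to this VM fail.
 */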