/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/arch/cpu.h"

#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock
 */
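
/*
 * For example (an illustrative sketch only), a path that needs both locks,
 * such as api_inject_interrupt() below, acquires them in that order:
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	...
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 * The partial order constrains acquisition only; nested unlocking as shown is
 * just the conventional choice.
 */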

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static struct mpool api_page_pool;

/**
 * Initializes the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}
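
/*
 * A minimal usage sketch (illustrative; the pool name and setup details are
 * assumptions about the boot path, not the actual code):
 *
 *	struct mpool boot_ppool;
 *	mpool_init(&boot_ppool, PAGE_SIZE);
 *	... add free pages to boot_ppool ...
 *	api_init(&boot_ppool);
 *
 * After this call the API layer owns those pages and draws on them for the
 * mailbox mappings set up in api_vm_configure() below.
 */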

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM, it causes HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->cpu = current->cpu;
		vcpu->state = vcpu_state_running;
		*next = vcpu;
		ret.code = HF_VCPU_RUN_YIELD;
	}
	sl_unlock(&vcpu->lock);

out:
	return ret;
}
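
/*
 * Illustrative primary-VM scheduler fragment (a sketch; it assumes the vmapi
 * wrapper hf_vcpu_run() declared in "vmapi/hf/call.h" and elides error
 * handling):
 *
 *	struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, vcpu_idx);
 *	switch (ret.code) {
 *	case HF_VCPU_RUN_YIELD:
 *		... vcpu ran and yielded; schedule it again later ...
 *		break;
 *	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *		... vcpu is blocked (or the request was invalid); don't
 *		    run it again until an interrupt is injected ...
 *		break;
 *	case HF_VCPU_RUN_MESSAGE:
 *		... ret.message.size bytes are in the recv buffer ...
 *		break;
 *	}
 */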

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/* Ensure the pages are accessible from the VM. */
	if (!mm_vm_is_mapped(&vm->ptable, send, 0) ||
	    !mm_vm_is_mapped(&vm->ptable, recv, 0)) {
		ret = -1;
		goto exit;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
					   MM_MODE_R, &api_page_pool);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
					   MM_MODE_W, &api_page_pool);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0, &api_page_pool);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
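
/*
 * Illustrative guest-side setup (a sketch; it assumes a vmapi wrapper
 * hf_vm_configure(), the buffer names are placeholders, and the address
 * types/casts are simplified):
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	if (hf_vm_configure(send_page, recv_page) != 0) {
 *		... misaligned, already configured, or not mapped ...
 *	}
 */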

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;

	/* Limit the size of transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_MESSAGE,
			.message.size = size,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark it as ready to
		 * run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     hf_mailbox_receive_return_encode((
					     struct hf_mailbox_receive_return){
					     .vm_id = to->mailbox.recv_from_id,
					     .size = size,
				     }));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from->id == HF_PRIMARY_VM_ID) {
		ret = vcpu;
	} else {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = to->id,
			.wake_up.vcpu = vcpu,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
	}

out:
	sl_unlock(&to->lock);

	return ret;
}
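
/*
 * Illustrative sender flow (a sketch; it assumes a vmapi wrapper
 * hf_mailbox_send() and a configured send buffer named send_page):
 *
 *	memcpy(send_page, message, len);
 *	if (hf_mailbox_send(target_vm_id, len) == -1) {
 *		... recipient mailbox busy or not set up; retry later ...
 *	}
 */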

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);

	/* Push vcpu into waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to primary vm to block. */
	{
		struct hf_vcpu_run_return run_return = {
			.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
		};
		*next = api_switch_to_primary(current, run_return,
					      vcpu_state_blocked_mailbox);
	}
out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
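
/*
 * Illustrative secondary-VM receive loop (a sketch; it assumes vmapi wrappers
 * hf_mailbox_receive() and hf_mailbox_clear()):
 *
 *	for (;;) {
 *		struct hf_mailbox_receive_return recv =
 *			hf_mailbox_receive(true);
 *		if (recv.vm_id != HF_INVALID_VM_ID) {
 *			... handle recv.size bytes in the recv buffer ...
 *			hf_mailbox_clear();
 *		}
 *	}
 *
 * Clearing only after the data has been consumed matters: the mailbox stays
 * unavailable to senders until hf_mailbox_clear() succeeds.
 */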

/**
 * Enables or disables a given interrupt ID for the calling vCPU.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
int64_t api_enable_interrupt(uint32_t intid, bool enable, struct vcpu *current)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	sl_lock(&current->lock);
	if (enable) {
		current->interrupts.interrupt_enabled[intid_index] |=
			intid_mask;
		/* If it is pending, change state and trigger a virtual IRQ. */
		if (current->interrupts.interrupt_pending[intid_index] &
		    intid_mask) {
			arch_regs_set_virtual_interrupt(&current->regs, true);
		}
	} else {
		current->interrupts.interrupt_enabled[intid_index] &=
			~intid_mask;
	}

	sl_unlock(&current->lock);
	return 0;
}
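
/*
 * Illustrative use from a guest driver (a sketch; it assumes a vmapi wrapper
 * hf_enable_interrupt(), and DEV_INTID is a placeholder for a device's
 * interrupt ID):
 *
 *	if (hf_enable_interrupt(DEV_INTID, true) == -1) {
 *		... DEV_INTID is out of range ...
 *	}
 */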

/**
 * Returns the ID of the next pending interrupt for the calling vCPU, and
 * acknowledges it (i.e. marks it as no longer pending). Returns
 * HF_INVALID_INTID if there are no pending interrupts.
 */
uint32_t api_get_and_acknowledge_interrupt(struct vcpu *current)
{
	uint8_t i;
	uint32_t first_interrupt = HF_INVALID_INTID;
	bool interrupts_remain = false;

	/*
	 * Find the first enabled and pending interrupt ID, return it, and
	 * deactivate it.
	 */
	sl_lock(&current->lock);
	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		uint32_t enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];
		if (enabled_and_pending == 0) {
			continue;
		}

		if (first_interrupt != HF_INVALID_INTID) {
			interrupts_remain = true;
			break;
		}

		uint8_t bit_index = ctz(enabled_and_pending);
		/* Mark it as no longer pending. */
		current->interrupts.interrupt_pending[i] &= ~(1u << bit_index);
		first_interrupt = i * INTERRUPT_REGISTER_BITS + bit_index;

		enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];
		if (enabled_and_pending != 0) {
			interrupts_remain = true;
			break;
		}
	}
	/*
	 * If there are no more enabled and pending interrupts left, clear the
	 * VI bit.
	 */
	arch_regs_set_virtual_interrupt(&current->regs, interrupts_remain);

	sl_unlock(&current->lock);
	return first_interrupt;
}
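
/*
 * Illustrative guest IRQ handler (a sketch; the vmapi wrapper name
 * hf_get_and_acknowledge_interrupt() is an assumption):
 *
 *	uint32_t intid;
 *	while ((intid = hf_get_and_acknowledge_interrupt()) !=
 *	       HF_INVALID_INTID) {
 *		... dispatch intid to its handler ...
 *	}
 *
 * Draining until HF_INVALID_INTID matches the VI-bit semantics above: the
 * virtual interrupt line stays asserted while any enabled interrupt remains
 * pending.
 */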

/**
 * Returns whether the current vCPU is allowed to inject an interrupt into the
 * given VM and vCPU.
 */
static inline bool is_injection_allowed(uint32_t target_vm_id,
					struct vcpu *current)
{
	uint32_t current_vm_id = current->vm->id;

	/*
	 * The primary VM is allowed to inject interrupts into any VM.
	 * Secondary VMs are only allowed to inject interrupts into their own
	 * vCPUs.
	 */
	return current_vm_id == HF_PRIMARY_VM_ID ||
	       current_vm_id == target_vm_id;
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
 * interrupt ID is invalid, or the current VM is not allowed to inject
 * interrupts to the target VM.
 */
int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
			     uint32_t intid, struct vcpu *current,
			     struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
	struct vcpu *target_vcpu;
	struct vm *target_vm = vm_get(target_vm_id);
	bool need_vm_lock;

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}
	if (target_vm == NULL) {
		return -1;
	}
	if (target_vcpu_idx >= target_vm->vcpu_count) {
		/* The requested vcpu must exist. */
		return -1;
	}
	if (!is_injection_allowed(target_vm_id, current)) {
		return -1;
	}
	target_vcpu = &target_vm->vcpus[target_vcpu_idx];

	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);

	sl_lock(&target_vcpu->lock);
	/*
	 * If we need the target_vm lock, we must release the target_vcpu lock
	 * first to maintain the correct order of locks. In between releasing
	 * and acquiring it again, the state of the vCPU could change in such a
	 * way that we don't actually need to touch the target_vm after all,
	 * but that's alright: we'll take the target_vm lock anyway, which is
	 * safe, just perhaps a little slow in this unusual case. The reverse
	 * is not possible: if need_vm_lock is false, we don't release the
	 * target_vcpu lock until we are done, so nothing should change in such
	 * a way that we need the VM lock after all.
	 */
	need_vm_lock =
		(target_vcpu->interrupts.interrupt_enabled[intid_index] &
		 intid_mask) &&
		target_vcpu->state == vcpu_state_blocked_mailbox;
	if (need_vm_lock) {
		sl_unlock(&target_vcpu->lock);
		sl_lock(&target_vm->lock);
		sl_lock(&target_vcpu->lock);
	}

	/* Make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	/*
	 * If it is enabled, change state and trigger a virtual IRQ. If you
	 * change this logic make sure to update the need_vm_lock logic above
	 * to match.
	 */
	if (target_vcpu->interrupts.interrupt_enabled[intid_index] &
	    intid_mask) {
		dlog("IRQ %d is enabled for VM %d VCPU %d, setting VI.\n",
		     intid, target_vm_id, target_vcpu_idx);
		arch_regs_set_virtual_interrupt(&target_vcpu->regs, true);

		if (target_vcpu->state == vcpu_state_blocked_interrupt) {
			target_vcpu->state = vcpu_state_ready;
		} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
			/*
			 * If you change this logic make sure to update the
			 * need_vm_lock logic above to match.
			 */
			target_vcpu->state = vcpu_state_ready;

			/*
			 * Take target vCPU out of mailbox recv_waiter list.
			 *
			 * TODO: Consider using a doubly-linked list for the
			 * receive waiter list to avoid the linear search here.
			 */
			struct vcpu **previous_next_pointer =
				&target_vm->mailbox.recv_waiter;
			while (*previous_next_pointer != NULL &&
			       *previous_next_pointer != target_vcpu) {
				/*
				 * TODO(qwandor): Do we need to lock the vCPUs
				 * somehow while we walk the linked list, or is
				 * the VM lock enough?
				 */
				previous_next_pointer =
					&(*previous_next_pointer)->mailbox_next;
			}
			if (*previous_next_pointer == NULL) {
				dlog("Target VCPU state is "
				     "vcpu_state_blocked_mailbox but is not in "
				     "VM mailbox waiter list. This should "
				     "never happen.\n");
			} else {
				*previous_next_pointer =
					target_vcpu->mailbox_next;
			}
		}

		if (current->vm->id != HF_PRIMARY_VM_ID &&
		    current != target_vcpu) {
			/*
			 * Switch to the primary so that it can switch to the
			 * target.
			 */
			struct hf_vcpu_run_return ret = {
				.code = HF_VCPU_RUN_WAKE_UP,
				.wake_up.vm_id = target_vm_id,
				.wake_up.vcpu = target_vcpu_idx,
			};
			*next = api_switch_to_primary(current, ret,
						      vcpu_state_ready);
		}
	}

	sl_unlock(&target_vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&target_vm->lock);
	}

	return 0;
}
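
/*
 * Illustrative injection from the primary VM (a sketch; the vmapi wrapper
 * name hf_inject_interrupt() is an assumption):
 *
 *	if (hf_inject_interrupt(target_vm_id, target_vcpu_idx, intid) == 0) {
 *		... run the target vcpu so it takes the interrupt ...
 *	}
 *
 * On success the interrupt is only marked pending; the caller still decides
 * when the target vCPU next runs.
 */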