/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/arch/cpu.h"

#include "hf/dlog.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock
 */

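/*
 * Illustrative sketch of the ordering rule above (comment only, not compiled):
 * a path that needs both a VM's lock and one of its vcpus' locks must acquire
 * them as
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	...
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 * api_inject_interrupt() below follows this rule: when it discovers it also
 * needs the VM lock, it drops the vcpu lock and re-acquires both in the order
 * above.
 */
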
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static struct mpool api_page_pool;

/**
 * Initializes the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Run this in the context of a
 * secondary VM to cause HF_VCPU_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary vm leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};
	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait for interrupt mode, and returns to the primary
 * vm.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vcpu has been saved
 * and can therefore be used by other pcpus.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Prepares the vcpu to run by updating its state and fetching the return
 * value, if any, that must be forced onto it when it next runs.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct retval_state *vcpu_retval)
{
	bool ret;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = false;
		goto out;
	}

	vcpu->cpu = current->cpu;
	vcpu->state = vcpu_state_running;

	/* Fetch return value to inject into vCPU if there is one. */
	*vcpu_retval = vcpu->retval;
	if (vcpu_retval->force) {
		vcpu->retval.force = false;
	}

	/*
	 * Wait until the registers become available. Care must be taken when
	 * looping on this: it shouldn't be done while holding other locks
	 * to avoid deadlocks.
	 */
	while (!vcpu->regs_available) {
		sl_unlock(&vcpu->lock);
		sl_lock(&vcpu->lock);
	}

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another
	 * physical CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	sl_unlock(&vcpu->lock);
	return ret;
}

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct retval_state vcpu_retval;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = &vm->vcpus[vcpu_idx];
	if (!api_vcpu_prepare_run(current, vcpu, &vcpu_retval)) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
		goto out;
	}

	*next = vcpu;
	ret.code = HF_VCPU_RUN_YIELD;

	/* Update return value if one was injected. */
	if (vcpu_retval.force) {
		arch_regs_set_retval(&vcpu->regs, vcpu_retval.value);
	}

out:
	return ret;
}

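/*
 * Illustrative sketch (not part of this file): how the primary VM's scheduler
 * might drive api_vcpu_run() through the hf_vcpu_run() wrapper from
 * vmapi/hf/call.h. The wrapper's exact signature is assumed here to be
 * hf_vcpu_run(vm_id, vcpu_idx) returning struct hf_vcpu_run_return.
 *
 *	for (;;) {
 *		struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, vcpu_idx);
 *
 *		if (ret.code == HF_VCPU_RUN_WAIT_FOR_INTERRUPT) {
 *			park_vcpu_until_interrupt(vm_id, vcpu_idx);
 *		} else if (ret.code == HF_VCPU_RUN_MESSAGE) {
 *			handle_message(ret.message.size);
 *		} else if (ret.code == HF_VCPU_RUN_WAKE_UP) {
 *			make_runnable(ret.wake_up.vm_id, ret.wake_up.vcpu);
 *		}
 *	}
 *
 * park_vcpu_until_interrupt(), handle_message() and make_runnable() are
 * hypothetical scheduler helpers, named here only for illustration.
 */
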
/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
			 const struct vcpu *current)
{
	struct vm *vm = current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/* Ensure the pages are accessible from the VM. */
	if (!mm_vm_is_mapped(&vm->ptable, send, 0) ||
	    !mm_vm_is_mapped(&vm->ptable, recv, 0)) {
		ret = -1;
		goto exit;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
					   MM_MODE_R, &api_page_pool);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
					   MM_MODE_W, &api_page_pool);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0, &api_page_pool);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}

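/*
 * Illustrative sketch (not part of this file): a VM is expected to make this
 * call once, early in boot, before sending or receiving any messages. The
 * wrapper name and argument form below are assumptions for illustration only.
 *
 *	if (hf_vm_configure(send_page_ipa, recv_page_ipa) != 0) {
 *		panic("unable to set up mailbox pages");
 *	}
 */
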
/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;

	/* Limit the size of the transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_MESSAGE,
			.message.size = size,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark it as ready to
		 * run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		to_vcpu->retval.force = true;
		to_vcpu->retval.value = hf_mailbox_receive_return_encode(
			(struct hf_mailbox_receive_return){
				.vm_id = to->mailbox.recv_from_id,
				.size = size,
			});

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from->id == HF_PRIMARY_VM_ID) {
		ret = vcpu;
	} else {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = to->id,
			.wake_up.vcpu = vcpu,
		};
		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
	}

out:
	sl_unlock(&to->lock);

	return ret;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);

	/* Push vcpu into waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to primary vm to block. */
	{
		struct hf_vcpu_run_return run_return = {
			.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
		};
		*next = api_switch_to_primary(current, run_return,
					      vcpu_state_blocked_mailbox);
	}
out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve, as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}

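/*
 * Illustrative sketch (not part of this file): a secondary VM's message loop
 * built on the two calls above, via their vmapi wrappers. The wrapper names
 * hf_mailbox_receive() and hf_mailbox_clear() and their exact signatures are
 * assumptions for illustration only.
 *
 *	for (;;) {
 *		struct hf_mailbox_receive_return recv = hf_mailbox_receive(true);
 *
 *		if (recv.vm_id != HF_INVALID_VM_ID) {
 *			process_message(recv.vm_id, recv.size);
 *			hf_mailbox_clear();
 *		}
 *	}
 *
 * process_message() is a hypothetical helper; the message contents are read
 * from the receive page registered with api_vm_configure().
 */
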
/**
 * Enables or disables a given interrupt ID for the calling vCPU.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
int64_t api_enable_interrupt(uint32_t intid, bool enable, struct vcpu *current)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	sl_lock(&current->lock);
	if (enable) {
		current->interrupts.interrupt_enabled[intid_index] |=
			intid_mask;
		/* If it is pending, change state and trigger a virtual IRQ. */
		if (current->interrupts.interrupt_pending[intid_index] &
		    intid_mask) {
			arch_regs_set_virtual_interrupt(&current->regs, true);
		}
	} else {
		current->interrupts.interrupt_enabled[intid_index] &=
			~intid_mask;
	}

	sl_unlock(&current->lock);
	return 0;
}

/**
 * Returns the ID of the next pending interrupt for the calling vCPU, and
 * acknowledges it (i.e. marks it as no longer pending). Returns
 * HF_INVALID_INTID if there are no pending interrupts.
 */
uint32_t api_get_and_acknowledge_interrupt(struct vcpu *current)
{
	uint8_t i;
	uint32_t first_interrupt = HF_INVALID_INTID;
	bool interrupts_remain = false;

	/*
	 * Find the first enabled and pending interrupt ID, return it, and
	 * deactivate it.
	 */
	sl_lock(&current->lock);
	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		uint32_t enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];
		if (enabled_and_pending == 0) {
			continue;
		}

		if (first_interrupt != HF_INVALID_INTID) {
			interrupts_remain = true;
			break;
		}

		uint8_t bit_index = ctz(enabled_and_pending);
		/* Mark it as no longer pending. */
		current->interrupts.interrupt_pending[i] &= ~(1u << bit_index);
		first_interrupt = i * INTERRUPT_REGISTER_BITS + bit_index;

		enabled_and_pending = current->interrupts.interrupt_enabled[i] &
				      current->interrupts.interrupt_pending[i];
		if (enabled_and_pending != 0) {
			interrupts_remain = true;
			break;
		}
	}

	/*
	 * If there are no more enabled and pending interrupts left, clear the
	 * VI bit.
	 */
	arch_regs_set_virtual_interrupt(&current->regs, interrupts_remain);

	sl_unlock(&current->lock);
	return first_interrupt;
}

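/*
 * Illustrative sketch (not part of this file): how a guest's virtual interrupt
 * handler might drain pending interrupts using the call above, via a vmapi
 * wrapper. The wrapper name hf_get_and_acknowledge_interrupt() is an
 * assumption for illustration only.
 *
 *	void handle_virtual_interrupt(void)
 *	{
 *		uint32_t intid;
 *
 *		while ((intid = hf_get_and_acknowledge_interrupt()) !=
 *		       HF_INVALID_INTID) {
 *			dispatch_irq(intid);
 *		}
 *	}
 *
 * dispatch_irq() is a hypothetical guest-side handler.
 */
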
/**
 * Returns whether the current vCPU is allowed to inject an interrupt into the
 * given VM and vCPU.
 */
static inline bool is_injection_allowed(uint32_t target_vm_id,
					struct vcpu *current)
{
	uint32_t current_vm_id = current->vm->id;

	/*
	 * The primary VM is allowed to inject interrupts into any VM.
	 * Secondary VMs are only allowed to inject interrupts into their own
	 * vCPUs.
	 */
	return current_vm_id == HF_PRIMARY_VM_ID ||
	       current_vm_id == target_vm_id;
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns 0 on success, or -1 if the target VM or vCPU doesn't exist, the
 * interrupt ID is invalid, or the current VM is not allowed to inject
 * interrupts into the target VM.
 */
int64_t api_inject_interrupt(uint32_t target_vm_id, uint32_t target_vcpu_idx,
			     uint32_t intid, struct vcpu *current,
			     struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
	struct vcpu *target_vcpu;
	struct vm *target_vm = vm_get(target_vm_id);
	bool need_vm_lock;

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}
	if (target_vm == NULL) {
		return -1;
	}
	if (target_vcpu_idx >= target_vm->vcpu_count) {
		/* The requested vcpu must exist. */
		return -1;
	}
	if (!is_injection_allowed(target_vm_id, current)) {
		return -1;
	}
	target_vcpu = &target_vm->vcpus[target_vcpu_idx];

	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);

	sl_lock(&target_vcpu->lock);
	/*
	 * If we need the target_vm lock we need to release the target_vcpu
	 * lock first to maintain the correct order of locks. In between
	 * releasing and acquiring it again, the state of the vCPU could change
	 * in such a way that we don't actually need to touch the target_vm
	 * after all, but that's alright: taking the target_vm lock anyway is
	 * safe, just perhaps a little slow in this unusual case. The reverse
	 * is not possible: if need_vm_lock is false, we don't release the
	 * target_vcpu lock until we are done, so nothing should change in such
	 * a way that we need the VM lock after all.
	 */
	need_vm_lock = (target_vcpu->interrupts.interrupt_enabled[intid_index] &
			intid_mask) &&
		       target_vcpu->state == vcpu_state_blocked_mailbox;
	if (need_vm_lock) {
		sl_unlock(&target_vcpu->lock);
		sl_lock(&target_vm->lock);
		sl_lock(&target_vcpu->lock);
	}

	/* Make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	/*
	 * If it is enabled, change state and trigger a virtual IRQ. If you
	 * change this logic make sure to update the need_vm_lock logic above
	 * to match.
	 */
	if (target_vcpu->interrupts.interrupt_enabled[intid_index] &
	    intid_mask) {
		dlog("IRQ %d is enabled for VM %d VCPU %d, setting VI.\n",
		     intid, target_vm_id, target_vcpu_idx);
		arch_regs_set_virtual_interrupt(&target_vcpu->regs, true);

		if (target_vcpu->state == vcpu_state_blocked_interrupt) {
			target_vcpu->state = vcpu_state_ready;
		} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
			/*
			 * If you change this logic make sure to update the
			 * need_vm_lock logic above to match.
			 */
			target_vcpu->state = vcpu_state_ready;

			/* Take target vCPU out of mailbox recv_waiter list. */
			/*
			 * TODO: Consider using a doubly-linked list for the
			 * receive waiter list to avoid the linear search here.
			 */
			struct vcpu **previous_next_pointer =
				&target_vm->mailbox.recv_waiter;
			while (*previous_next_pointer != NULL &&
			       *previous_next_pointer != target_vcpu) {
				/*
				 * TODO(qwandor): Do we need to lock the vCPUs
				 * somehow while we walk the linked list, or is
				 * the VM lock enough?
				 */
				previous_next_pointer =
					&(*previous_next_pointer)->mailbox_next;
			}
			if (*previous_next_pointer == NULL) {
				dlog("Target VCPU state is "
				     "vcpu_state_blocked_mailbox but is not in "
				     "VM mailbox waiter list. This should "
				     "never happen.\n");
			} else {
				*previous_next_pointer =
					target_vcpu->mailbox_next;
			}
		}

		if (current->vm->id != HF_PRIMARY_VM_ID &&
		    current != target_vcpu) {
			/*
			 * Switch to the primary so that it can switch to the
			 * target.
			 */
			struct hf_vcpu_run_return ret = {
				.code = HF_VCPU_RUN_WAKE_UP,
				.wake_up.vm_id = target_vm_id,
				.wake_up.vcpu = target_vcpu_idx,
			};
			*next = api_switch_to_primary(current, ret,
						      vcpu_state_ready);
		}
	}

	sl_unlock(&target_vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&target_vm->lock);
	}

	return 0;
}