#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause HF_VCPU_RUN to return and the primary VM to regain control of
 * the CPU.
 */
static struct vcpu *api_switch_to_primary(size_t primary_retval,
					  enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value for the primary VM's call to HF_VCPU_RUN.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Mark the vcpu as waiting. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0),
		vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait-for-interrupt mode and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0),
		vcpu_state_blocked_interrupt);
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given VM.
 */
int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int64_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto fail;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto fail;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0,
					   0);
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;

fail:
	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0, 0);
}
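
/*
 * Illustrative sketch (not part of this file's build): how the primary VM's
 * scheduler might drive a secondary vcpu through this interface. It assumes a
 * guest-side wrapper hf_vcpu_run(vm_id, vcpu_idx) in "vmapi/hf/call.h" that
 * issues the HF_VCPU_RUN hypercall and returns the HF_VCPU_RUN_RESPONSE value
 * built above; the wrapper name and the way the response is decoded are
 * assumptions of the sketch, not guarantees made by this file.
 *
 *	for (;;) {
 *		int64_t resp = hf_vcpu_run(vm_id, vcpu_idx);
 *
 *		// A WAIT_FOR_INTERRUPT response means the vcpu was not ready
 *		// (or the arguments were invalid): deschedule it until an
 *		// interrupt or a WAKE_UP response targets it.
 *		//
 *		// A YIELD response means the vcpu ran and gave up the CPU
 *		// voluntarily: it can be picked again on a later pass.
 *	}
 */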

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided they are accessible from the VM; this ensures that the
	 * caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send =
		mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->mailbox.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv =
		mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->mailbox.recv) {
		vm->mailbox.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
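
/*
 * Illustrative sketch (not part of this file's build): a guest would normally
 * reserve two page-aligned buffers and register them once at start-up, e.g.
 * through a vmapi wrapper such as hf_vm_configure(send, recv) that issues the
 * corresponding hypercall with the buffers' intermediate physical addresses.
 * The wrapper name and the symbols below are assumptions of the sketch.
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	if (hf_vm_configure((uintptr_t)send_page, (uintptr_t)recv_page) != 0) {
 *		// The addresses were unaligned, the mailbox was already
 *		// configured, or the pages are not owned by this VM.
 *	}
 */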

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;
	int64_t primary_ret;

	/* Limit the size of transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		primary_ret =
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_MESSAGE, 0, size);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark as ready
		 * to run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		arch_regs_set_retval(&to_vcpu->regs,
				     HF_MAILBOX_RECEIVE_RESPONSE(
					     to->mailbox.recv_from_id, size));

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	primary_ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAKE_UP, to->id, vcpu);
	ret = 0;

out:
	/*
	 * Unlock before routing the return values as switching to the primary
	 * will acquire more locks and nesting the locks is avoidable.
	 */
	sl_unlock(&to->lock);

	/* Report errors to the sender. */
	if (ret != 0) {
		return ret;
	}

	/* If the sender is the primary, return the vcpu to schedule. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return vcpu;
	}

	/* Switch to primary for scheduling and return success to the sender. */
	*next = api_switch_to_primary(primary_ret, vcpu_state_ready);
	return 0;
}
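
/*
 * Illustrative sketch (not part of this file's build): sending a message. The
 * sender fills its registered send page and then invokes the send hypercall,
 * assumed here to be exposed as hf_mailbox_send(to_vm_id, size) in
 * "vmapi/hf/call.h". A negative return means the recipient's mailbox was not
 * set up or has not been cleared since the last message. All names not defined
 * in this file are assumptions of the sketch.
 *
 *	memcpy(send_page, payload, payload_size);
 *	if (hf_mailbox_send(to_vm_id, payload_size) < 0) {
 *		// Recipient not ready; retry later or wait to be woken.
 *	}
 */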

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
int64_t api_mailbox_receive(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int64_t ret = 0;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		block = false;
		ret = HF_MAILBOX_RECEIVE_RESPONSE(vm->mailbox.recv_from_id,
						  vm->mailbox.recv_bytes);
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		ret = -1;
		goto out;
	}

	sl_lock(&vcpu->lock);
	vcpu->state = vcpu_state_blocked_mailbox;

	/* Push vcpu into waiter list. */
	vcpu->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = vcpu;
	sl_unlock(&vcpu->lock);

	/* Switch back to the primary VM to block. */
	*next = api_wait_for_interrupt();

out:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 */
int64_t api_mailbox_clear(void)
{
	struct vm *vm = cpu()->current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = 0;
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
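
/*
 * Illustrative sketch (not part of this file's build): a secondary VM's
 * receive loop. Each message must be copied out of the receive page and the
 * mailbox cleared before the next one can be delivered. The guest-side
 * wrappers hf_mailbox_receive(block) and hf_mailbox_clear(), and the helper
 * handle_message(), are assumptions of the sketch.
 *
 *	for (;;) {
 *		// Block until a message arrives; the response encodes the
 *		// sender id and size (see HF_MAILBOX_RECEIVE_RESPONSE).
 *		int64_t resp = hf_mailbox_receive(true);
 *
 *		handle_message(recv_page, resp);
 *
 *		// Allow the next message to be delivered.
 *		hf_mailbox_clear();
 *	}
 */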