#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_RPC_REQUEST_MAX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
static struct vcpu *api_switch_to_primary(size_t primary_retval,
					  enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value the primary VM will see, indicating why the
	 * secondary vcpu stopped running.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Move the secondary vcpu into the requested state. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

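/*
 * Illustrative sketch, not compiled: a primary VM pass over all secondary
 * vcpus, built on guest-side wrappers (hf_vm_get_count, hf_vcpu_get_count,
 * hf_vcpu_run) assumed to be declared in "vmapi/hf/call.h" with the same
 * semantics as the api_* functions in this file. VM ids are assumed to be
 * dense, starting at 0.
 */
#if 0
static void example_run_all_secondaries(void)
{
	int64_t vm_count = hf_vm_get_count();
	int64_t vm_id;

	for (vm_id = 0; vm_id < vm_count; ++vm_id) {
		int64_t vcpu_count;
		int64_t i;

		/* Only secondary VM vcpus can be run. */
		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		vcpu_count = hf_vcpu_get_count(vm_id);
		for (i = 0; i < vcpu_count; ++i) {
			/* Returns an HF_VCPU_RUN_RESPONSE-encoded value. */
			hf_vcpu_run(vm_id, i);
		}
	}
}
#endif
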
/**
 * Runs the given vcpu of the given vm.
 */
int64_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int64_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto fail;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto fail;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;

fail:
	/* Errors are reported as a wait-for-interrupt response. */
	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
}

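/*
 * Illustrative sketch, not compiled: decoding the value api_vcpu_run()
 * returns to the primary VM. The bit layout below (code in the low byte,
 * data above it) is only an assumption for illustration; the real packing
 * is whatever HF_VCPU_RUN_RESPONSE() in "vmapi/hf/call.h" defines.
 */
#if 0
#define EXAMPLE_RUN_CODE(ret) ((ret)&0xff) /* hypothetical decoder */
#define EXAMPLE_RUN_DATA(ret) ((ret) >> 8) /* hypothetical decoder */

static void example_handle_run_return(int64_t ret)
{
	switch (EXAMPLE_RUN_CODE(ret)) {
	case HF_VCPU_RUN_YIELD:
		/* The vcpu ran and yielded; it can be scheduled again. */
		break;
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
		/* Don't run this vcpu again until it has an interrupt. */
		break;
	case HF_VCPU_RUN_RESPONSE_READY:
		/* An RPC response of EXAMPLE_RUN_DATA(ret) bytes arrived. */
		break;
	}
}
#endif
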
/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM; this ensures
	 * the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}

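/*
 * Illustrative sketch, not compiled: a VM registering its buffers before
 * any RPC. hf_vm_configure() is assumed to be the guest-side wrapper for
 * api_vm_configure() declared in "vmapi/hf/call.h", taking the IPAs of a
 * page-aligned send page and a distinct page-aligned receive page. The
 * casts assume an identity-mapped guest where a buffer's VA equals its
 * IPA.
 */
#if 0
static uint8_t example_send_page[PAGE_SIZE]
	__attribute__((aligned(PAGE_SIZE)));
static uint8_t example_recv_page[PAGE_SIZE]
	__attribute__((aligned(PAGE_SIZE)));

static bool example_configure(void)
{
	/* Fails if a page is unaligned, already configured or the same. */
	return hf_vm_configure((size_t)example_send_page,
			       (size_t)example_recv_page) == 0;
}
#endif
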
/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int64_t api_rpc_request(uint32_t vm_id, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int64_t ret;

	/* Basic argument validation. */
	if (size > HF_RPC_REQUEST_MAX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			/*
			 * No vcpu is waiting for the request; returning the
			 * vcpu count tells the caller there is nothing to
			 * wake up.
			 */
			to->rpc.state = rpc_state_pending;
			ret = to->vcpu_count;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take target vcpu out of waiter list and mark as
			 * ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

			/* Return the index of the vcpu that was woken up. */
			ret = to_vcpu - to->vcpus;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}

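/*
 * Illustrative sketch, not compiled: the primary VM issuing a request via
 * the assumed guest-side wrapper hf_rpc_request(). Per the code above, a
 * non-negative return is either the index of the vcpu that was woken to
 * handle the request or, if none was waiting, the target's vcpu count.
 * example_send_page is the buffer registered with hf_vm_configure().
 */
#if 0
static bool example_request(uint32_t vm_id, const void *msg, size_t size)
{
	int64_t ret;

	memcpy(example_send_page, msg, size);

	ret = hf_rpc_request(vm_id, size);
	if (ret < 0) {
		/* Bad arguments, or the target isn't ready to receive. */
		return false;
	}

	if (ret < hf_vcpu_get_count(vm_id)) {
		/* A waiting vcpu was woken; run it to handle the request. */
		hf_vcpu_run(vm_id, ret);
	}

	return true;
}
#endif
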
/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate it by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int64_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	int64_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to primary VM. */
		*next = &primary->vcpus[cpu_index(cpu())];
		vm_set_current(primary);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(
			&(*next)->regs,
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
					     0));
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int64_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = vm_get(HF_PRIMARY_VM_ID);
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware a response was
	 * received, but leave the current vcpu still runnable.
	 */
	*next = api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_RESPONSE_READY, size),
		vcpu_state_ready);

	return 0;
}

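/*
 * Illustrative sketch, not compiled: a secondary VM's request-handling
 * loop built on the assumed guest-side wrappers hf_rpc_read_request() and
 * hf_rpc_reply(). Blocking in hf_rpc_read_request() hands the CPU back to
 * the primary VM until a request arrives; replying with ack=true also
 * acknowledges the request. example_handle() is a hypothetical handler
 * that reads the receive page and writes a reply into the send page.
 */
#if 0
static void example_service_loop(void)
{
	for (;;) {
		int64_t size = hf_rpc_read_request(true /* block */);
		size_t reply_size;

		if (size < 0) {
			continue;
		}

		reply_size = example_handle(example_recv_page, size,
					    example_send_page);

		/* Send the reply and acknowledge the request in one call. */
		hf_rpc_reply(reply_size, true /* ack */);
	}
}
#endif
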
/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int64_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int64_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}

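/*
 * Illustrative sketch, not compiled: handling a request that needs no
 * reply, using the assumed guest-side wrappers. Until hf_rpc_ack() (or a
 * reply with ack=true) is issued, no further requests are delivered to
 * this VM. example_consume() is a hypothetical handler.
 */
#if 0
static void example_handle_one_way_request(void)
{
	int64_t size = hf_rpc_read_request(true /* block */);

	if (size >= 0) {
		example_consume(example_recv_page, size);
		hf_rpc_ack();
	}
}
#endif
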
/**
 * Returns to the primary VM, leaving the current vcpu ready to be scheduled
 * again.
 */
struct vcpu *api_yield(void)
{
	return api_switch_to_primary(HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0),
				     vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0),
		vcpu_state_blocked_interrupt);
}
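
/*
 * Illustrative sketch, not compiled: how architecture-specific trap code
 * might use the helpers above when a secondary vcpu executes a
 * wait-for-interrupt instruction. The handler name and trap plumbing are
 * hypothetical; the returned vcpu is the primary VM vcpu to resume.
 */
#if 0
static struct vcpu *example_handle_wfi_trap(void)
{
	return api_wait_for_interrupt();
}
#endif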