#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_RPC_REQUEST_MAX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
				   enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
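	/*
	 * The primary VM is expected to have a vcpu for each physical CPU;
	 * pick the one that corresponds to the CPU we are running on.
	 */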
	struct vcpu *next = &primary->vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(primary);

	/*
	 * Set the return value the primary VM will see, indicating why it is
	 * being resumed.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Set the state of the vcpu we are switching away from. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_id)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * Runs the given vcpu of the given vm.
 */
int32_t api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm->id != HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto fail;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto fail;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto fail;
	}

	vcpu = &vm->vcpus[vcpu_idx];

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;

fail:
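	/*
	 * All validation failures are reported to the primary as if the vcpu
	 * is waiting for an interrupt, rather than with a separate error code.
	 */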
	return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
}

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0),
		vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses are accessible from the VM; this ensures that
	 * the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_id, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > HF_RPC_REQUEST_MAX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
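			/*
			 * No vcpu is waiting for the request; return the vcpu
			 * count, which is not a valid vcpu index, to indicate
			 * that no vcpu needs to be woken.
			 */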
			ret = to->vcpu_count;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take the target vcpu out of the waiter list and mark
			 * it as ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

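			/* Return the index of the vcpu that was woken. */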
			ret = to_vcpu - to->vcpus;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate it by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	int32_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
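		/*
		 * A request is already pending: return its size and mark it
		 * as being handled.
		 */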
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to the primary VM. */
		*next = &primary->vcpus[cpu_index(cpu())];
		vm_set_current(primary);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(
			&(*next)->regs,
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
					     0));
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from->id == HF_PRIMARY_VM_ID) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = vm_get(HF_PRIMARY_VM_ID);
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received, but leave the current vcpu runnable.
	 */
	*next = api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_RESPONSE_READY, size),
		vcpu_state_ready);

	return 0;
}

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}