#include "hf/api.h"

#include <assert.h>

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

static_assert(HF_RPC_REQUEST_MAX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
				   enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(&primary_vm);

	/*
	 * Set the return value the primary VM will receive when it resumes,
	 * indicating why control was returned to it.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Set the current vcpu's state to the requested one. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}
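
/*
 * A minimal sketch of a handler built on api_switch_to_primary(), in the same
 * way api_wait_for_interrupt() below is: report an event to the primary VM
 * while leaving the current vcpu runnable. on_yield() is hypothetical; the
 * response codes are the ones already used in this file.
 *
 *	struct vcpu *on_yield(void)
 *	{
 *		return api_switch_to_primary(
 *			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0),
 *			vcpu_state_ready);
 *	}
 */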

/**
 * Returns the number of secondary VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given secondary VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
	if (vm_idx >= secondary_vm_count) {
		return -1;
	}

	return secondary_vm[vm_idx].vcpu_count;
}
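
/*
 * A minimal sketch of how the primary VM might enumerate vcpus through these
 * two calls, assuming "vmapi/hf/call.h" provides hf_vm_get_count() and
 * hf_vcpu_get_count() wrappers that forward to them:
 *
 *	int32_t vm_count = hf_vm_get_count();
 *	int32_t vm_idx;
 *
 *	for (vm_idx = 0; vm_idx < vm_count; ++vm_idx) {
 *		int32_t vcpu_count = hf_vcpu_get_count(vm_idx);
 *		// -1 is only returned when vm_idx is out of range.
 *	}
 */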

/**
 * Runs the given vcpu of the given VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm != &primary_vm) {
		return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	}

	if (vm_idx >= secondary_vm_count) {
		return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	}

	vm = secondary_vm + vm_idx;
	if (vcpu_idx >= vm->vcpu_count) {
		return HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	}

	vcpu = vm->vcpus + vcpu_idx;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0);
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_YIELD, 0);
	}
	sl_unlock(&vcpu->lock);

	return ret;
}
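
/*
 * A minimal sketch of the scheduling loop the primary VM might run on top of
 * this call, assuming an hf_vcpu_run() wrapper in "vmapi/hf/call.h" and a
 * hypothetical HF_VCPU_RUN_CODE() macro that extracts the code from the
 * response value:
 *
 *	for (;;) {
 *		int32_t resp = hf_vcpu_run(vm_idx, vcpu_idx);
 *
 *		if (HF_VCPU_RUN_CODE(resp) == HF_VCPU_RUN_WAIT_FOR_INTERRUPT) {
 *			// Pick another vcpu to run, or idle until the next
 *			// interrupt.
 *		}
 *	}
 */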

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT, 0),
		vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided they are accessible from the VM, which ensures that the
	 * caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
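
/*
 * A minimal sketch of a guest registering its buffers through a corresponding
 * hf_vm_configure() wrapper, assuming the wrapper takes the guest's own
 * intermediate physical addresses; the page definitions are hypothetical:
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	if (hf_vm_configure((size_t)send_page, (size_t)recv_page) != 0) {
 *		// Unaligned, already configured, or inaccessible pages.
 *	}
 */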

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from != &primary_vm) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = secondary_vm + vm_idx;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

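		/*
		 * Tell the primary VM which vcpu to run: if no vcpu is
		 * waiting for a request, the target's vcpu count is returned
		 * as a sentinel and the request is left pending; otherwise
		 * the index of the woken vcpu is returned.
		 */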
		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
			ret = to->vcpu_count;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take target vcpu out of waiter list and mark as ready
			 * to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

			ret = to_vcpu - to->vcpus;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}
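
/*
 * A minimal sketch of the primary VM issuing a request via a corresponding
 * hf_rpc_request() wrapper, assuming its send buffer was registered earlier;
 * service_vm, msg and msg_len are hypothetical:
 *
 *	memcpy(send_page, msg, msg_len);
 *
 *	int32_t target = hf_rpc_request(service_vm, msg_len);
 *	if (target >= 0 && target < hf_vcpu_get_count(service_vm)) {
 *		// A vcpu was woken to handle the request; run it.
 *		hf_vcpu_run(service_vm, target);
 *	}
 */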

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate it by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int32_t ret;

	/* Only the secondary VMs can receive calls. */
	if (vm == &primary_vm) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to primary VM. */
		*next = &primary_vm.vcpus[cpu_index(cpu())];
		vm_set_current(&primary_vm);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(
			&(*next)->regs,
			HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
					     0));
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}
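
/*
 * A minimal sketch of a secondary VM's service loop over this call, assuming
 * hf_rpc_read_request() and hf_rpc_reply() wrappers in "vmapi/hf/call.h";
 * handle_request() is a hypothetical handler that reads the request from the
 * receive buffer and writes a reply into the send buffer:
 *
 *	for (;;) {
 *		int32_t len = hf_rpc_read_request(true);
 *		if (len < 0) {
 *			continue;
 *		}
 *
 *		size_t reply_len = handle_request(recv_page, len, send_page);
 *		hf_rpc_reply(reply_len, true);
 *	}
 */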

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from == &primary_vm) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = &primary_vm;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received, but leave the current vcpu runnable.
	 */
	*next = api_switch_to_primary(
		HF_VCPU_RUN_RESPONSE(HF_VCPU_RUN_RESPONSE_READY, size),
		vcpu_state_ready);

	return 0;
}

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}