#include "hf/api.h"

#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
                                   enum vcpu_state secondary_state)
{
        struct vcpu *vcpu = cpu()->current;
        struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

        /* Switch back to primary VM. */
        vm_set_current(&primary_vm);

        /*
         * Set the return value that the primary VM will see, indicating why
         * the secondary vcpu stopped running.
         */
        arch_regs_set_retval(&next->regs, primary_retval);

        /* Move the vcpu into the requested state. */
        sl_lock(&vcpu->lock);
        vcpu->state = secondary_state;
        sl_unlock(&vcpu->lock);

        return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
        return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
        if (vm_idx >= secondary_vm_count) {
                return -1;
        }

        return secondary_vm[vm_idx].vcpu_count;
}
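
/*
 * Usage sketch (illustrative only, not compiled here): the primary VM can
 * enumerate secondary VMs and their vcpus via the hypervisor call wrappers in
 * "vmapi/hf/call.h". The hf_vm_get_count/hf_vcpu_get_count names assume the
 * guest-side wrappers mirror the api_* handlers in this file.
 *
 *      uint32_t vms = hf_vm_get_count();
 *      for (uint32_t vm = 0; vm < vms; ++vm) {
 *              int32_t vcpus = hf_vcpu_get_count(vm);
 *              if (vcpus < 0) {
 *                      continue; // out-of-range VM index
 *              }
 *              // Register vcpus 0..vcpus-1 of vm with the scheduler.
 *      }
 */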

/**
 * Runs the given vcpu of the given VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
        struct vm *vm;
        struct vcpu *vcpu;
        int32_t ret;

        /* Only the primary VM can switch vcpus. */
        if (cpu()->current->vm != &primary_vm) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        if (vm_idx >= secondary_vm_count) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        vm = secondary_vm + vm_idx;
        if (vcpu_idx >= vm->vcpu_count) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        vcpu = vm->vcpus + vcpu_idx;

        sl_lock(&vcpu->lock);
        if (vcpu->state != vcpu_state_ready) {
                ret = HF_VCPU_WAIT_FOR_INTERRUPT;
        } else {
                vcpu->state = vcpu_state_running;
                vm_set_current(vm);
                *next = vcpu;
                ret = HF_VCPU_YIELD;
        }
        sl_unlock(&vcpu->lock);

        return ret;
}
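
/*
 * Scheduling sketch (illustrative only, not compiled here): a minimal primary
 * VM loop driving a secondary vcpu, assuming a guest-side hf_vcpu_run wrapper
 * that traps into api_vcpu_run. HF_VCPU_YIELD means the vcpu ran and yielded;
 * HF_VCPU_WAIT_FOR_INTERRUPT means there is nothing to run until an interrupt
 * arrives. wait_for_irq() is a hypothetical primary-side helper.
 *
 *      for (;;) {
 *              int32_t code = hf_vcpu_run(vm_idx, vcpu_idx);
 *              if (code == HF_VCPU_WAIT_FOR_INTERRUPT) {
 *                      wait_for_irq();
 *              }
 *      }
 */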

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
        return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
                                     vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
        struct vm *vm = cpu()->current->vm;
        paddr_t pa_send_begin;
        paddr_t pa_send_end;
        paddr_t pa_recv_begin;
        paddr_t pa_recv_end;
        int32_t ret;

        /* Fail if addresses are not page-aligned. */
        if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
            (ipa_addr(recv) & (PAGE_SIZE - 1))) {
                return -1;
        }

        sl_lock(&vm->lock);

        /* We only allow these to be set up once. */
        if (vm->rpc.send || vm->rpc.recv) {
                ret = -1;
                goto exit;
        }

        /*
         * TODO: Once memory sharing is implemented, we need to make sure that
         * these pages aren't and won't be shared.
         */

        /*
         * Convert the intermediate physical addresses to physical addresses,
         * provided they were accessible from the VM; this ensures that the
         * caller isn't trying to use another VM's memory.
         */
        if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
            !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
                ret = -1;
                goto exit;
        }

        /* Fail if the same page is used for the send and receive pages. */
        if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
                ret = -1;
                goto exit;
        }

        pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
        pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

        /* Map the send page as read-only in the hypervisor address space. */
        vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
        if (!vm->rpc.send) {
                ret = -1;
                goto exit;
        }

        /*
         * Map the receive page as writable in the hypervisor address space. On
         * failure, unmap the send page before returning.
         */
        vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
        if (!vm->rpc.recv) {
                vm->rpc.send = NULL;
                mm_unmap(pa_send_begin, pa_send_end, 0);
                ret = -1;
                goto exit;
        }

        /* TODO: Notify any waiters. */

        ret = 0;
exit:
        sl_unlock(&vm->lock);

        return ret;
}
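
/*
 * Configuration sketch (illustrative only, not compiled here): a VM donates
 * one page-aligned send page and one receive page, assuming an
 * hf_vm_configure wrapper mirroring this handler. ipa_of() stands in for
 * however the guest derives the intermediate physical address of its own
 * pages (often the identity in simple setups); the page names are
 * hypothetical.
 *
 *      alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *      alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *      if (hf_vm_configure(ipa_of(send_page), ipa_of(recv_page)) != 0) {
 *              // Misaligned, already configured, or not mapped in this VM.
 *      }
 */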

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
        struct vm *from = cpu()->current->vm;
        struct vm *to;
        const void *from_buf;
        int32_t ret;

        /* Basic argument validation. */
        if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
                return -1;
        }

        /* Only the primary VM can make calls. */
        if (from != &primary_vm) {
                return -1;
        }

        /*
         * Check that the sender has configured its send buffer. It is safe to
         * use from_buf after releasing the lock because the buffer cannot be
         * modified once it's configured.
         */
        sl_lock(&from->lock);
        from_buf = from->rpc.send;
        sl_unlock(&from->lock);
        if (!from_buf) {
                return -1;
        }

        to = secondary_vm + vm_idx;
        sl_lock(&to->lock);

        if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
                /* Fail if the target isn't currently ready to receive data. */
                ret = -1;
        } else {
                /* Copy data. */
                memcpy(to->rpc.recv, from_buf, size);
                to->rpc.recv_bytes = size;

                if (!to->rpc.recv_waiter) {
                        to->rpc.state = rpc_state_pending;
                        ret = 0;
                } else {
                        struct vcpu *to_vcpu = to->rpc.recv_waiter;

                        to->rpc.state = rpc_state_inflight;

                        /*
                         * Take target vcpu out of waiter list and mark as
                         * ready to run again.
                         */
                        sl_lock(&to_vcpu->lock);
                        to->rpc.recv_waiter = to_vcpu->rpc_next;
                        to_vcpu->state = vcpu_state_ready;
                        arch_regs_set_retval(&to_vcpu->regs, size);
                        sl_unlock(&to_vcpu->lock);

                        ret = to_vcpu - to->vcpus + 1;
                }
        }

        sl_unlock(&to->lock);

        return ret;
}
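
/*
 * Request sketch (illustrative only, not compiled here): the primary VM
 * writes into its configured send page and issues the request. A return of
 * N + 1 means vcpu N of the target was unblocked to handle it; 0 means the
 * request was left pending for the next reader. hf_rpc_request is assumed to
 * mirror this handler, and send_page/msg/len are hypothetical.
 *
 *      memcpy(send_page, msg, len); // len <= PAGE_SIZE
 *      int32_t r = hf_rpc_request(vm_idx, len);
 *      if (r > 0) {
 *              // Run vcpu r - 1 of vm_idx so it can process the request.
 *      }
 */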

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
        struct vcpu *vcpu = cpu()->current;
        struct vm *vm = vcpu->vm;
        int32_t ret;

        /* Only secondary VMs can receive calls. */
        if (vm == &primary_vm) {
                return -1;
        }

        sl_lock(&vm->lock);
        if (vm->rpc.state == rpc_state_pending) {
                ret = vm->rpc.recv_bytes;
                vm->rpc.state = rpc_state_inflight;
        } else if (!block) {
                ret = -1;
        } else {
                sl_lock(&vcpu->lock);
                vcpu->state = vcpu_state_blocked_rpc;

                /* Push vcpu into waiter list. */
                vcpu->rpc_next = vm->rpc.recv_waiter;
                vm->rpc.recv_waiter = vcpu;
                sl_unlock(&vcpu->lock);

                /* Switch back to the primary VM. */
                *next = &primary_vm.vcpus[cpu_index(cpu())];
                vm_set_current(&primary_vm);

                /*
                 * Indicate to the primary VM that this vcpu blocked waiting
                 * for an interrupt.
                 */
                arch_regs_set_retval(&(*next)->regs,
                                     HF_VCPU_WAIT_FOR_INTERRUPT);
                ret = 0;
        }
        sl_unlock(&vm->lock);

        return ret;
}
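
/*
 * Service-loop sketch (illustrative only, not compiled here): a secondary VM
 * blocks for requests, handles each one from its receive page, and
 * acknowledges it so the next request can be accepted. The
 * hf_rpc_read_request/hf_rpc_ack wrappers are assumed to mirror these
 * handlers; handle() and recv_page are hypothetical.
 *
 *      for (;;) {
 *              int32_t n = hf_rpc_read_request(true);
 *              if (n < 0) {
 *                      continue;
 *              }
 *              handle(recv_page, n);
 *              hf_rpc_ack();
 *      }
 */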

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
        struct vm *from = cpu()->current->vm;
        struct vm *to;
        const void *from_buf;

        /* Basic argument validation. */
        if (size > PAGE_SIZE) {
                return -1;
        }

        /* Only secondary VMs can send responses. */
        if (from == &primary_vm) {
                return -1;
        }

        /* Acknowledge the current pending request if requested. */
        if (ack) {
                api_rpc_ack();
        }

        /*
         * Check that the sender has configured its send buffer. It is safe to
         * use from_buf after releasing the lock because the buffer cannot be
         * modified once it's configured.
         */
        sl_lock(&from->lock);
        from_buf = from->rpc.send;
        sl_unlock(&from->lock);
        if (!from_buf) {
                return -1;
        }

        to = &primary_vm;
        sl_lock(&to->lock);

        if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
                /*
                 * Fail if the target isn't currently ready to receive a
                 * response.
                 */
                sl_unlock(&to->lock);
                return -1;
        }

        /* Copy data. */
        memcpy(to->rpc.recv, from_buf, size);
        to->rpc.recv_bytes = size;
        to->rpc.state = rpc_state_inflight;
        sl_unlock(&to->lock);

        /*
         * Switch back to the primary VM so that it is aware that a response
         * was received, but leave the current vcpu runnable.
         */
        *next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
                                      vcpu_state_ready);

        return 0;
}
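
/*
 * Decoding sketch (illustrative only, not compiled here): because the reply
 * packs the size into the primary's return value as
 * (size << 8) | HF_VCPU_RESPONSE_READY, the primary can recover both fields
 * from what hf_vcpu_run returned, assuming the HF_VCPU_* codes fit in the
 * low byte. consume() and recv_page are hypothetical.
 *
 *      int32_t r = hf_vcpu_run(vm_idx, vcpu_idx);
 *      if ((r & 0xff) == HF_VCPU_RESPONSE_READY) {
 *              consume(recv_page, (size_t)(r >> 8));
 *      }
 */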

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
        struct vm *vm = cpu()->current->vm;
        int32_t ret;

        sl_lock(&vm->lock);
        if (vm->rpc.state != rpc_state_inflight) {
                ret = -1;
        } else {
                ret = 0;
                vm->rpc.state = rpc_state_idle;
        }
        sl_unlock(&vm->lock);

        if (ret == 0) {
                /* TODO: Notify waiters, if any. */
        }

        return ret;
}