#include "api.h"

#include "std.h"
#include "vm.h"

#include "vmapi/hf/call.h"

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
				   enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(&primary_vm);

	/*
	 * Set the return value the primary vcpu will see, indicating why the
	 * secondary vcpu stopped running.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Mark the vcpu as waiting. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
	if (vm_idx >= secondary_vm_count) {
		return -1;
	}

	return secondary_vm[vm_idx].vcpu_count;
}

/**
 * Runs the given vcpu of the given VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm != &primary_vm) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	if (vm_idx >= secondary_vm_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vm = secondary_vm + vm_idx;
	if (vcpu_idx >= vm->vcpu_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vcpu = vm->vcpus + vcpu_idx;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_YIELD;
	}
	sl_unlock(&vcpu->lock);

	return ret;
}
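
/*
 * Illustrative sketch, not part of this file: a minimal scheduler loop in the
 * primary VM could drive a secondary VM's vcpus through the corresponding
 * hypercall wrappers. hf_vcpu_get_count() and hf_vcpu_run() are assumed to be
 * the vmapi counterparts of api_vcpu_get_count()/api_vcpu_run() declared in
 * "vmapi/hf/call.h"; their exact signatures here are an assumption.
 */
#if 0
static void primary_run_vm(uint32_t vm_idx)
{
	int32_t vcpu_count = hf_vcpu_get_count(vm_idx);
	int32_t i;

	if (vcpu_count < 0) {
		return; /* Invalid VM index. */
	}

	for (i = 0; i < vcpu_count; i++) {
		/* Run the vcpu until it blocks waiting for an interrupt. */
		while (hf_vcpu_run(vm_idx, i) == HF_VCPU_YIELD) {
		}
	}
}
#endif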

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
				     vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send;
	paddr_t pa_recv;
	vaddr_t send_begin;
	vaddr_t send_end;
	vaddr_t recv_begin;
	vaddr_t recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses are accessible from the VM; this ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_ptable_translate_ipa(&vm->ptable, send, &pa_send) ||
	    !mm_ptable_translate_ipa(&vm->ptable, recv, &pa_recv)) {
		ret = -1;
		goto exit;
	}

	send_begin = va_from_pa(pa_send);
	send_end = va_add(send_begin, PAGE_SIZE);
	recv_begin = va_from_pa(pa_recv);
	recv_end = va_add(recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	if (!mm_identity_map(send_begin, send_end, MM_MODE_R)) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	if (!mm_identity_map(recv_begin, recv_end, MM_MODE_W)) {
		mm_unmap(send_begin, send_end, 0);
		ret = -1;
		goto exit;
	}

	/* Save pointers to the pages. */
	vm->rpc.send = ptr_from_va(send_begin);
	vm->rpc.recv = ptr_from_va(recv_begin);

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
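
/*
 * Illustrative sketch, not part of this file: a guest would reserve two
 * page-aligned pages and register them once at boot. hf_vm_configure() is
 * assumed to be the vmapi counterpart of api_vm_configure(), and passing the
 * page addresses as plain integers is an assumption about the wrapper's
 * calling convention.
 */
#if 0
static uint8_t send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static uint8_t recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

static void guest_init(void)
{
	/* Fails with -1 if misaligned or if already configured. */
	if (hf_vm_configure((size_t)send_page, (size_t)recv_page) != 0) {
		/* Handle the error, e.g. panic during early boot. */
	}
}
#endif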

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from != &primary_vm) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = secondary_vm + vm_idx;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
			ret = 0;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take the target vcpu out of the waiter list and mark
			 * it as ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

			/*
			 * Return the woken vcpu's index biased by 1, so that 0
			 * remains distinguishable as "request queued, no vcpu
			 * to wake".
			 */
			ret = to_vcpu - to->vcpus + 1;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}
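
/*
 * Illustrative sketch, not part of this file: from the primary VM, issuing an
 * RPC is a copy into its send page followed by the request hypercall.
 * hf_rpc_request() is assumed to be the vmapi counterpart of
 * api_rpc_request(); send_page is the buffer registered via the configure
 * call above. The return convention mirrors api_rpc_request(): negative on
 * failure, 0 if the request was queued, and (vcpu index + 1) if a blocked
 * vcpu is now ready to run.
 */
#if 0
static int32_t primary_send_request(uint32_t vm_idx, const void *msg,
				    size_t len)
{
	int32_t ret;

	memcpy(send_page, msg, len);
	ret = hf_rpc_request(vm_idx, len);
	if (ret > 0) {
		/* Vcpu (ret - 1) of the target VM was woken; schedule it. */
	}

	return ret;
}
#endif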

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int32_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm == &primary_vm) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to primary VM. */
		*next = &primary_vm.vcpus[cpu_index(cpu())];
		vm_set_current(&primary_vm);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(&(*next)->regs,
				     HF_VCPU_WAIT_FOR_INTERRUPT);
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from == &primary_vm) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = &primary_vm;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received, but leave the current vcpu runnable.
	 */
	*next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
				      vcpu_state_ready);

	return 0;
}

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
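
/*
 * Illustrative sketch, not part of this file: a secondary VM's service loop
 * pairs the read-request and reply calls defined above. hf_rpc_read_request()
 * and hf_rpc_reply() are assumed to be the vmapi counterparts of
 * api_rpc_read_request() and api_rpc_reply(); handle_request() and the two
 * buffers are placeholders for guest-specific code.
 */
#if 0
static void secondary_service_loop(void)
{
	for (;;) {
		/* Block until the primary VM delivers a request. */
		int32_t len = hf_rpc_read_request(true);

		if (len < 0) {
			continue;
		}

		/* Consume recv_page[0..len) and build a reply in send_page. */
		size_t reply_len = handle_request(recv_page, len, send_page);

		/* Send the reply and acknowledge the request in one call. */
		hf_rpc_reply(reply_len, true);
	}
}
#endif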