#include "api.h"

#include "std.h"
#include "vm.h"

#include "vmapi/hf/call.h"

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
				   enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(&primary_vm);

	/* Set the return value that the primary vcpu will see on resume. */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Set the secondary vcpu's new state. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
	if (vm_idx >= secondary_vm_count) {
		return -1;
	}

	return secondary_vm[vm_idx].vcpu_count;
}

/**
 * Runs the given vcpu of the given VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm != &primary_vm) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	if (vm_idx >= secondary_vm_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vm = secondary_vm + vm_idx;
	if (vcpu_idx >= vm->vcpu_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vcpu = vm->vcpus + vcpu_idx;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_YIELD;
	}
	sl_unlock(&vcpu->lock);

	return ret;
}
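
/*
 * A minimal sketch of how the primary VM's scheduler might drive the run
 * interface from the guest side. The hf_vm_get_count(), hf_vcpu_get_count()
 * and hf_vcpu_run() wrappers are assumed to exist in "vmapi/hf/call.h" and
 * map one-to-one onto the api_* functions above; their exact names and
 * signatures are assumptions, not something this file defines.
 */
#if 0 /* Illustrative only; not part of the hypervisor build. */
void example_primary_scheduler_tick(void)
{
	uint32_t vm;
	uint32_t vcpu;

	for (vm = 0; vm < (uint32_t)hf_vm_get_count(); vm++) {
		int32_t vcpus = hf_vcpu_get_count(vm);

		for (vcpu = 0; vcpu < (uint32_t)vcpus; vcpu++) {
			int32_t ret = hf_vcpu_run(vm, vcpu);

			if (ret == HF_VCPU_WAIT_FOR_INTERRUPT) {
				/* Not ready; poll it again later. */
				continue;
			}

			if (ret != HF_VCPU_YIELD) {
				/*
				 * api_rpc_reply (below) encodes a response as
				 * (size << 8) | HF_VCPU_RESPONSE_READY, so
				 * recover the size and consume the recv page.
				 */
				size_t size = (size_t)ret >> 8;
				(void)size;
			}
		}
	}
}
#endif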

/**
 * Puts the current vcpu into wait-for-interrupt mode, and returns to the
 * primary VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
				     vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM, which ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
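
/*
 * A minimal sketch of the guest-side setup api_vm_configure expects,
 * assuming an hf_vm_configure() wrapper in "vmapi/hf/call.h" (the wrapper
 * name is an assumption). The constraints below come from the checks above:
 * page-aligned IPAs, two distinct pages, translatable via the caller's own
 * page table, and configurable only once.
 */
#if 0 /* Illustrative only; not part of the hypervisor build. */
void example_configure_buffers(ipaddr_t send_page, ipaddr_t recv_page)
{
	/*
	 * send_page and recv_page must be page-aligned addresses of two
	 * distinct, unshared pages owned by the calling VM.
	 */
	if (hf_vm_configure(send_page, recv_page) != 0) {
		/*
		 * -1 means: unaligned address, already configured, address
		 * not accessible to this VM, or send == recv.
		 */
	}
}
#endif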

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from != &primary_vm) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = secondary_vm + vm_idx;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
			ret = 0;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take target vcpu out of waiter list and mark as
			 * ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

			ret = to_vcpu - to->vcpus + 1;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}
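
/*
 * A sketch of the primary side of a request, assuming hf_rpc_request() and
 * hf_vcpu_run() wrappers (names assumed). The return convention is taken
 * from the code above: -1 on failure, 0 when the request is left pending,
 * and (vcpu index + 1) when a blocked vcpu was made ready to handle it.
 */
#if 0 /* Illustrative only; not part of the hypervisor build. */
void example_send_request(uint32_t vm_idx, void *my_send_page,
			  const void *msg, size_t len)
{
	int32_t ret;

	/* Stage the message in the caller's configured send page. */
	memcpy(my_send_page, msg, len);

	ret = hf_rpc_request(vm_idx, len);
	if (ret > 0) {
		/*
		 * A waiter was woken: run vcpu (ret - 1) so it can handle
		 * the request.
		 */
		hf_vcpu_run(vm_idx, (uint32_t)(ret - 1));
	}
}
#endif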

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * calling either api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int32_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm == &primary_vm) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push the vcpu onto the waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to the primary VM. */
		*next = &primary_vm.vcpus[cpu_index(cpu())];
		vm_set_current(&primary_vm);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(&(*next)->regs,
				     HF_VCPU_WAIT_FOR_INTERRUPT);
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from == &primary_vm) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = &primary_vm;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received, but leave the current vcpu runnable.
	 */
	*next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
				      vcpu_state_ready);

	return 0;
}
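
/*
 * A sketch of a secondary VM's service loop built on the two calls above,
 * assuming hf_rpc_read_request() and hf_rpc_reply() wrappers (names
 * assumed) and a hypothetical handle_request() that consumes the recv page
 * and stages a reply in the send page.
 */
#if 0 /* Illustrative only; not part of the hypervisor build. */
void example_service_loop(void)
{
	for (;;) {
		/* Block until a request arrives; returns its size. */
		int32_t size = hf_rpc_read_request(true);

		if (size < 0) {
			continue;
		}

		size_t reply_len = handle_request((size_t)size);

		/* Acknowledge the request and send the reply in one call. */
		hf_rpc_reply(reply_len, true);
	}
}
#endif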

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}
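
/*
 * A sketch of the acknowledge-only path, assuming hf_rpc_read_request()
 * and hf_rpc_ack() wrappers (names assumed): a secondary that consumes a
 * request without replying must still ack it before another request can
 * be delivered.
 */
#if 0 /* Illustrative only; not part of the hypervisor build. */
void example_consume_without_reply(void)
{
	int32_t size = hf_rpc_read_request(true);

	if (size >= 0) {
		/* ... act on the recv page contents ... */
		hf_rpc_ack(); /* rpc.state returns to idle */
	}
}
#endif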