#include "api.h"

#include "std.h"
#include "vm.h"
#include "vmapi/hf/call.h"

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
				   enum vcpu_state secondary_state)
{
	struct vcpu *vcpu = cpu()->current;
	struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

	/* Switch back to primary VM. */
	vm_set_current(&primary_vm);

	/*
	 * Set the return value that the primary VM's vcpu will observe, e.g.
	 * to indicate that this vcpu blocked waiting for an interrupt.
	 */
	arch_regs_set_retval(&next->regs, primary_retval);

	/* Move the vcpu into the caller-specified state. */
	sl_lock(&vcpu->lock);
	vcpu->state = secondary_state;
	sl_unlock(&vcpu->lock);

	return next;
}

/**
 * Returns the number of secondary VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
	return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given secondary VM.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
	if (vm_idx >= secondary_vm_count) {
		return -1;
	}

	return secondary_vm[vm_idx].vcpu_count;
}

/**
 * Runs the given vcpu of the given secondary VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int32_t ret;

	/* Only the primary VM can switch vcpus. */
	if (cpu()->current->vm != &primary_vm) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	if (vm_idx >= secondary_vm_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vm = secondary_vm + vm_idx;
	if (vcpu_idx >= vm->vcpu_count) {
		return HF_VCPU_WAIT_FOR_INTERRUPT;
	}

	vcpu = vm->vcpus + vcpu_idx;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = HF_VCPU_WAIT_FOR_INTERRUPT;
	} else {
		vcpu->state = vcpu_state_running;
		vm_set_current(vm);
		*next = vcpu;
		ret = HF_VCPU_YIELD;
	}
	sl_unlock(&vcpu->lock);

	return ret;
}
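
/*
 * Example (illustrative only): a minimal run loop that the primary VM's
 * kernel could use to drive the secondary vcpus via this hypercall. The
 * hf_vm_get_count(), hf_vcpu_get_count() and hf_vcpu_run() wrappers are
 * assumed to be the guest-side entry points from "vmapi/hf/call.h"; their
 * exact names and signatures here are assumptions, not guarantees.
 */
#if 0 /* Guest-side sketch; not built as part of the hypervisor. */
void example_primary_run_loop(void)
{
	uint32_t vm_count = hf_vm_get_count();
	uint32_t vm;

	for (vm = 0; vm < vm_count; vm++) {
		uint32_t vcpu_count = hf_vcpu_get_count(vm);
		uint32_t vcpu;

		for (vcpu = 0; vcpu < vcpu_count; vcpu++) {
			int32_t ret = hf_vcpu_run(vm, vcpu);

			if (ret == HF_VCPU_WAIT_FOR_INTERRUPT) {
				/* Blocked; skip until an interrupt fires. */
				continue;
			}

			/* HF_VCPU_YIELD: ran and yielded; revisit later. */
		}
	}
}
#endif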

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
	return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
				     vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(ipaddr_t send, ipaddr_t recv)
{
	struct vm *vm = cpu()->current->vm;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int32_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	sl_lock(&vm->lock);

	/* We only allow these to be set up once. */
	if (vm->rpc.send || vm->rpc.recv) {
		ret = -1;
		goto exit;
	}

	/*
	 * TODO: Once memory sharing is implemented, we need to make sure that
	 * these pages aren't and won't be shared.
	 */

	/*
	 * Convert the intermediate physical addresses to physical addresses,
	 * provided the addresses were accessible from the VM; this ensures
	 * that the caller isn't trying to use another VM's memory.
	 */
	if (!mm_vm_translate(&vm->ptable, send, &pa_send_begin) ||
	    !mm_vm_translate(&vm->ptable, recv, &pa_recv_begin)) {
		ret = -1;
		goto exit;
	}

	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Map the send page as read-only in the hypervisor address space. */
	vm->rpc.send = mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
	if (!vm->rpc.send) {
		ret = -1;
		goto exit;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->rpc.recv = mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
	if (!vm->rpc.recv) {
		vm->rpc.send = NULL;
		mm_unmap(pa_send_begin, pa_send_end, 0);
		ret = -1;
		goto exit;
	}

	/* TODO: Notify any waiters. */

	ret = 0;
exit:
	sl_unlock(&vm->lock);

	return ret;
}
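
/*
 * Example (illustrative only): how a VM might register one page each for
 * sending and receiving. The hf_vm_configure() wrapper is assumed to be the
 * guest-side entry point from "vmapi/hf/call.h", taking the addresses of the
 * two pages as seen by the VM; the wrapper name, its signature and the
 * buffers below are assumptions, not guarantees.
 */
#if 0 /* Guest-side sketch; not built as part of the hypervisor. */
static uint8_t send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static uint8_t recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

bool example_configure_rpc(void)
{
	/* Both pages must be page-aligned and owned by this VM. */
	return hf_vm_configure((uintptr_t)send_page, (uintptr_t)recv_page) ==
	       0;
}
#endif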

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;
	int32_t ret;

	/* Basic argument validation. */
	if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
		return -1;
	}

	/* Only the primary VM can make calls. */
	if (from != &primary_vm) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = secondary_vm + vm_idx;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/* Fail if the target isn't currently ready to receive data. */
		ret = -1;
	} else {
		/* Copy data. */
		memcpy(to->rpc.recv, from_buf, size);
		to->rpc.recv_bytes = size;

		if (!to->rpc.recv_waiter) {
			to->rpc.state = rpc_state_pending;
			ret = 0;
		} else {
			struct vcpu *to_vcpu = to->rpc.recv_waiter;

			to->rpc.state = rpc_state_inflight;

			/*
			 * Take the target vcpu out of the waiter list and
			 * mark it as ready to run again.
			 */
			sl_lock(&to_vcpu->lock);
			to->rpc.recv_waiter = to_vcpu->rpc_next;
			to_vcpu->state = vcpu_state_ready;
			arch_regs_set_retval(&to_vcpu->regs, size);
			sl_unlock(&to_vcpu->lock);

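			/*
			 * Return the woken vcpu's index plus one, so that a
			 * return value of zero unambiguously means no vcpu
			 * needs to be scheduled.
			 */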
			ret = to_vcpu - to->vcpus + 1;
		}
	}

	sl_unlock(&to->lock);

	return ret;
}
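
/*
 * Example (illustrative only): the primary VM sending a request to secondary
 * VM 0, using the send_page registered in the configuration sketch above.
 * The hf_rpc_request() wrapper is assumed to be the guest-side entry point
 * from "vmapi/hf/call.h"; its name and signature are assumptions, not
 * guarantees.
 */
#if 0 /* Guest-side sketch; not built as part of the hypervisor. */
void example_send_request(const void *msg, size_t len)
{
	int32_t ret;

	/* Stage the message in the registered send buffer. */
	memcpy(send_page, msg, len);

	ret = hf_rpc_request(0, len);
	if (ret > 0) {
		/* ret - 1 is the index of the vcpu woken to handle it. */
	} else if (ret == 0) {
		/* Request queued; no vcpu was waiting. */
	} else {
		/* Target busy or arguments invalid. */
	}
}
#endif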

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * either calling api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
	struct vcpu *vcpu = cpu()->current;
	struct vm *vm = vcpu->vm;
	int32_t ret;

	/* Only secondary VMs can receive calls. */
	if (vm == &primary_vm) {
		return -1;
	}

	sl_lock(&vm->lock);
	if (vm->rpc.state == rpc_state_pending) {
		ret = vm->rpc.recv_bytes;
		vm->rpc.state = rpc_state_inflight;
	} else if (!block) {
		ret = -1;
	} else {
		sl_lock(&vcpu->lock);
		vcpu->state = vcpu_state_blocked_rpc;

		/* Push vcpu into waiter list. */
		vcpu->rpc_next = vm->rpc.recv_waiter;
		vm->rpc.recv_waiter = vcpu;
		sl_unlock(&vcpu->lock);

		/* Switch back to primary VM. */
		*next = &primary_vm.vcpus[cpu_index(cpu())];
		vm_set_current(&primary_vm);

		/*
		 * Indicate to the primary VM that this vcpu blocked waiting
		 * for an interrupt.
		 */
		arch_regs_set_retval(&(*next)->regs,
				     HF_VCPU_WAIT_FOR_INTERRUPT);
		ret = 0;
	}
	sl_unlock(&vm->lock);

	return ret;
}

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
	struct vm *from = cpu()->current->vm;
	struct vm *to;
	const void *from_buf;

	/* Basic argument validation. */
	if (size > PAGE_SIZE) {
		return -1;
	}

	/* Only secondary VMs can send responses. */
	if (from == &primary_vm) {
		return -1;
	}

	/* Acknowledge the current pending request if requested. */
	if (ack) {
		api_rpc_ack();
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->rpc.send;
	sl_unlock(&from->lock);
	if (!from_buf) {
		return -1;
	}

	to = &primary_vm;
	sl_lock(&to->lock);

	if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
		/*
		 * Fail if the target isn't currently ready to receive a
		 * response.
		 */
		sl_unlock(&to->lock);
		return -1;
	}

	/* Copy data. */
	memcpy(to->rpc.recv, from_buf, size);
	to->rpc.recv_bytes = size;
	to->rpc.state = rpc_state_inflight;
	sl_unlock(&to->lock);

	/*
	 * Switch back to the primary VM so that it is aware that a response
	 * was received; the response size is encoded in the upper bits of the
	 * return value. The current vcpu is left runnable.
	 */
	*next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
				      vcpu_state_ready);

	return 0;
}
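
/*
 * Example (illustrative only): a secondary VM's service loop built on the
 * read/reply calls above. The hf_rpc_read_request() and hf_rpc_reply()
 * wrappers are assumed to be the guest-side entry points from
 * "vmapi/hf/call.h", and handle_request() is a hypothetical handler that
 * consumes the request in recv_page and builds a response in send_page; all
 * names and signatures are assumptions, not guarantees.
 */
#if 0 /* Guest-side sketch; not built as part of the hypervisor. */
size_t handle_request(const void *req, size_t req_size, void *resp);

void example_secondary_serve_loop(void)
{
	for (;;) {
		/* Block until the primary sends a request. */
		int32_t size = hf_rpc_read_request(true);
		size_t reply_size;

		if (size < 0) {
			continue;
		}

		reply_size = handle_request(recv_page, size, send_page);

		/* Reply and acknowledge the request in a single call. */
		hf_rpc_reply(reply_size, true);
	}
}
#endif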

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
	struct vm *vm = cpu()->current->vm;
	int32_t ret;

	sl_lock(&vm->lock);
	if (vm->rpc.state != rpc_state_inflight) {
		ret = -1;
	} else {
		ret = 0;
		vm->rpc.state = rpc_state_idle;
	}
	sl_unlock(&vm->lock);

	if (ret == 0) {
		/* TODO: Notify waiters, if any. */
	}

	return ret;
}