#include "api.h"

#include "arch_api.h"
#include "std.h"
#include "vm.h"

struct vm secondary_vm[MAX_VMS];
uint32_t secondary_vm_count;
struct vm primary_vm;

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 */
struct vcpu *api_switch_to_primary(size_t primary_retval,
                                   enum vcpu_state secondary_state)
{
        struct vcpu *vcpu = cpu()->current;
        struct vcpu *next = &primary_vm.vcpus[cpu_index(cpu())];

        /* Switch back to primary VM. */
        vm_set_current(&primary_vm);

        /*
         * Set the return value that the primary VM's vcpu will see, e.g., an
         * indication that this vcpu blocked waiting for an interrupt.
         */
        arch_regs_set_retval(&next->regs, primary_retval);

        /* Move the current vcpu into the requested state. */
        sl_lock(&vcpu->lock);
        vcpu->state = secondary_state;
        sl_unlock(&vcpu->lock);

        return next;
}

/**
 * Returns the number of secondary VMs configured to run.
 */
int32_t api_vm_get_count(void)
{
        return secondary_vm_count;
}

/**
 * Returns the number of vcpus configured in the given VM, or -1 if the VM
 * index is out of range.
 */
int32_t api_vcpu_get_count(uint32_t vm_idx)
{
        if (vm_idx >= secondary_vm_count) {
                return -1;
        }

        return secondary_vm[vm_idx].vcpu_count;
}
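
/*
 * A minimal sketch (excluded from the build) of how a scheduler in the
 * primary VM might enumerate the secondary VMs with the two accessors above.
 * In practice these are reached via hypercalls; the direct calls here only
 * illustrate the contract (-1 for an out-of-range index).
 */
#if 0
static void example_enumerate_vms(void)
{
        int32_t vm_count = api_vm_get_count();
        int32_t i;

        for (i = 0; i < vm_count; i++) {
                /* In range, so this returns the vcpu count, not -1. */
                int32_t vcpu_count = api_vcpu_get_count(i);
                (void)vcpu_count;
        }
}
#endif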

/**
 * Runs the given vcpu of the given VM.
 */
int32_t api_vcpu_run(uint32_t vm_idx, uint32_t vcpu_idx, struct vcpu **next)
{
        struct vm *vm;
        struct vcpu *vcpu;
        int32_t ret;

        /* Only the primary VM can switch vcpus. */
        if (cpu()->current->vm != &primary_vm) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        if (vm_idx >= secondary_vm_count) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        vm = secondary_vm + vm_idx;
        if (vcpu_idx >= vm->vcpu_count) {
                return HF_VCPU_WAIT_FOR_INTERRUPT;
        }

        vcpu = vm->vcpus + vcpu_idx;

        sl_lock(&vcpu->lock);
        if (vcpu->state != vcpu_state_ready) {
                ret = HF_VCPU_WAIT_FOR_INTERRUPT;
        } else {
                vcpu->state = vcpu_state_running;
                vm_set_current(vm);
                *next = vcpu;
                ret = HF_VCPU_YIELD;
        }
        sl_unlock(&vcpu->lock);

        return ret;
}
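
/*
 * A minimal sketch (excluded from the build) of how a dispatcher might act on
 * api_vcpu_run()'s two outcomes. `arch_switch_to_vcpu` is a hypothetical
 * stand-in for the arch-specific context switch, not a function defined in
 * this project.
 */
#if 0
static void example_run_vcpu(uint32_t vm_idx, uint32_t vcpu_idx)
{
        struct vcpu *next = NULL;

        if (api_vcpu_run(vm_idx, vcpu_idx, &next) == HF_VCPU_YIELD) {
                /* The vcpu was marked running; switch the CPU to it. */
                arch_switch_to_vcpu(next);
        } else {
                /*
                 * HF_VCPU_WAIT_FOR_INTERRUPT: bad indices, the caller isn't
                 * the primary VM, or the vcpu isn't ready; retry later.
                 */
        }
}
#endif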

/**
 * Puts the current vcpu in wait-for-interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(void)
{
        return api_switch_to_primary(HF_VCPU_WAIT_FOR_INTERRUPT,
                                     vcpu_state_blocked_interrupt);
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 */
int32_t api_vm_configure(paddr_t send, paddr_t recv)
{
        struct vm *vm = cpu()->current->vm;
        int32_t ret;

        /* Fail if addresses are not page-aligned. */
        if ((recv & (PAGE_SIZE - 1)) || (send & (PAGE_SIZE - 1))) {
                return -1;
        }

        sl_lock(&vm->lock);

        /* We only allow these to be set up once. */
        if (vm->rpc.recv || vm->rpc.send) {
                ret = -1;
                goto exit;
        }

        /*
         * TODO: Once memory sharing is implemented, we need to make sure that
         * these pages aren't and won't be shared.
         */

        /*
         * Check that both pages are accessible from the VM, i.e., ensure that
         * the caller isn't trying to use another VM's memory.
         */
        if (!mm_ptable_is_mapped(&vm->ptable, recv, 0) ||
            !mm_ptable_is_mapped(&vm->ptable, send, 0)) {
                ret = -1;
                goto exit;
        }

        /* Map the send page as read-only in the hypervisor address space. */
        if (!mm_map((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, send,
                    MM_MODE_R)) {
                ret = -1;
                goto exit;
        }

        /*
         * Map the receive page as writable in the hypervisor address space. On
         * failure, unmap the send page before returning.
         */
        if (!mm_map((vaddr_t)recv, (vaddr_t)recv + PAGE_SIZE, recv,
                    MM_MODE_W)) {
                mm_unmap((vaddr_t)send, (vaddr_t)send + PAGE_SIZE, 0);
                ret = -1;
                goto exit;
        }

        /* Save pointers to the pages. */
        vm->rpc.send = (const void *)(vaddr_t)send;
        vm->rpc.recv = (void *)(vaddr_t)recv;

        /* TODO: Notify any waiters. */

        ret = 0;
exit:
        sl_unlock(&vm->lock);

        return ret;
}
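
/*
 * A minimal sketch (excluded from the build) of a VM donating one page each
 * for sending and receiving. `hf_vm_configure` is a hypothetical guest-side
 * wrapper around this call, and the example assumes the guest's buffers are
 * identity-mapped so their addresses double as physical addresses.
 */
#if 0
static uint8_t send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static uint8_t recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

static bool example_configure_rpc(void)
{
        /* Fails if misaligned, unmapped, or already configured. */
        return hf_vm_configure((paddr_t)send_page, (paddr_t)recv_page) == 0;
}
#endif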

/**
 * Sends an RPC request from the primary VM to a secondary VM. Data is copied
 * from the caller's send buffer to the destination's receive buffer.
 *
 * Returns -1 on failure, 0 if the request was queued for later delivery, or
 * the index of the woken target vcpu plus one if it should now be run.
 */
int32_t api_rpc_request(uint32_t vm_idx, size_t size)
{
        struct vm *from = cpu()->current->vm;
        struct vm *to;
        const void *from_buf;
        int32_t ret;

        /* Basic argument validation. */
        if (size > PAGE_SIZE || vm_idx >= secondary_vm_count) {
                return -1;
        }

        /* Only the primary VM can make calls. */
        if (from != &primary_vm) {
                return -1;
        }

        /*
         * Check that the sender has configured its send buffer. It is safe to
         * use from_buf after releasing the lock because the buffer cannot be
         * modified once it's configured.
         */
        sl_lock(&from->lock);
        from_buf = from->rpc.send;
        sl_unlock(&from->lock);
        if (!from_buf) {
                return -1;
        }

        to = secondary_vm + vm_idx;
        sl_lock(&to->lock);

        if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
                /* Fail if the target isn't currently ready to receive data. */
                ret = -1;
        } else {
                /* Copy data. */
                memcpy(to->rpc.recv, from_buf, size);
                to->rpc.recv_bytes = size;

                if (!to->rpc.recv_waiter) {
                        to->rpc.state = rpc_state_pending;
                        ret = 0;
                } else {
                        struct vcpu *to_vcpu = to->rpc.recv_waiter;

                        to->rpc.state = rpc_state_inflight;

                        /*
                         * Take the target vcpu out of the waiter list and mark
                         * it as ready to run again.
                         */
                        sl_lock(&to_vcpu->lock);
                        to->rpc.recv_waiter = to_vcpu->rpc_next;
                        to_vcpu->state = vcpu_state_ready;
                        arch_regs_set_retval(&to_vcpu->regs, size);
                        sl_unlock(&to_vcpu->lock);

                        ret = to_vcpu - to->vcpus + 1;
                }
        }

        sl_unlock(&to->lock);

        return ret;
}
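
/*
 * A minimal sketch (excluded from the build) of the primary VM's side of
 * api_rpc_request(), showing how each return value is meant to be handled.
 */
#if 0
static void example_rpc_request(uint32_t vm_idx, size_t size)
{
        int32_t ret = api_rpc_request(vm_idx, size);

        if (ret < 0) {
                /* Bad arguments, or the target's buffer is busy or unset. */
        } else if (ret == 0) {
                /* Queued; the target will see it on its next read. */
        } else {
                /* vcpu (ret - 1) of the target was woken; schedule it. */
        }
}
#endif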

/**
 * Reads a request sent from a previous call to api_rpc_request. If one isn't
 * available, this function can optionally block the caller until one becomes
 * available.
 *
 * Once the caller has completed handling a request, it must indicate this by
 * calling either api_rpc_reply or api_rpc_ack. No new requests can be accepted
 * until the current one is acknowledged.
 */
int32_t api_rpc_read_request(bool block, struct vcpu **next)
{
        struct vcpu *vcpu = cpu()->current;
        struct vm *vm = vcpu->vm;
        int32_t ret;

        /* Only the secondary VMs can receive calls. */
        if (vm == &primary_vm) {
                return -1;
        }

        sl_lock(&vm->lock);
        if (vm->rpc.state == rpc_state_pending) {
                ret = vm->rpc.recv_bytes;
                vm->rpc.state = rpc_state_inflight;
        } else if (!block) {
                ret = -1;
        } else {
                sl_lock(&vcpu->lock);
                vcpu->state = vcpu_state_blocked_rpc;

                /* Push vcpu into waiter list. */
                vcpu->rpc_next = vm->rpc.recv_waiter;
                vm->rpc.recv_waiter = vcpu;
                sl_unlock(&vcpu->lock);

                /* Switch back to primary VM. */
                *next = &primary_vm.vcpus[cpu_index(cpu())];
                vm_set_current(&primary_vm);

                /*
                 * Indicate to the primary VM that this vcpu blocked waiting
                 * for an interrupt.
                 */
                arch_regs_set_retval(&(*next)->regs,
                                     HF_VCPU_WAIT_FOR_INTERRUPT);
                ret = 0;
        }
        sl_unlock(&vm->lock);

        return ret;
}
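
/*
 * A minimal sketch (excluded from the build) of a secondary VM's service loop
 * written against the guest-visible semantics of these calls: the blocking
 * read yields the request size once one arrives, and replying with ack=true
 * releases the in-flight request. `handle_message` is hypothetical.
 */
#if 0
static void example_service_loop(struct vcpu **next)
{
        for (;;) {
                int32_t size = api_rpc_read_request(true, next);
                size_t reply_size;

                if (size < 0) {
                        continue;
                }

                reply_size = handle_message((size_t)size);
                api_rpc_reply(reply_size, true, next);
        }
}
#endif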

/**
 * Sends a reply from a secondary VM to the primary VM. Data is copied from the
 * caller's send buffer to the destination's receive buffer.
 *
 * It can optionally acknowledge the pending request.
 */
int32_t api_rpc_reply(size_t size, bool ack, struct vcpu **next)
{
        struct vm *from = cpu()->current->vm;
        struct vm *to;
        const void *from_buf;

        /* Basic argument validation. */
        if (size > PAGE_SIZE) {
                return -1;
        }

        /* Only secondary VMs can send responses. */
        if (from == &primary_vm) {
                return -1;
        }

        /* Acknowledge the current pending request if requested. */
        if (ack) {
                api_rpc_ack();
        }

        /*
         * Check that the sender has configured its send buffer. It is safe to
         * use from_buf after releasing the lock because the buffer cannot be
         * modified once it's configured.
         */
        sl_lock(&from->lock);
        from_buf = from->rpc.send;
        sl_unlock(&from->lock);
        if (!from_buf) {
                return -1;
        }

        to = &primary_vm;
        sl_lock(&to->lock);

        if (to->rpc.state != rpc_state_idle || !to->rpc.recv) {
                /*
                 * Fail if the target isn't currently ready to receive a
                 * response.
                 */
                sl_unlock(&to->lock);
                return -1;
        }

        /* Copy data. */
        memcpy(to->rpc.recv, from_buf, size);
        to->rpc.recv_bytes = size;
        to->rpc.state = rpc_state_inflight;
        sl_unlock(&to->lock);

        /*
         * Switch back to the primary VM so that it is aware that a response
         * was received, but leave the current vcpu still runnable.
         */
        *next = api_switch_to_primary((size << 8) | HF_VCPU_RESPONSE_READY,
                                      vcpu_state_ready);

        return 0;
}
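
/*
 * A minimal sketch (excluded from the build) of the primary VM decoding the
 * packed value built above: the low byte carries HF_VCPU_RESPONSE_READY and
 * the remaining bits carry the reply size. This assumes the response codes
 * fit in eight bits.
 */
#if 0
static void example_handle_run_retval(size_t retval)
{
        if ((retval & 0xff) == HF_VCPU_RESPONSE_READY) {
                size_t size = retval >> 8;

                /* `size` bytes of reply data are in the recv buffer. */
                (void)size;
        }
}
#endif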

/**
 * Acknowledges that either a request or a reply has been received and handled.
 * After this call completes, the caller will be able to receive additional
 * requests or replies.
 */
int32_t api_rpc_ack(void)
{
        struct vm *vm = cpu()->current->vm;
        int32_t ret;

        sl_lock(&vm->lock);
        if (vm->rpc.state != rpc_state_inflight) {
                ret = -1;
        } else {
                ret = 0;
                vm->rpc.state = rpc_state_idle;
        }
        sl_unlock(&vm->lock);

        if (ret == 0) {
                /* TODO: Notify waiters, if any. */
        }

        return ret;
}