/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include <assert.h>

#include "hf/arch/cpu.h"

#include "hf/dlog.h"
#include "hf/mm.h"
#include "hf/spinlock.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock
 *
 * Locks of the same kind require the lock of lowest address to be locked
 * first; see `sl_lock_both()`.
 */

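/*
 * Illustrative sketch only (hypothetical locals, not part of this file):
 * honouring the order above when a VM lock and one of its vcpu locks must
 * be held together.
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	... critical section ...
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 * Two locks of the same kind are acquired via sl_lock_both(), which takes
 * the lower-addressed lock first:
 *
 *	sl_lock_both(&vm_a->lock, &vm_b->lock);
 */
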
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

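/*
 * A minimal boot-time usage sketch; the names `boot_pool` and `boot_heap`
 * and the chosen entry size are illustrative assumptions, not part of this
 * file:
 *
 *	static struct mpool boot_pool;
 *	mpool_init(&boot_pool, sizeof(struct mm_page_table));
 *	mpool_add_chunk(&boot_pool, boot_heap, sizeof(boot_heap));
 *	api_init(&boot_pool); // api_page_pool takes over boot_pool's contents.
 */
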
/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause HF_VCPU_RUN to return and the primary VM to regain control of
 * the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_get(HF_PRIMARY_VM_ID);
	struct vcpu *next = &primary->vcpus[cpu_index(current->cpu)];

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs,
			     hf_vcpu_run_return_encode(primary_ret));

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary vm and signals that the vcpu still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_PREEMPTED,
	};

	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Returns to the primary vm to allow this cpu to be used for other tasks as
 * the vcpu does not have work to do at this moment. The current vcpu is
 * marked as ready to be scheduled again.
 */
struct vcpu *api_yield(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_YIELD,
	};

	return api_switch_to_primary(current, ret, vcpu_state_ready);
}

/**
 * Puts the current vcpu in wait for interrupt mode, and returns to the primary
 * vm.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	return api_switch_to_primary(current, ret,
				     vcpu_state_blocked_interrupt);
}

/**
 * Returns the ID of the calling VM.
 */
int64_t api_vm_get_id(const struct vcpu *current)
{
	return current->vm->id;
}

/**
 * Returns the number of VMs configured to run.
 */
int64_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vcpus configured in the given VM.
 */
int64_t api_vcpu_get_count(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	return vm->vcpu_count;
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vcpu has been saved
 * and can therefore be used by other pcpus.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

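/*
 * Sketch of the regs_available handshake between two physical CPUs; the
 * interleaving shown is illustrative:
 *
 *	pCPU0 (vacating vcpu X)          pCPU1 (about to run vcpu X)
 *	-----------------------          ---------------------------
 *	saves X's register state         api_vcpu_prepare_run() spins
 *	api_regs_state_saved(X)          while !X->regs_available
 *	  regs_available = true   --->   observes true, sets it back to
 *	                                 false, then loads X's registers
 */
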
/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != mailbox_state_empty ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Prepares the vcpu to run by updating its state and fetching the return
 * value, if any, that needs to be forced onto it.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct retval_state *vcpu_retval)
{
	bool ret;

	sl_lock(&vcpu->lock);
	if (vcpu->state != vcpu_state_ready) {
		ret = false;
		goto out;
	}

	vcpu->cpu = current->cpu;
	vcpu->state = vcpu_state_running;

	/* Fetch return value to inject into vCPU if there is one. */
	*vcpu_retval = vcpu->retval;
	if (vcpu_retval->force) {
		vcpu->retval.force = false;
	}

	/*
	 * Wait until the registers become available. Care must be taken when
	 * looping on this: it shouldn't be done while holding other locks to
	 * avoid deadlocks.
	 */
	while (!vcpu->regs_available) {
		sl_unlock(&vcpu->lock);
		sl_lock(&vcpu->lock);
	}

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	sl_unlock(&vcpu->lock);
	return ret;
}

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct retval_state vcpu_retval;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_get(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = &vm->vcpus[vcpu_idx];
	if (!api_vcpu_prepare_run(current, vcpu, &vcpu_retval)) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
		goto out;
	}

	/* Switch to the vcpu. */
	*next = vcpu;

	/*
	 * Set a placeholder return code for the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.code = HF_VCPU_RUN_PREEMPTED;

	/* Update return value for the next vcpu if one was injected. */
	if (vcpu_retval.force) {
		arch_regs_set_retval(&vcpu->regs, vcpu_retval.value);
	}

out:
	return ret;
}

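/*
 * A hedged sketch of a primary-VM scheduler loop driving this call through
 * the assumed vmapi wrapper hf_vcpu_run(); pick_next() and
 * sleep_until_interrupt() are illustrative helpers, not part of Hafnium:
 *
 *	for (;;) {
 *		struct hf_vcpu_run_return ret = hf_vcpu_run(vm_id, vcpu_idx);
 *
 *		switch (ret.code) {
 *		case HF_VCPU_RUN_YIELD:
 *		case HF_VCPU_RUN_PREEMPTED:
 *			vcpu_idx = pick_next();
 *			break;
 *		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
 *			sleep_until_interrupt();
 *			break;
 *		case HF_VCPU_RUN_MESSAGE:
 *			// ret.message.size bytes are in the recv buffer.
 *			break;
 *		}
 *	}
 */
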
/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(int mode)
{
	return (mode & (MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)) ==
	       0;
}

/**
 * Determines the value to be returned by api_vm_configure and
 * api_mailbox_clear after they've succeeded. If a secondary VM is running and
 * there are waiters, it also switches back to the primary VM for it to wake
 * waiters up.
 */
static int64_t api_waiter_result(struct vm_locked locked_vm,
				 struct vcpu *current, struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_NOTIFY_WAITERS,
	};

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return 0;
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return 1;
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(current, ret, vcpu_state_ready);

	return 0;
}

/**
 * Configures the VM to send/receive data through the specified pages. The
 * pages must not be shared.
 *
 * Returns:
 *  - -1 on failure.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked locked;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int orig_send_mode;
	int orig_recv_mode;
	struct mpool local_page_pool;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(send) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(recv) & (PAGE_SIZE - 1))) {
		return -1;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		return -1;
	}

	vm_lock(vm, &locked);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		goto fail;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
			    &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		goto fail;
	}

	if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
			    &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		goto fail;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * any stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	/* Take memory ownership away from the VM and mark as shared. */
	if (!mm_vm_identity_map(
		    &vm->ptable, pa_send_begin, pa_send_end,
		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
		    NULL, &local_page_pool)) {
		goto fail_free_pool;
	}

	if (!mm_vm_identity_map(&vm->ptable, pa_recv_begin, pa_recv_end,
				MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
				NULL, &local_page_pool)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm->ptable, &local_page_pool);
		goto fail_undo_send;
	}

	/* Map the send page as read-only in the hypervisor address space. */
	vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
					   MM_MODE_R, &local_page_pool);
	if (!vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(&local_page_pool);
		goto fail_undo_send_and_recv;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
					   MM_MODE_W, &local_page_pool);
	if (!vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(&local_page_pool);
		goto fail_undo_all;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(locked, current, next);
	goto exit;

	/*
	 * The following mappings will not require more memory than is
	 * available in the local pool.
	 */
fail_undo_all:
	vm->mailbox.send = NULL;
	mm_unmap(pa_send_begin, pa_send_end, &local_page_pool);

fail_undo_send_and_recv:
	mm_vm_identity_map(&vm->ptable, pa_recv_begin, pa_recv_end,
			   orig_recv_mode, NULL, &local_page_pool);

fail_undo_send:
	mm_vm_identity_map(&vm->ptable, pa_send_begin, pa_send_end,
			   orig_send_mode, NULL, &local_page_pool);

fail_free_pool:
	mpool_fini(&local_page_pool);

fail:
	ret = -1;

exit:
	vm_unlock(&locked);

	return ret;
}

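/*
 * Hedged usage sketch from a guest's point of view, assuming the vmapi
 * wrapper hf_vm_configure() and two statically allocated, page-aligned
 * buffers; `send_page` and `recv_page` are illustrative names:
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[PAGE_SIZE];
 *
 *	int64_t ret = hf_vm_configure((hf_ipaddr_t)send_page,
 *				      (hf_ipaddr_t)recv_page);
 *	if (ret == 1) {
 *		// Primary only: drain waiters via hf_mailbox_waiter_get().
 *	}
 *
 * The pages must be page-aligned, distinct, and owned exclusively by the
 * caller; configuration is one-shot per VM.
 */
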
/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 */
int64_t api_mailbox_send(uint32_t vm_id, size_t size, bool notify,
			 struct vcpu *current, struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_buf;
	uint16_t vcpu;
	int64_t ret;

	/* Limit the size of transfer. */
	if (size > HF_MAILBOX_SIZE) {
		return -1;
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	/*
	 * Check that the sender has configured its send buffer. It is safe to
	 * use from_buf after releasing the lock because the buffer cannot be
	 * modified once it's configured.
	 */
	sl_lock(&from->lock);
	from_buf = from->mailbox.send;
	sl_unlock(&from->lock);
	if (from_buf == NULL) {
		return -1;
	}

	sl_lock(&to->lock);

	if (to->mailbox.state != mailbox_state_empty ||
	    to->mailbox.recv == NULL) {
		/*
		 * Fail if the target isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry = &current->vm->wentry[vm_id];

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		ret = -1;
		goto out;
	}

	/* Copy data. */
	memcpy(to->mailbox.recv, from_buf, size);
	to->mailbox.recv_bytes = size;
	to->mailbox.recv_from_id = from->id;
	to->mailbox.state = mailbox_state_read;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_MESSAGE,
			.message.size = size,
		};

		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
		goto out;
	}

	/*
	 * Try to find a vcpu to handle the message and tell the scheduler to
	 * run it.
	 */
	if (to->mailbox.recv_waiter == NULL) {
		/*
		 * The scheduler must choose a vcpu to interrupt so it can
		 * handle the message.
		 */
		to->mailbox.state = mailbox_state_received;
		vcpu = HF_INVALID_VCPU;
	} else {
		struct vcpu *to_vcpu = to->mailbox.recv_waiter;

		/*
		 * Take target vcpu out of waiter list and mark it as ready to
		 * run again.
		 */
		sl_lock(&to_vcpu->lock);
		to->mailbox.recv_waiter = to_vcpu->mailbox_next;
		to_vcpu->state = vcpu_state_ready;

		/* Return from HF_MAILBOX_RECEIVE. */
		to_vcpu->retval.force = true;
		to_vcpu->retval.value = hf_mailbox_receive_return_encode(
			(struct hf_mailbox_receive_return){
				.vm_id = to->mailbox.recv_from_id,
				.size = size,
			});

		sl_unlock(&to_vcpu->lock);

		vcpu = to_vcpu - to->vcpus;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from->id == HF_PRIMARY_VM_ID) {
		ret = vcpu;
	} else {
		struct hf_vcpu_run_return primary_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = to->id,
			.wake_up.vcpu = vcpu,
		};

		*next = api_switch_to_primary(current, primary_ret,
					      vcpu_state_ready);
		ret = 0;
	}

out:
	sl_unlock(&to->lock);

	return ret;
}

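/*
 * Hedged sender-side sketch, assuming vmapi wrappers hf_mailbox_send() and
 * hf_mailbox_writable_get(); names and control flow are illustrative:
 *
 *	memcpy(send_page, payload, len);
 *	if (hf_mailbox_send(peer_id, len, true) == -1) {
 *		// Recipient busy; this VM is now queued on the recipient's
 *		// waiter_list. Once the recipient clears its mailbox,
 *		// hf_mailbox_writable_get() will report peer_id and the
 *		// send can be retried.
 *	}
 */
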
/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
						     struct vcpu *current,
						     struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct hf_mailbox_receive_return ret = {
		.vm_id = HF_INVALID_VM_ID,
	};

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ret;
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == mailbox_state_received) {
		vm->mailbox.state = mailbox_state_read;
		ret.vm_id = vm->mailbox.recv_from_id;
		ret.size = vm->mailbox.recv_bytes;
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		goto out;
	}

	sl_lock(&current->lock);

	/* Push vcpu into waiter list. */
	current->mailbox_next = vm->mailbox.recv_waiter;
	vm->mailbox.recv_waiter = current;
	sl_unlock(&current->lock);

	/* Switch back to primary vm to block. */
	{
		struct hf_vcpu_run_return run_return = {
			.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
		};

		*next = api_switch_to_primary(current, run_return,
					      vcpu_state_blocked_mailbox);
	}
out:
	sl_unlock(&vm->lock);

	return ret;
}

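/*
 * Hedged receiver-side sketch for a secondary VM, assuming the vmapi
 * wrappers hf_mailbox_receive() and hf_mailbox_clear(); handle() is an
 * illustrative name:
 *
 *	for (;;) {
 *		struct hf_mailbox_receive_return recv =
 *			hf_mailbox_receive(true); // block until a message
 *		if (recv.vm_id != HF_INVALID_VM_ID) {
 *			handle(recv_page, recv.size);
 *			hf_mailbox_clear(); // allow the next message in
 *		}
 *	}
 */
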
/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
int64_t api_mailbox_writable_get(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct wait_entry *entry;
	int64_t ret;

	sl_lock(&vm->lock);
	if (list_empty(&vm->mailbox.ready_list)) {
		ret = -1;
		goto exit;
	}

	entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
			     ready_links);
	list_remove(&entry->ready_links);
	ret = entry - vm->wentry;

exit:
	sl_unlock(&vm->lock);
	return ret;
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 if there are no waiters, or the VM id of the next waiter
 * otherwise.
 */
int64_t api_mailbox_waiter_get(uint32_t vm_id, const struct vcpu *current)
{
	struct vm *vm;
	struct vm_locked locked;
	struct wait_entry *entry;
	struct vm *waiting_vm;

	/* Only primary VMs are allowed to call this function. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_get(vm_id);
	if (vm == NULL) {
		return -1;
	}

	/* Check if there are outstanding notifications from given vm. */
	vm_lock(vm, &locked);
	entry = api_fetch_waiter(locked);
	vm_unlock(&locked);

	if (entry == NULL) {
		return -1;
	}

	/* Enqueue notification to waiting VM. */
	waiting_vm = entry->waiting_vm;

	sl_lock(&waiting_vm->lock);
	if (list_empty(&entry->ready_links)) {
		list_append(&waiting_vm->mailbox.ready_list,
			    &entry->ready_links);
	}
	sl_unlock(&waiting_vm->lock);

	return waiting_vm->id;
}

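/*
 * Hedged sketch of the notification pipeline in the primary VM: when
 * HF_VCPU_RUN_NOTIFY_WAITERS (or a return value of 1 from clear/configure)
 * indicates outstanding waiters, drain them and kick each waiting VM. The
 * kick mechanism is illustrative:
 *
 *	int64_t waiter;
 *	while ((waiter = hf_mailbox_waiter_get(vm_id)) != -1) {
 *		// VM `waiter` now has an entry on its ready_list; schedule
 *		// it so it can call hf_mailbox_writable_get() and retry its
 *		// send.
 *	}
 */
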
/**
 * Clears the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 *
 * Returns:
 *  - -1 on failure, if the mailbox hasn't been read or is already empty.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked locked;
	int64_t ret;

	vm_lock(vm, &locked);
	if (vm->mailbox.state == mailbox_state_read) {
		ret = api_waiter_result(locked, current, next);
		vm->mailbox.state = mailbox_state_empty;
	} else {
		ret = -1;
	}
	vm_unlock(&locked);

	return ret;
}

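/*
 * Hedged sketch for the primary VM, which receives messages as a
 * HF_VCPU_RUN_MESSAGE status code but must still clear its mailbox
 * afterwards; process_message() is an illustrative name:
 *
 *	case HF_VCPU_RUN_MESSAGE:
 *		process_message(recv_page, ret.message.size);
 *		if (hf_mailbox_clear() == 1) {
 *			// Waiters exist: drain hf_mailbox_waiter_get().
 *		}
 *		break;
 */
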
/**
 * Enables or disables a given interrupt ID for the calling vCPU.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	sl_lock(&current->lock);
	if (enable) {
		/*
		 * If it is pending and was not enabled before, increment the
		 * count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    ~current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count++;
		}
		current->interrupts.interrupt_enabled[intid_index] |=
			intid_mask;
	} else {
		/*
		 * If it is pending and was enabled before, decrement the
		 * count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count--;
		}
		current->interrupts.interrupt_enabled[intid_index] &=
			~intid_mask;
	}

	sl_unlock(&current->lock);
	return 0;
}

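/*
 * Worked example of the bitmap arithmetic above, assuming
 * INTERRUPT_REGISTER_BITS == 32: for intid == 70,
 *
 *	intid_index = 70 / 32 = 2
 *	intid_mask  = 1u << (70 % 32) = 1u << 6 = 0x40
 *
 * so bit 6 of interrupt_enabled[2] tracks interrupt 70. The
 * enabled_and_pending_count is adjusted only when the enabled and pending
 * bits change together, keeping it equal to the population count of
 * (enabled & pending) across all words.
 */
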
/**
 * Returns the ID of the next pending interrupt for the calling vCPU, and
 * acknowledges it (i.e. marks it as no longer pending). Returns
 * HF_INVALID_INTID if there are no pending interrupts.
 */
uint32_t api_interrupt_get(struct vcpu *current)
{
	uint8_t i;
	uint32_t first_interrupt = HF_INVALID_INTID;

	/*
	 * Find the first enabled and pending interrupt ID, return it, and
	 * deactivate it.
	 */
	sl_lock(&current->lock);
	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		uint32_t enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];

		if (enabled_and_pending != 0) {
			uint8_t bit_index = ctz(enabled_and_pending);
			/*
			 * Mark it as no longer pending and decrement the
			 * count.
			 */
			current->interrupts.interrupt_pending[i] &=
				~(1u << bit_index);
			current->interrupts.enabled_and_pending_count--;
			first_interrupt =
				i * INTERRUPT_REGISTER_BITS + bit_index;
			break;
		}
	}

	sl_unlock(&current->lock);
	return first_interrupt;
}

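/*
 * Hedged guest-side sketch: a typical virtual-IRQ handler drains all
 * pending interrupts via the assumed vmapi wrapper hf_interrupt_get();
 * irq_handle() is an illustrative name:
 *
 *	void irq_current(void)
 *	{
 *		uint32_t intid;
 *
 *		while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
 *			irq_handle(intid);
 *		}
 *	}
 */
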
/**
 * Returns whether the current vCPU is allowed to inject an interrupt into the
 * given VM and vCPU.
 */
static inline bool is_injection_allowed(uint32_t target_vm_id,
					struct vcpu *current)
{
	uint32_t current_vm_id = current->vm->id;

	/*
	 * The primary VM is allowed to inject interrupts into any VM.
	 * Secondary VMs are only allowed to inject interrupts into their own
	 * vCPUs.
	 */
	return current_vm_id == HF_PRIMARY_VM_ID ||
	       current_vm_id == target_vm_id;
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *    ID is invalid, or the current VM is not allowed to inject interrupts to
 *    the target VM.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
int64_t api_interrupt_inject(uint32_t target_vm_id, uint32_t target_vcpu_idx,
			     uint32_t intid, struct vcpu *current,
			     struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
	struct vcpu *target_vcpu;
	struct vm *target_vm = vm_get(target_vm_id);
	bool need_vm_lock;
	int64_t ret = 0;

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	if (target_vm == NULL) {
		return -1;
	}

	if (target_vcpu_idx >= target_vm->vcpu_count) {
		/* The requested vcpu must exist. */
		return -1;
	}

	if (!is_injection_allowed(target_vm_id, current)) {
		return -1;
	}

	target_vcpu = &target_vm->vcpus[target_vcpu_idx];

	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);

	sl_lock(&target_vcpu->lock);
	/*
	 * If we need the target_vm lock we need to release the target_vcpu
	 * lock first to maintain the correct order of locks. In-between
	 * releasing and acquiring it again the state of the vCPU could change
	 * in such a way that we don't actually need to touch the target_vm
	 * after all, but that's alright: we'll take the target_vm lock anyway;
	 * it's safe, just perhaps a little slow in this unusual case. The
	 * reverse is not possible: if need_vm_lock is false, we don't release
	 * the target_vcpu lock until we are done, so nothing should change in
	 * such a way that we need the VM lock after all.
	 */
	need_vm_lock =
		(target_vcpu->interrupts.interrupt_enabled[intid_index] &
		 ~target_vcpu->interrupts.interrupt_pending[intid_index] &
		 intid_mask) &&
		target_vcpu->state == vcpu_state_blocked_mailbox;
	if (need_vm_lock) {
		sl_unlock(&target_vcpu->lock);
		sl_lock(&target_vm->lock);
		sl_lock(&target_vcpu->lock);
	}

	/*
	 * We only need to change state and (maybe) trigger a virtual IRQ if it
	 * is enabled and was not previously pending. Otherwise we can skip
	 * everything except setting the pending bit.
	 *
	 * If you change this logic make sure to update the need_vm_lock logic
	 * above to match.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	target_vcpu->interrupts.enabled_and_pending_count++;

	/*
	 * Only need to update state if there was not already an interrupt
	 * enabled and pending.
	 */
	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
		goto out;
	}

	if (target_vcpu->state == vcpu_state_blocked_interrupt) {
		target_vcpu->state = vcpu_state_ready;
	} else if (target_vcpu->state == vcpu_state_blocked_mailbox) {
		/*
		 * If you change this logic make sure to update the
		 * need_vm_lock logic above to match.
		 */
		target_vcpu->state = vcpu_state_ready;

		/*
		 * Take target vCPU out of mailbox recv_waiter list.
		 *
		 * TODO: Consider using a doubly-linked list for the receive
		 * waiter list to avoid the linear search here.
		 */
		struct vcpu **previous_next_pointer =
			&target_vm->mailbox.recv_waiter;
		while (*previous_next_pointer != NULL &&
		       *previous_next_pointer != target_vcpu) {
			/*
			 * TODO(qwandor): Do we need to lock the vCPUs somehow
			 * while we walk the linked list, or is the VM lock
			 * enough?
			 */
			previous_next_pointer =
				&(*previous_next_pointer)->mailbox_next;
		}

		if (*previous_next_pointer == NULL) {
			dlog("Target VCPU state is vcpu_state_blocked_mailbox "
			     "but is not in VM mailbox waiter list. This "
			     "should never happen.\n");
		} else {
			*previous_next_pointer = target_vcpu->mailbox_next;
		}
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu) {
		/*
		 * Switch to the primary so that it can switch to the target,
		 * or kick it if it is already running on a different physical
		 * CPU.
		 */
		struct hf_vcpu_run_return wake_up_ret = {
			.code = HF_VCPU_RUN_WAKE_UP,
			.wake_up.vm_id = target_vm_id,
			.wake_up.vcpu = target_vcpu_idx,
		};

		*next = api_switch_to_primary(current, wake_up_ret,
					      vcpu_state_ready);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	sl_unlock(&target_vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&target_vm->lock);
	}

	return ret;
}

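/*
 * Hedged sketch of injection from the primary VM, assuming the vmapi
 * wrapper hf_interrupt_inject(); the intid value is illustrative:
 *
 *	if (hf_interrupt_inject(target_vm, target_vcpu, 33) == 1) {
 *		// The target vCPU became runnable: run or kick it, e.g.
 *		// via hf_vcpu_run() on the physical CPU assigned to it.
 *	}
 *
 * A secondary VM may only target its own vCPUs, per is_injection_allowed().
 */
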
/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool api_clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a cpu local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	void *ptr = mm_identity_map(begin, end, MM_MODE_W, ppool);
	size_t size = pa_addr(end) - pa_addr(begin);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(ppool);
		return false;
	}

	memset(ptr, 0, size);
	arch_mm_write_back_dcache(ptr, size);
	mm_unmap(begin, end, ppool);

	return true;
}

/**
 * Shares memory from the calling VM with another. The memory can be shared in
 * different modes.
 *
 * TODO: the interface for sharing memory will need to be enhanced to allow
 *       sharing with different modes e.g. read-only, informing the recipient
 *       of the memory they have been given, opting to not wipe the memory and
 *       possibly allowing multiple blocks to be transferred. What this will
 *       look like is TBD.
 */
int64_t api_share_memory(uint32_t vm_id, ipaddr_t addr, size_t size,
			 enum hf_share share, struct vcpu *current)
{
	struct vm *from = current->vm;
	struct vm *to;
	int orig_from_mode;
	int from_mode;
	int to_mode;
	ipaddr_t begin;
	ipaddr_t end;
	paddr_t pa_begin;
	paddr_t pa_end;
	struct mpool local_page_pool;
	int64_t ret;

	/* Disallow reflexive shares as this suggests an error in the VM. */
	if (vm_id == from->id) {
		return -1;
	}

	/* Ensure the target VM exists. */
	to = vm_get(vm_id);
	if (to == NULL) {
		return -1;
	}

	begin = addr;
	end = ipa_add(addr, size);

	/* Fail if addresses are not page-aligned. */
	if ((ipa_addr(begin) & (PAGE_SIZE - 1)) ||
	    (ipa_addr(end) & (PAGE_SIZE - 1))) {
		return -1;
	}

	/* Convert the sharing request to memory management modes. */
	switch (share) {
	case HF_MEMORY_GIVE:
		from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED;
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
		break;

	case HF_MEMORY_LEND:
		from_mode = MM_MODE_INVALID;
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED;
		break;

	case HF_MEMORY_SHARE:
		from_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_SHARED;
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED |
			  MM_MODE_SHARED;
		break;

	default:
		/* The input is untrusted so might not be a valid value. */
		return -1;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * any stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	sl_lock_both(&from->lock, &to->lock);

	/*
	 * Ensure that the memory range is mapped with the same mode so that
	 * changes can be reverted if the process fails.
	 */
	if (!mm_vm_get_mode(&from->ptable, begin, end, &orig_from_mode)) {
		goto fail;
	}

	/*
	 * Ensure the memory range is valid for the sender. If it isn't, the
	 * sender has either shared it with another VM already or has no claim
	 * to the memory.
	 */
	if (orig_from_mode & MM_MODE_INVALID) {
		goto fail;
	}

	/*
	 * The sender must own the memory and have exclusive access to it in
	 * order to share it. Alternatively, it is giving memory back to the
	 * owning VM.
	 */
	if (orig_from_mode & MM_MODE_UNOWNED) {
		int orig_to_mode;

		if (share != HF_MEMORY_GIVE ||
		    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode) ||
		    orig_to_mode & MM_MODE_UNOWNED) {
			goto fail;
		}
	} else if (orig_from_mode & MM_MODE_SHARED) {
		goto fail;
	}

	pa_begin = pa_from_ipa(begin);
	pa_end = pa_from_ipa(end);

	/*
	 * First update the mapping for the sender so there is no overlap with
	 * the recipient.
	 */
	if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
				NULL, &local_page_pool)) {
		goto fail;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (!api_clear_memory(pa_begin, pa_end, &local_page_pool)) {
		goto fail_return_to_sender;
	}

	/* Complete the transfer by mapping the memory into the recipient. */
	if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
				&local_page_pool)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&from->ptable, &local_page_pool);
		goto fail_return_to_sender;
	}

	ret = 0;
	goto out;

fail_return_to_sender:
	mm_vm_identity_map(&from->ptable, pa_begin, pa_end, orig_from_mode,
			   NULL, &local_page_pool);

fail:
	ret = -1;

out:
	sl_unlock(&from->lock);
	sl_unlock(&to->lock);

	mpool_fini(&local_page_pool);

	return ret;
}
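
/*
 * Hedged usage sketch, assuming the vmapi wrapper hf_share_memory(); the
 * buffer name and peer_id are illustrative:
 *
 *	alignas(PAGE_SIZE) static uint8_t pages[2 * PAGE_SIZE];
 *
 *	int64_t ret = hf_share_memory(peer_id, (hf_ipaddr_t)pages,
 *				      sizeof(pages), HF_MEMORY_LEND);
 *
 * HF_MEMORY_GIVE transfers ownership, HF_MEMORY_LEND grants temporary
 * exclusive access, and HF_MEMORY_SHARE grants concurrent access. In all
 * cases the range is wiped before the recipient can see it, and a borrower
 * can hand lent memory back to its owner with HF_MEMORY_GIVE.
 */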