/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/timer.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/spci_internal.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/spci.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */
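
/*
 * For example, code that needs to hold both a VM lock and one of that VM's
 * vCPU locks must acquire vm::lock before vcpu::lock; taking them in the
 * reverse order would violate the ordering above.
 */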

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Switches the physical CPU back to the corresponding vcpu of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause HF_VCPU_RUN to return and the primary VM to regain control of
 * the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct hf_vcpu_run_return primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
	struct vcpu *next = vm_get_vcpu(primary, cpu_index(current->cpu));

	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.code) {
	case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
	case HF_VCPU_RUN_WAIT_FOR_MESSAGE:
		primary_ret.sleep.ns =
			arch_timer_enabled_current()
				? arch_timer_remaining_ns_current()
				: HF_SLEEP_INDEFINITE;
		break;

	default:
		/* Do nothing. */
		break;
	}

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(
		&next->regs,
		(struct spci_value){
			.func = hf_vcpu_run_return_encode(primary_ret)});

	/* Mark the current vcpu as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary vm and signals that the vcpu still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_PREEMPTED,
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Puts the current vcpu in wait for interrupt mode, and returns to the primary
 * vm.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * Returns to the primary vm to allow this cpu to be used for other tasks as the
 * vcpu does not have work to do at this moment. The current vcpu is marked as
 * ready to be scheduled again.
 */
void api_yield(struct vcpu *current, struct vcpu **next)
{
	struct hf_vcpu_run_return primary_ret = {
		.code = HF_VCPU_RUN_YIELD,
	};

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* Noop on the primary as it makes the scheduling decisions. */
		return;
	}

	*next = api_switch_to_primary(current, primary_ret, VCPU_STATE_READY);
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if it
 * is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAKE_UP,
		.wake_up.vm_id = target_vcpu->vm->id,
		.wake_up.vcpu = vcpu_index(target_vcpu),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_ABORTED,
	};

	dlog("Aborting VM %u vCPU %u\n", current->vm->id, vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

/**
 * Returns the ID of the VM that the calling vCPU belongs to.
 */
struct spci_value api_spci_id_get(const struct vcpu *current)
{
	return (struct spci_value){.func = SPCI_SUCCESS_32,
				   .arg2 = current->vm->id};
}

/**
 * Returns the number of VMs configured to run.
 */
spci_vm_count_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vCPUs configured in the given VM, or 0 if there is no
 * such VM or the caller is not the primary VM.
 */
spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
				     const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vcpus for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return 0;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return 0;
	}

	return vm->vcpu_count;
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vcpu has been saved
 * and can therefore be used by other pcpus.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the vCPU
 * is next run, which is up to the scheduler.
 *
 * Returns:
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
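	/*
	 * Interrupt IDs are tracked in per-vCPU bitmaps where each word holds
	 * INTERRUPT_REGISTER_BITS IDs; derive the word index and bit mask for
	 * this ID.
	 */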
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);
	int64_t ret = 0;

	sl_lock(&target_vcpu->lock);

	/*
	 * We only need to change state and (maybe) trigger a virtual IRQ if it
	 * is enabled and was not previously pending. Otherwise we can skip
	 * everything except setting the pending bit.
	 *
	 * If you change this logic make sure to update the need_vm_lock logic
	 * above to match.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	target_vcpu->interrupts.enabled_and_pending_count++;

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	sl_unlock(&target_vcpu->lock);

	return ret;
}

/**
 * Constructs an SPCI_MSG_SEND value to return from a successful SPCI_MSG_POLL
 * or SPCI_MSG_WAIT call.
 */
static struct spci_value spci_msg_recv_return(const struct vm *receiver)
{
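	/*
	 * arg1 packs the sender VM ID into the upper 16 bits and the receiver
	 * VM ID into the lower 16 bits of the returned value.
	 */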
	return (struct spci_value){
		.func = SPCI_MSG_SEND_32,
		.arg1 = (receiver->mailbox.recv_sender << 16) | receiver->id,
		.arg3 = receiver->mailbox.recv_size,
		.arg4 = receiver->mailbox.recv_attributes};
}

/**
 * Prepares the vcpu to run by updating its state and determining whether a
 * value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct hf_vcpu_run_return *run_ret)
{
	bool need_vm_lock;
	bool ret;

	/*
	 * Wait until the registers become available. All locks must be released
	 * between iterations of this loop to avoid potential deadlocks if, on
	 * any path, a lock needs to be taken after taking the decision to
	 * switch context but before the registers have been saved.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	for (;;) {
		sl_lock(&vcpu->lock);

		/* The VM needs to be locked to deliver mailbox messages. */
		need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
		if (need_vm_lock) {
			sl_unlock(&vcpu->lock);
			sl_lock(&vcpu->vm->lock);
			sl_lock(&vcpu->lock);
		}

		if (vcpu->regs_available) {
			break;
		}

		if (vcpu->state == VCPU_STATE_RUNNING) {
			/*
			 * vCPU is running on another pCPU.
			 *
			 * It's ok not to return the sleep duration here because
			 * the other physical CPU that is currently running this
			 * vCPU will return the sleep duration if needed. The
			 * default return value is
			 * HF_VCPU_RUN_WAIT_FOR_INTERRUPT, so no need to set it
			 * explicitly.
			 */
			ret = false;
			goto out;
		}

		sl_unlock(&vcpu->lock);
		if (need_vm_lock) {
			sl_unlock(&vcpu->vm->lock);
		}
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog("Aborting VM %u vCPU %u\n", vcpu->vm->id,
			     vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED_MAILBOX:
		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     spci_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}
		/* Fall through. */
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu->interrupts.enabled_and_pending_count > 0) {
			break;
		}

		/* The timer expired so allow the interrupt to be delivered. */
		if (arch_timer_pending(&vcpu->regs)) {
			break;
		}

		/*
		 * The vCPU is not ready to run, return the appropriate code to
		 * the primary which called vcpu_run.
		 */
		if (arch_timer_enabled(&vcpu->regs)) {
			run_ret->code =
				vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
					? HF_VCPU_RUN_WAIT_FOR_MESSAGE
					: HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
			run_ret->sleep.ns =
				arch_timer_remaining_ns(&vcpu->regs);
		}

		ret = false;
		goto out;

	case VCPU_STATE_READY:
		break;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	sl_unlock(&vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&vcpu->vm->lock);
	}

	return ret;
}

/**
 * Runs the given vcpu of the given vm.
 */
struct hf_vcpu_run_return api_vcpu_run(spci_vm_id_t vm_id,
				       spci_vcpu_index_t vcpu_idx,
				       const struct vcpu *current,
				       struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
		.sleep.ns = HF_SLEEP_INDEFINITE,
	};

	/* Only the primary VM can switch vcpus. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* Only secondary VM vcpus can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vcpu must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vcpu. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.code = HF_VCPU_RUN_PREEMPTED;

out:
	return ret;
}

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(int mode)
{
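	/*
	 * Memory is valid, owned and exclusive when none of the device
	 * (MM_MODE_D), invalid, unowned or shared mode bits are set.
	 */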
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_vm_configure and api_mailbox_clear
 * after they've succeeded. If a secondary VM is running and there are waiters,
 * it also switches back to the primary VM for it to wake waiters up.
 */
static int64_t api_waiter_result(struct vm_locked locked_vm,
				 struct vcpu *current, struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_NOTIFY_WAITERS,
	};

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return 0;
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return 1;
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(current, ret, VCPU_STATE_READY);

	return 0;
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages. The
 * stage-1 page tables must be locked so memory cannot be taken by another core
 * which could result in this transaction being unable to roll back in the case
 * of an error.
 */
static bool api_vm_configure_stage1(struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    struct mpool *local_page_pool)
{
	bool ret;
	struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	mm_unlock_stage1(&mm_stage1_locked);

	return ret;
}

/**
 * Configures the send and receive pages in the VM stage-2 and hypervisor
 * stage-1 page tables. Locking of the page tables combined with a local memory
 * pool ensures there will always be enough memory to recover from any errors
 * that arise.
 */
static bool api_vm_configure_pages(struct vm_locked vm_locked,
				   paddr_t pa_send_begin, paddr_t pa_send_end,
				   int orig_send_mode, paddr_t pa_recv_begin,
				   paddr_t pa_recv_end, int orig_recv_mode)
{
	bool ret;
	struct mpool local_page_pool;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if any
	 * stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	/* Take memory ownership away from the VM and mark as shared. */
	if (!mm_vm_identity_map(
		    &vm_locked.vm->ptable, pa_send_begin, pa_send_end,
		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
		    NULL, &local_page_pool)) {
		goto fail;
	}

	if (!mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
				pa_recv_end,
				MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
				NULL, &local_page_pool)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
		goto fail_undo_send;
	}

	if (!api_vm_configure_stage1(vm_locked, pa_send_begin, pa_send_end,
				     pa_recv_begin, pa_recv_end,
				     &local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send_and_recv:
	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_recv_begin,
				 pa_recv_end, orig_recv_mode, NULL,
				 &local_page_pool));

fail_undo_send:
	CHECK(mm_vm_identity_map(&vm_locked.vm->ptable, pa_send_begin,
				 pa_send_end, orig_send_mode, NULL,
				 &local_page_pool));

fail:
	ret = false;

out:
	mpool_fini(&local_page_pool);

	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared.
 *
 * Returns:
 *  - -1 on failure.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
			 struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	int orig_send_mode;
	int orig_recv_mode;
	int64_t ret;

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		return -1;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		return -1;
	}

	/*
	 * The hypervisor's memory map must be locked for the duration of this
	 * operation to ensure there will be sufficient memory to recover from
	 * any failures.
	 *
	 * TODO: the scope of the lock can be reduced but will require
	 * restructuring to keep a single unlock point.
	 */
	vm_locked = vm_lock(vm);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		goto fail;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
			    &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		goto fail;
	}

	if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
			    &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		goto fail;
	}

	if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
				    orig_send_mode, pa_recv_begin, pa_recv_end,
				    orig_recv_mode)) {
		goto fail;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(vm_locked, current, next);
	goto exit;

fail:
	ret = -1;

exit:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 */
struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
				    spci_vm_id_t receiver_vm_id, uint32_t size,
				    uint32_t attributes, struct vcpu *current,
				    struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;

	struct two_vm_locked vm_from_to_lock;

	struct hf_vcpu_run_return primary_ret = {
		.code = HF_VCPU_RUN_MESSAGE,
	};
	const void *from_msg;

	struct spci_value ret;
	bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
		      SPCI_MSG_SEND_NOTIFY;

	/* Ensure sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Limit the size of transfer. */
	if (size > SPCI_MSG_PAYLOAD_MAX) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the tx
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the tx mailbox
	 * address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Hafnium needs to hold the lock on <to> before the mailbox state is
	 * checked. The lock on <to> must be held until the information is
	 * copied to <to> Rx buffer. Since in
	 * spci_msg_handle_architected_message we may call api_spci_share_memory
	 * which must hold the <from> lock, we must hold the <from> lock at this
	 * point to prevent a deadlock scenario.
	 */
	vm_from_to_lock = vm_lock_both(to, from);

	if (to->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
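			/*
			 * The sender keeps one wait entry per destination VM,
			 * indexed by the receiver's VM ID.
			 */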
			struct wait_entry *entry =
				&from->wait_entries[receiver_vm_id];

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		ret = spci_error(SPCI_BUSY);
		goto out;
	}

	/* Handle legacy memory sharing messages. */
	if ((attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK) ==
	    SPCI_MSG_SEND_LEGACY_MEMORY) {
		/*
		 * Buffer holding the internal copy of the shared memory
		 * regions.
		 */
		struct spci_architected_message_header
			*architected_message_replica =
				(struct spci_architected_message_header *)
					cpu_get_buffer(current->cpu->id);
		uint32_t message_buffer_size =
			cpu_get_buffer_size(current->cpu->id);

		struct spci_architected_message_header *architected_header =
			(struct spci_architected_message_header *)from_msg;

		if (size > message_buffer_size) {
			ret = spci_error(SPCI_INVALID_PARAMETERS);
			goto out;
		}

		if (size < sizeof(struct spci_architected_message_header)) {
			ret = spci_error(SPCI_INVALID_PARAMETERS);
			goto out;
		}

		/* Copy the architected message into the internal buffer. */
		memcpy_s(architected_message_replica, message_buffer_size,
			 architected_header, size);

		/*
		 * Note that architected_message_replica is passed as the third
		 * parameter to spci_msg_handle_architected_message. The
		 * execution flow commencing at
		 * spci_msg_handle_architected_message will make several
		 * accesses to fields in architected_message_replica. The memory
		 * area architected_message_replica must be exclusively owned by
		 * Hafnium so that TOCTOU issues do not arise.
		 */
		ret = spci_msg_handle_architected_message(
			vm_from_to_lock.vm1, vm_from_to_lock.vm2,
			architected_message_replica, size);

		if (ret.func != SPCI_SUCCESS_32) {
			goto out;
		}
	} else {
		/* Copy data. */
		memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg,
			 size);
		to->mailbox.recv_size = size;
		to->mailbox.recv_sender = sender_vm_id;
		to->mailbox.recv_attributes = 0;
		ret = (struct spci_value){.func = SPCI_SUCCESS_32};
	}

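	/*
	 * Record the recipient in the scheduler return value so the primary
	 * knows which VM now has a pending message.
	 */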
	primary_ret.message.vm_id = to->id;

	/* Messages for the primary VM are delivered directly. */
	if (to->id == HF_PRIMARY_VM_ID) {
		/*
		 * Only tell the primary VM the size if the message is for it,
		 * to avoid leaking data about messages for other VMs.
		 */
		primary_ret.message.size = size;

		to->mailbox.state = MAILBOX_STATE_READ;
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
		goto out;
	}

	to->mailbox.state = MAILBOX_STATE_RECEIVED;

	/* Return to the primary VM directly or with a switch. */
	if (from->id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
	}

out:
	vm_unlock(&vm_from_to_lock.vm1);
	vm_unlock(&vm_from_to_lock.vm2);

	return ret;
}

/**
 * Checks whether the vCPU's attempt to block for a message has already been
 * interrupted or whether it is allowed to block.
 */
bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
{
	bool interrupted;

	sl_lock(&current->lock);

	/*
	 * Don't block if there are enabled and pending interrupts, to match
	 * behaviour of wait_for_interrupt.
	 */
	interrupted = (current->interrupts.enabled_and_pending_count > 0);

	sl_unlock(&current->lock);

	return interrupted;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
				    struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct spci_value return_code;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vcpus and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return spci_error(SPCI_NOT_SUPPORTED);
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
		return_code = spci_msg_recv_return(vm);
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		return_code = spci_error(SPCI_RETRY);
		goto out;
	}

	/*
	 * From this point onward this call can only be interrupted or a message
	 * received. If a message is received the return value will be set at
	 * that time to SPCI_SUCCESS.
	 */
	return_code = spci_error(SPCI_INTERRUPTED);
	if (api_spci_msg_recv_block_interrupted(current)) {
		goto out;
	}

	/* Switch back to primary vm to block. */
	{
		struct hf_vcpu_run_return run_return = {
			.code = HF_VCPU_RUN_WAIT_FOR_MESSAGE,
		};

		*next = api_switch_to_primary(current, run_return,
					      VCPU_STATE_BLOCKED_MAILBOX);
	}
out:
	sl_unlock(&vm->lock);

	return return_code;
}

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_mailbox_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
int64_t api_mailbox_writable_get(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct wait_entry *entry;
	int64_t ret;

	sl_lock(&vm->lock);
	if (list_empty(&vm->mailbox.ready_list)) {
		ret = -1;
		goto exit;
	}

	entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
			     ready_links);
	list_remove(&entry->ready_links);
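	/*
	 * Wait entries are indexed by VM ID, so the entry's offset within
	 * wait_entries is the ID of the VM whose mailbox became writable.
	 */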
	ret = entry - vm->wait_entries;

exit:
	sl_unlock(&vm->lock);
	return ret;
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current)
{
	struct vm *vm;
	struct vm_locked locked;
	struct wait_entry *entry;
	struct vm *waiting_vm;

	/* Only primary VMs are allowed to call this function. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return -1;
	}

	/* Check if there are outstanding notifications from given vm. */
	locked = vm_lock(vm);
	entry = api_fetch_waiter(locked);
	vm_unlock(&locked);

	if (entry == NULL) {
		return -1;
	}

	/* Enqueue notification to waiting VM. */
	waiting_vm = entry->waiting_vm;

	sl_lock(&waiting_vm->lock);
	if (list_empty(&entry->ready_links)) {
		list_append(&waiting_vm->mailbox.ready_list,
			    &entry->ready_links);
	}
	sl_unlock(&waiting_vm->lock);

	return waiting_vm->id;
}

/**
 * Clears the caller's mailbox so that a new message can be received. The caller
 * must have copied out all data they wish to preserve as new messages will
 * overwrite the old and will arrive asynchronously.
 *
 * Returns:
 *  - -1 on failure, if the mailbox hasn't been read.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked locked;
	int64_t ret;

	locked = vm_lock(vm);
	switch (vm->mailbox.state) {
	case MAILBOX_STATE_EMPTY:
		ret = 0;
		break;

	case MAILBOX_STATE_RECEIVED:
		ret = -1;
		break;

	case MAILBOX_STATE_READ:
		ret = api_waiter_result(locked, current, next);
		vm->mailbox.state = MAILBOX_STATE_EMPTY;
		break;
	}
	vm_unlock(&locked);

	return ret;
}

/**
 * Enables or disables a given interrupt ID for the calling vCPU.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
{
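	/*
	 * Locate the word and bit corresponding to this interrupt ID in the
	 * per-vCPU enabled/pending bitmaps.
	 */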
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1u << (intid % INTERRUPT_REGISTER_BITS);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	sl_lock(&current->lock);
	if (enable) {
		/*
		 * If it is pending and was not enabled before, increment the
		 * count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    ~current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count++;
		}
		current->interrupts.interrupt_enabled[intid_index] |=
			intid_mask;
	} else {
		/*
		 * If it is pending and was enabled before, decrement the count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count--;
		}
		current->interrupts.interrupt_enabled[intid_index] &=
			~intid_mask;
	}

	sl_unlock(&current->lock);
	return 0;
}

/**
 * Returns the ID of the next pending interrupt for the calling vCPU, and
 * acknowledges it (i.e. marks it as no longer pending). Returns
 * HF_INVALID_INTID if there are no pending interrupts.
 */
uint32_t api_interrupt_get(struct vcpu *current)
{
	uint8_t i;
	uint32_t first_interrupt = HF_INVALID_INTID;

	/*
	 * Find the first enabled and pending interrupt ID, return it, and
	 * deactivate it.
	 */
	sl_lock(&current->lock);
	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		uint32_t enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];

		if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001286 uint8_t bit_index = ctz(enabled_and_pending);
1287 /*
1288 * Mark it as no longer pending and decrement the count.
1289 */
1290 current->interrupts.interrupt_pending[i] &=
1291 ~(1u << bit_index);
1292 current->interrupts.enabled_and_pending_count--;
1293 first_interrupt =
1294 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001295 break;
1296 }
1297 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001298
1299 sl_unlock(&current->lock);
1300 return first_interrupt;
1301}
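
/*
 * Illustrative guest-side sketch (not built): draining every enabled and
 * pending interrupt after a virtual IRQ is taken, using the vmapi wrapper for
 * the call above. handle_interrupt() is a hypothetical guest handler.
 */
#if 0
void example_irq_handler(void)
{
	uint32_t intid;

	while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
		handle_interrupt(intid);
	}
}
#endif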
1302
1303/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001304 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001305 * given VM and vCPU.
1306 */
1307static inline bool is_injection_allowed(uint32_t target_vm_id,
1308 struct vcpu *current)
1309{
1310 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001311
Andrew Walbran318f5732018-11-20 16:23:42 +00001312 /*
1313 * The primary VM is allowed to inject interrupts into any VM. Secondary
1314 * VMs are only allowed to inject interrupts into their own vCPUs.
1315 */
1316 return current_vm_id == HF_PRIMARY_VM_ID ||
1317 current_vm_id == target_vm_id;
1318}
1319
1320/**
1321 * Injects a virtual interrupt of the given ID into the given target vCPU.
1322 * This doesn't cause the vCPU to actually be run immediately; the interrupt
1323 * will be taken when the vCPU is next run, which is up to the scheduler.
1324 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001325 * Returns:
1326 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1327 * ID is invalid, or the current VM is not allowed to inject interrupts into
1328 * the target VM.
1329 * - 0 on success if no further action is needed.
1330 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1331 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001332 */
Andrew Walbran42347a92019-05-09 13:59:03 +01001333int64_t api_interrupt_inject(spci_vm_id_t target_vm_id,
Andrew Walbranb037d5b2019-06-25 17:19:41 +01001334 spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001335 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001336{
Andrew Walbran318f5732018-11-20 16:23:42 +00001337 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001338 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001339
1340 if (intid >= HF_NUM_INTIDS) {
1341 return -1;
1342 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001343
Andrew Walbran318f5732018-11-20 16:23:42 +00001344 if (target_vm == NULL) {
1345 return -1;
1346 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001347
Andrew Walbran318f5732018-11-20 16:23:42 +00001348 if (target_vcpu_idx >= target_vm->vcpu_count) {
1349 /* The requested vcpu must exist. */
1350 return -1;
1351 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001352
Andrew Walbran318f5732018-11-20 16:23:42 +00001353 if (!is_injection_allowed(target_vm_id, current)) {
1354 return -1;
1355 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001356
Andrew Walbrane1310df2019-04-29 17:28:28 +01001357 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001358
1359 dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
1360 target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001361 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001362}
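
/*
 * Illustrative primary-VM-side sketch (not built): injecting an interrupt and
 * honouring the "1" return value by running the target vCPU. The wrapper
 * hf_interrupt_inject() is assumed to mirror this function's parameters, and
 * schedule_vcpu() is a hypothetical scheduler hook.
 */
#if 0
void example_inject_and_kick(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
			     uint32_t intid)
{
	if (hf_interrupt_inject(vm_id, vcpu_idx, intid) == 1) {
		/* The target vCPU must run for the interrupt to be taken. */
		schedule_vcpu(vm_id, vcpu_idx);
	}
}
#endif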
Andrew Scull6386f252018-12-06 13:29:10 +00001363
1364/**
1365 * Clears a region of physical memory by overwriting it with zeros. The data is
1366 * flushed from the cache so the cleared memory is visible across the system.
1367 */
1368static bool api_clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
1369{
1370 /*
1371 * TODO: change this to a cpu local single page window rather than a
1372 * global mapping of the whole range. Such an approach will limit
1373 * the changes to stage-1 tables and will allow only local
1374 * invalidation.
1375 */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001376 bool ret;
1377 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
1378 void *ptr =
1379 mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
Andrew Walbran2cb43392019-04-17 12:52:45 +01001380 size_t size = pa_difference(begin, end);
Andrew Scull6386f252018-12-06 13:29:10 +00001381
1382 if (!ptr) {
1383 /* TODO: partial defrag of failed range. */
1384 /* Recover any memory consumed in failed mapping. */
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001385 mm_defrag(stage1_locked, ppool);
1386 goto fail;
Andrew Scull6386f252018-12-06 13:29:10 +00001387 }
1388
Andrew Scull2b5fbad2019-04-05 13:55:56 +01001389 memset_s(ptr, size, 0, size);
Andrew Scullc059fbe2019-09-12 12:58:40 +01001390 arch_mm_flush_dcache(ptr, size);
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001391 mm_unmap(stage1_locked, begin, end, ppool);
Andrew Scull6386f252018-12-06 13:29:10 +00001392
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001393 ret = true;
1394 goto out;
1395
1396fail:
1397 ret = false;
1398
1399out:
1400 mm_unlock_stage1(&stage1_locked);
1401
1402 return ret;
Andrew Scull6386f252018-12-06 13:29:10 +00001403}
1404
Jose Marinho75509b42019-04-09 09:34:59 +01001405/** TODO: Move function to spci_architected_message.c. */
1406/**
1407 * Shares memory from the calling VM with another. The memory can be shared in
1408 * different modes.
1409 *
1410 * This function requires the calling context to hold the <to> and <from> locks.
1411 *
1412 * Returns:
1413 * In case of error one of the following values is returned:
1414 * 1) SPCI_INVALID_PARAMETERS - The parameters provided by the endpoint were
1415 * erroneous;
Andrew Walbran379aa722019-10-07 14:16:34 +01001416 * 2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
Jose Marinho75509b42019-04-09 09:34:59 +01001417 * the request.
1418 * Success is indicated by SPCI_SUCCESS.
1419 */
Andrew Walbran70bc8622019-10-07 14:15:58 +01001420struct spci_value api_spci_share_memory(
1421 struct vm_locked to_locked, struct vm_locked from_locked,
1422 struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
1423 enum spci_memory_share share)
Jose Marinho75509b42019-04-09 09:34:59 +01001424{
1425 struct vm *to = to_locked.vm;
1426 struct vm *from = from_locked.vm;
1427 int orig_from_mode;
1428 int from_mode;
1429 int to_mode;
1430 struct mpool local_page_pool;
Andrew Walbran70bc8622019-10-07 14:15:58 +01001431 struct spci_value ret;
Jose Marinho75509b42019-04-09 09:34:59 +01001432 paddr_t pa_begin;
1433 paddr_t pa_end;
1434 ipaddr_t begin;
1435 ipaddr_t end;
1436
1437 size_t size;
1438
1439 /* Disallow reflexive shares as this suggests an error in the VM. */
1440 if (to == from) {
Andrew Walbran70bc8622019-10-07 14:15:58 +01001441 return spci_error(SPCI_INVALID_PARAMETERS);
Jose Marinho75509b42019-04-09 09:34:59 +01001442 }
1443
1444 /*
1445 * Create a local pool so any freed memory can't be used by another
1446 * thread. This is to ensure the original mapping can be restored if any
1447 * stage of the process fails.
1448 */
1449 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
1450
1451 /* Obtain the single contiguous set of pages from the memory_region. */
1452 /* TODO: Add support for multiple constituent regions. */
1453 size = memory_region->constituents[0].page_count * PAGE_SIZE;
1454 begin = ipa_init(memory_region->constituents[0].address);
1455 end = ipa_add(begin, size);
1456
1457 /*
1458	 * Check that the state transition is lawful for both VMs involved
1459	 * in the memory exchange, and ensure that all constituents of the
1460	 * memory region being shared are in the same state.
1461 */
1462 if (!spci_msg_check_transition(to, from, share, &orig_from_mode, begin,
1463 end, memory_to_attributes, &from_mode,
1464 &to_mode)) {
Andrew Walbran70bc8622019-10-07 14:15:58 +01001465 return spci_error(SPCI_INVALID_PARAMETERS);
Jose Marinho75509b42019-04-09 09:34:59 +01001466 }
1467
1468 pa_begin = pa_from_ipa(begin);
1469 pa_end = pa_from_ipa(end);
1470
1471 /*
1472	 * First update the mapping for the sender so there is no overlap with
1473 * the recipient.
1474 */
1475 if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
1476 NULL, &local_page_pool)) {
Andrew Walbran70bc8622019-10-07 14:15:58 +01001477 ret = spci_error(SPCI_NO_MEMORY);
Jose Marinho75509b42019-04-09 09:34:59 +01001478 goto out;
1479 }
1480
1481 /* Complete the transfer by mapping the memory into the recipient. */
1482 if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
1483 &local_page_pool)) {
1484 /* TODO: partial defrag of failed range. */
1485 /* Recover any memory consumed in failed mapping. */
1486 mm_vm_defrag(&from->ptable, &local_page_pool);
1487
Andrew Walbran70bc8622019-10-07 14:15:58 +01001488 ret = spci_error(SPCI_NO_MEMORY);
Jose Marinho75509b42019-04-09 09:34:59 +01001489
1490 CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
1491 orig_from_mode, NULL,
1492 &local_page_pool));
1493
1494 goto out;
1495 }
1496
Andrew Walbran70bc8622019-10-07 14:15:58 +01001497 ret = (struct spci_value){.func = SPCI_SUCCESS_32};
Jose Marinho75509b42019-04-09 09:34:59 +01001498
1499out:
Jose Marinho75509b42019-04-09 09:34:59 +01001500 mpool_fini(&local_page_pool);
1501
1502 return ret;
1503}
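
/*
 * Illustrative sketch (not built) of the single-constituent assumption made
 * above: the shared [begin, end) range is derived purely from constituents[0].
 * The helper name is hypothetical.
 */
#if 0
static ipaddr_t example_constituent_end(const struct spci_memory_region *region)
{
	size_t size = region->constituents[0].page_count * PAGE_SIZE;

	return ipa_add(ipa_init(region->constituents[0].address), size);
}
#endif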
1504
Andrew Scull6386f252018-12-06 13:29:10 +00001505/**
1506 * Shares memory from the calling VM with another. The memory can be shared in
1507 * different modes.
1508 *
1509 * TODO: the interface for sharing memory will need to be enhanced to allow
1510 * sharing with different modes e.g. read-only, informing the recipient
1511 * of the memory they have been given, opting to not wipe the memory and
1512 * possibly allowing multiple blocks to be transferred. What this will
1513 * look like is TBD.
1514 */
Andrew Walbran42347a92019-05-09 13:59:03 +01001515int64_t api_share_memory(spci_vm_id_t vm_id, ipaddr_t addr, size_t size,
Andrew Scull6386f252018-12-06 13:29:10 +00001516 enum hf_share share, struct vcpu *current)
1517{
1518 struct vm *from = current->vm;
1519 struct vm *to;
1520 int orig_from_mode;
1521 int from_mode;
1522 int to_mode;
1523 ipaddr_t begin;
1524 ipaddr_t end;
1525 paddr_t pa_begin;
1526 paddr_t pa_end;
1527 struct mpool local_page_pool;
1528 int64_t ret;
1529
1530 /* Disallow reflexive shares as this suggests an error in the VM. */
1531 if (vm_id == from->id) {
1532 return -1;
1533 }
1534
1535 /* Ensure the target VM exists. */
Andrew Walbran42347a92019-05-09 13:59:03 +01001536 to = vm_find(vm_id);
Andrew Scull6386f252018-12-06 13:29:10 +00001537 if (to == NULL) {
1538 return -1;
1539 }
1540
1541 begin = addr;
1542 end = ipa_add(addr, size);
1543
1544 /* Fail if addresses are not page-aligned. */
Alfredo Mazzinghieb1997c2019-02-07 18:00:01 +00001545 if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
1546 !is_aligned(ipa_addr(end), PAGE_SIZE)) {
Andrew Scull6386f252018-12-06 13:29:10 +00001547 return -1;
1548 }
1549
1550 /* Convert the sharing request to memory management modes. */
1551 switch (share) {
1552 case HF_MEMORY_GIVE:
1553 from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED;
1554 to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
1555 break;
1556
1557 case HF_MEMORY_LEND:
1558 from_mode = MM_MODE_INVALID;
1559 to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED;
1560 break;
1561
1562 case HF_MEMORY_SHARE:
1563 from_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_SHARED;
1564 to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_UNOWNED |
1565 MM_MODE_SHARED;
1566 break;
1567
1568 default:
1569 /* The input is untrusted so might not be a valid value. */
1570 return -1;
1571 }
1572
1573 /*
1574 * Create a local pool so any freed memory can't be used by another
1575 * thread. This is to ensure the original mapping can be restored if any
1576 * stage of the process fails.
1577 */
1578 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
1579
1580 sl_lock_both(&from->lock, &to->lock);
1581
1582 /*
1583 * Ensure that the memory range is mapped with the same mode so that
1584 * changes can be reverted if the process fails.
1585 */
1586 if (!mm_vm_get_mode(&from->ptable, begin, end, &orig_from_mode)) {
1587 goto fail;
1588 }
1589
Andrew Scullb5f49e02019-10-02 13:20:47 +01001590 /* Ensure the address range is normal memory and not a device. */
1591 if (orig_from_mode & MM_MODE_D) {
1592 goto fail;
1593 }
1594
Andrew Scull6386f252018-12-06 13:29:10 +00001595 /*
1596 * Ensure the memory range is valid for the sender. If it isn't, the
1597 * sender has either shared it with another VM already or has no claim
1598 * to the memory.
1599 */
1600 if (orig_from_mode & MM_MODE_INVALID) {
1601 goto fail;
1602 }
1603
1604 /*
1605 * The sender must own the memory and have exclusive access to it in
1606 * order to share it. Alternatively, it is giving memory back to the
1607 * owning VM.
1608 */
1609 if (orig_from_mode & MM_MODE_UNOWNED) {
1610 int orig_to_mode;
1611
1612 if (share != HF_MEMORY_GIVE ||
1613 !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode) ||
1614 orig_to_mode & MM_MODE_UNOWNED) {
1615 goto fail;
1616 }
1617 } else if (orig_from_mode & MM_MODE_SHARED) {
1618 goto fail;
1619 }
1620
1621 pa_begin = pa_from_ipa(begin);
1622 pa_end = pa_from_ipa(end);
1623
1624 /*
1625	 * First update the mapping for the sender so there is no overlap with
1626 * the recipient.
1627 */
1628 if (!mm_vm_identity_map(&from->ptable, pa_begin, pa_end, from_mode,
1629 NULL, &local_page_pool)) {
1630 goto fail;
1631 }
1632
1633 /* Clear the memory so no VM or device can see the previous contents. */
1634 if (!api_clear_memory(pa_begin, pa_end, &local_page_pool)) {
1635 goto fail_return_to_sender;
1636 }
1637
1638 /* Complete the transfer by mapping the memory into the recipient. */
1639 if (!mm_vm_identity_map(&to->ptable, pa_begin, pa_end, to_mode, NULL,
1640 &local_page_pool)) {
1641 /* TODO: partial defrag of failed range. */
1642 /* Recover any memory consumed in failed mapping. */
1643 mm_vm_defrag(&from->ptable, &local_page_pool);
1644 goto fail_return_to_sender;
1645 }
1646
1647 ret = 0;
1648 goto out;
1649
1650fail_return_to_sender:
Andrew Scull7e8de322019-07-02 13:00:56 +01001651 CHECK(mm_vm_identity_map(&from->ptable, pa_begin, pa_end,
1652 orig_from_mode, NULL, &local_page_pool));
Andrew Scull6386f252018-12-06 13:29:10 +00001653
1654fail:
1655 ret = -1;
1656
1657out:
1658 sl_unlock(&from->lock);
1659 sl_unlock(&to->lock);
1660
1661 mpool_fini(&local_page_pool);
1662
1663 return ret;
1664}
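
/*
 * Illustrative guest-side sketch (not built): lending a single page to another
 * VM. A vmapi wrapper shaped like hf_share_memory() is assumed here; see
 * vmapi/hf/call.h for the real declaration. The address must be page-aligned,
 * as enforced above.
 */
#if 0
void example_lend_page(spci_vm_id_t to_vm, uint64_t page_addr)
{
	if (hf_share_memory(to_vm, page_addr, PAGE_SIZE, HF_MEMORY_LEND) != 0) {
		/* The lend was refused; the page is still exclusively ours. */
	}
}
#endif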
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001665
1666/** Returns the version of the implemented SPCI specification. */
Andrew Walbran7f920af2019-09-03 17:09:30 +01001667struct spci_value api_spci_version(void)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001668{
1669 /*
1670	 * Ensure that the major revision fits in at most 15 bits and the minor
1671	 * revision in at most 16 bits, matching the asserts below.
1672 */
1673 static_assert(0x8000 > SPCI_VERSION_MAJOR,
1674		      "Major revision representation takes more than 15 bits.");
1675 static_assert(0x10000 > SPCI_VERSION_MINOR,
1676		      "Minor revision representation takes more than 16 bits.");
1677
Andrew Walbran7f920af2019-09-03 17:09:30 +01001678 struct spci_value ret = {
1679 .func = SPCI_SUCCESS_32,
Andrew Walbran455c53a2019-10-10 13:56:19 +01001680 .arg2 = (SPCI_VERSION_MAJOR << SPCI_VERSION_MAJOR_OFFSET) |
Andrew Walbran7f920af2019-09-03 17:09:30 +01001681 SPCI_VERSION_MINOR};
1682 return ret;
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001683}
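
/*
 * Illustrative caller-side sketch (not built): unpacking the value returned
 * above, assuming SPCI_VERSION_MAJOR_OFFSET is the width of the minor field,
 * which the packing above implies.
 */
#if 0
void example_decode_spci_version(struct spci_value version)
{
	uint32_t major = version.arg2 >> SPCI_VERSION_MAJOR_OFFSET;
	uint32_t minor =
		version.arg2 & ((1u << SPCI_VERSION_MAJOR_OFFSET) - 1);

	dlog("SPCI version %d.%d\n", major, minor);
}
#endif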
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001684
1685int64_t api_debug_log(char c, struct vcpu *current)
1686{
Andrew Sculld54e1be2019-08-20 11:09:42 +01001687 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001688 struct vm *vm = current->vm;
1689 struct vm_locked vm_locked = vm_lock(vm);
1690
Andrew Sculld54e1be2019-08-20 11:09:42 +01001691 if (c == '\n' || c == '\0') {
1692 flush = true;
1693 } else {
1694 vm->log_buffer[vm->log_buffer_length++] = c;
1695 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
1696 }
1697
1698 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01001699 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
1700 vm->log_buffer_length);
1701 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001702 }
1703
1704 vm_unlock(&vm_locked);
1705
1706 return 0;
1707}
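
/*
 * Illustrative guest-side sketch (not built): emitting a whole string through
 * the per-character debug log. A vmapi wrapper shaped like hf_debug_log(char)
 * is assumed here.
 */
#if 0
void example_debug_puts(const char *str)
{
	while (*str != '\0') {
		hf_debug_log(*str++);
	}

	/* A newline forces the VM's log buffer to be flushed. */
	hf_debug_log('\n');
}
#endif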