/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/tee.h"
#include "hf/arch/timer.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/spci_internal.h"
#include "hf/spci_memory.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/spci.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */
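
/*
 * Illustrative sketch of the ordering above (not compiled; `vm', `vcpu_a' and
 * `vcpu_b' are placeholders for locks that appear later in this file). When a
 * VM lock and one of its vCPU locks are both needed, the VM lock is taken
 * first; two locks of the same kind are taken together with sl_lock_both():
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	...
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 *	sl_lock_both(&vcpu_a->lock, &vcpu_b->lock);
 */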

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Called in the context of a
 * secondary VM to cause SPCI_RUN to return and the primary VM to regain
 * control of the CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct spci_value primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
	struct vcpu *next = vm_get_vcpu(primary, cpu_index(current->cpu));

	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
	case SPCI_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret.func = SPCI_INTERRUPT_32;
				/*
				 * primary_ret.arg1 should already be set to the
				 * current VM ID and vCPU ID.
				 */
				primary_ret.arg2 = 0;
			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = SPCI_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs, primary_ret);

	/* Mark the current vCPU as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}
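
/*
 * Example of what the primary VM's scheduler sees as a result of the switch
 * above (illustrative only): if a secondary vCPU blocks in SPCI_MSG_WAIT with
 * its arch timer enabled, the primary's SPCI_RUN call returns roughly
 *
 *	(struct spci_value){
 *		.func = SPCI_MSG_WAIT_32,
 *		.arg1 = spci_vm_vcpu(vm_id, vcpu_idx),
 *		.arg2 = <nanoseconds until the timer fires>,
 *	};
 *
 * and SPCI_SLEEP_INDEFINITE in arg2 when no timer is running.
 */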

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct spci_value ret = {
		.func = SPCI_INTERRUPT_32,
		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct spci_value ret = {
		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct spci_value ret = {
		.func = HF_SPCI_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * Returns to the primary VM to allow this CPU to be used for other tasks as the
 * vCPU does not have work to do at this moment. The current vCPU is marked as
 * ready to be scheduled again.
 */
void api_yield(struct vcpu *current, struct vcpu **next)
{
	struct spci_value primary_ret = {
		.func = SPCI_YIELD_32,
		.arg1 = spci_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return;
	}

	*next = api_switch_to_primary(current, primary_ret, VCPU_STATE_READY);
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if it
 * is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct spci_value ret = {
		.func = HF_SPCI_RUN_WAKE_UP,
		.arg1 = spci_vm_vcpu(target_vcpu->vm->id,
				     vcpu_index(target_vcpu)),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct spci_value ret = spci_error(SPCI_ABORTED);

	dlog_notice("Aborting VM %u vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

/**
 * Returns the ID of the VM.
 */
struct spci_value api_spci_id_get(const struct vcpu *current)
{
	return (struct spci_value){.func = SPCI_SUCCESS_32,
				   .arg2 = current->vm->id};
}
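
/*
 * For example (illustrative only), a VM issuing SPCI_ID_GET receives
 * {.func = SPCI_SUCCESS_32, .arg2 = <its own VM ID>}, so the primary VM
 * would see .arg2 == HF_PRIMARY_VM_ID.
 */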

/**
 * Returns the number of VMs configured to run.
 */
spci_vm_count_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vCPUs configured in the given VM, or 0 if there is no
 * such VM or the caller is not the primary VM.
 */
spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
				     const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vCPUs for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return 0;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return 0;
	}

	return vm->vcpu_count;
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the vCPU
 * is next run, which is up to the scheduler.
 *
 * Returns:
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
	int64_t ret = 0;

	sl_lock(&target_vcpu->lock);

	/*
	 * We only need to change state and (maybe) trigger a virtual IRQ if it
	 * is enabled and was not previously pending. Otherwise we can skip
	 * everything except setting the pending bit.
	 *
	 * If you change this logic make sure to update the need_vm_lock logic
	 * above to match.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	target_vcpu->interrupts.enabled_and_pending_count++;

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	sl_unlock(&target_vcpu->lock);

	return ret;
}
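
/*
 * Worked example of the index/mask arithmetic above, assuming
 * INTERRUPT_REGISTER_BITS == 32: for intid 35, intid_index == 35 / 32 == 1
 * and intid_mask == 1U << (35 % 32) == 0x8, i.e. bit 3 of the second
 * interrupt register word.
 */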

/**
 * Constructs an SPCI_MSG_SEND value to return from a successful SPCI_MSG_POLL
 * or SPCI_MSG_WAIT call.
 */
static struct spci_value spci_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case SPCI_MSG_SEND_32:
		return (struct spci_value){
			.func = SPCI_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	case SPCI_MEM_DONATE_32:
	case SPCI_MEM_LEND_32:
	case SPCI_MEM_SHARE_32:
		return (struct spci_value){.func = receiver->mailbox.recv_func,
					   .arg3 = receiver->mailbox.recv_size,
					   .arg4 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return spci_error(SPCI_DENIED);
	}
}
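
/*
 * Worked example of the SPCI_MSG_SEND_32 encoding above (illustrative only):
 * a message sent by VM 2 to VM 1 is reported back with
 * arg1 == (2 << 16) | 1 == 0x20001 and arg3 == the message size in bytes.
 */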

/**
 * Prepares the vCPU to run by updating its state and determining whether a
 * return value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct spci_value *run_ret)
{
	bool need_vm_lock;
	bool ret;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	sl_lock(&vcpu->lock);

	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
	if (need_vm_lock) {
		sl_unlock(&vcpu->lock);
		sl_lock(&vcpu->vm->lock);
		sl_lock(&vcpu->lock);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running, the state should be
	 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
	 * stops running but while Hafnium is in the process of switching back
	 * to the primary there will be a brief period while the state has been
	 * updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for the
	 * purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = spci_error(SPCI_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %u vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED_MAILBOX:
		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     spci_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}
		/* Fall through. */
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu->interrupts.enabled_and_pending_count > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			uint64_t timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}

			/*
			 * The vCPU is not ready to run, return the appropriate
			 * code to the primary which called vcpu_run.
			 */
			run_ret->func =
				vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
					? SPCI_MSG_WAIT_32
					: HF_SPCI_RUN_WAIT_FOR_INTERRUPT;
			run_ret->arg1 =
				spci_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
			run_ret->arg2 = timer_remaining_ns;
		}

		ret = false;
		goto out;

	case VCPU_STATE_READY:
		break;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	sl_unlock(&vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&vcpu->vm->lock);
	}

	return ret;
}

struct spci_value api_spci_run(spci_vm_id_t vm_id, spci_vcpu_index_t vcpu_idx,
			       const struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);

	/* Only the primary VM can switch vCPUs. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		ret.arg2 = SPCI_DENIED;
		goto out;
	}

	/* Only secondary VM vCPUs can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = SPCI_INTERRUPT_32;
	ret.arg1 = spci_vm_vcpu(vm_id, vcpu_idx);
	ret.arg2 = 0;

out:
	return ret;
}
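
/*
 * Minimal sketch of how a primary VM scheduler might drive the call above via
 * the SPCI_RUN ABI (illustrative only; `spci_run' stands for whatever vmapi
 * wrapper issues SPCI_RUN and is not defined in this file):
 *
 *	struct spci_value ret = spci_run(vm_id, vcpu_idx);
 *
 *	switch (ret.func) {
 *	case SPCI_INTERRUPT_32:
 *		// The vCPU was run and should be scheduled again.
 *		break;
 *	case SPCI_MSG_WAIT_32:
 *	case HF_SPCI_RUN_WAIT_FOR_INTERRUPT:
 *		// Sleep the vCPU for up to ret.arg2 nanoseconds.
 *		break;
 *	case SPCI_ERROR_32:
 *		// e.g. SPCI_BUSY in ret.arg2: the vCPU is running elsewhere.
 *		break;
 *	}
 */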

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_spci_rxtx_map and
 * api_spci_rx_release after they've succeeded. If a secondary VM is running
 * and there are waiters, it also switches back to the primary VM for it to
 * wake waiters up.
 */
static struct spci_value api_waiter_result(struct vm_locked locked_vm,
					   struct vcpu *current,
					   struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct spci_value){.func = SPCI_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct spci_value){.func = SPCI_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct spci_value){.func = SPCI_RX_RELEASE_32},
		VCPU_STATE_READY);

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages. The
 * stage-1 page tables must be locked so memory cannot be taken by another core
 * which could result in this transaction being unable to roll back in the case
 * of an error.
 */
static bool api_vm_configure_stage1(struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    struct mpool *local_page_pool)
{
	bool ret;
	struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	mm_unlock_stage1(&mm_stage1_locked);

	return ret;
}

/**
 * Configures the send and receive pages in the VM stage-2 and hypervisor
 * stage-1 page tables. Locking of the page tables combined with a local memory
 * pool ensures there will always be enough memory to recover from any errors
 * that arise.
 */
static bool api_vm_configure_pages(struct vm_locked vm_locked,
				   paddr_t pa_send_begin, paddr_t pa_send_end,
				   uint32_t orig_send_mode,
				   paddr_t pa_recv_begin, paddr_t pa_recv_end,
				   uint32_t orig_recv_mode)
{
	bool ret;
	struct mpool local_page_pool;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if any
	 * stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	/* Take memory ownership away from the VM and mark as shared. */
	if (!vm_identity_map(
		    vm_locked, pa_send_begin, pa_send_end,
		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
		    &local_page_pool, NULL)) {
		goto fail;
	}

	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			     MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
			     &local_page_pool, NULL)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
		goto fail_undo_send;
	}

	if (!api_vm_configure_stage1(vm_locked, pa_send_begin, pa_send_end,
				     pa_recv_begin, pa_recv_end,
				     &local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send_and_recv:
	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			      orig_recv_mode, &local_page_pool, NULL));

fail_undo_send:
	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
			      orig_send_mode, &local_page_pool, NULL));

fail:
	ret = false;

out:
	mpool_fini(&local_page_pool);

	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared.
 *
 * Returns:
 *  - SPCI_ERROR SPCI_INVALID_PARAMETERS if the given addresses are not properly
 *    aligned or are the same.
 *  - SPCI_ERROR SPCI_NO_MEMORY if the hypervisor was unable to map the buffers
 *    due to insufficient page table memory.
 *  - SPCI_ERROR SPCI_DENIED if the pages are already mapped or are not owned by
 *    the caller.
 *  - SPCI_SUCCESS on success if no further action is needed.
 *  - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
 *    needs to wake up or kick waiters.
 */
struct spci_value api_spci_rxtx_map(ipaddr_t send, ipaddr_t recv,
				    uint32_t page_count, struct vcpu *current,
				    struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode;
	uint32_t orig_recv_mode;
	struct spci_value ret;

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / SPCI_PAGE_SIZE) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * The hypervisor's memory map must be locked for the duration of this
	 * operation to ensure there will be sufficient memory to recover from
	 * any failures.
	 *
	 * TODO: the scope of the lock can be reduced but will require
	 * restructuring to keep a single unlock point.
	 */
	vm_locked = vm_lock(vm);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = spci_error(SPCI_DENIED);
		goto exit;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
			    &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		ret = spci_error(SPCI_DENIED);
		goto exit;
	}

	if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
			    &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		ret = spci_error(SPCI_DENIED);
		goto exit;
	}

	if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
				    orig_send_mode, pa_recv_begin, pa_recv_end,
				    orig_recv_mode)) {
		ret = spci_error(SPCI_NO_MEMORY);
		goto exit;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(vm_locked, current, next);

exit:
	vm_unlock(&vm_locked);

	return ret;
}
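
/*
 * Minimal sketch of how a VM might use the call above (illustrative only;
 * `spci_rxtx_map' stands for whatever vmapi wrapper issues SPCI_RXTX_MAP with
 * the two buffer addresses and is not defined in this file):
 *
 *	// Two page-aligned, page-sized buffers owned exclusively by the VM.
 *	static uint8_t send_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
 *	static uint8_t recv_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
 *
 *	struct spci_value ret =
 *		spci_rxtx_map((uintptr_t)send_page, (uintptr_t)recv_page);
 *	// ret.func == SPCI_SUCCESS_32 on success; SPCI_ERROR_32 with, e.g.,
 *	// SPCI_DENIED in ret.arg2 if the mailbox was already set up.
 */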

/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}

/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 */
static struct spci_value deliver_msg(struct vm_locked to, spci_vm_id_t from_id,
				     struct vcpu *current, struct vcpu **next)
{
	struct spci_value ret = (struct spci_value){.func = SPCI_SUCCESS_32};
	struct spci_value primary_ret = {
		.func = SPCI_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (to.vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = spci_msg_recv_return(to.vm);

		to.vm->mailbox.state = MAILBOX_STATE_READ;
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
		return ret;
	}

	to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct spci_value call = spci_msg_recv_return(to.vm);

		ret = arch_tee_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via SPCI.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
	}

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 */
struct spci_value api_spci_msg_send(spci_vm_id_t sender_vm_id,
				    spci_vm_id_t receiver_vm_id, uint32_t size,
				    uint32_t attributes, struct vcpu *current,
				    struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	struct vm_locked to_locked;
	const void *from_msg;
	struct spci_value ret;
	bool notify = (attributes & SPCI_MSG_SEND_NOTIFY_MASK) ==
		      SPCI_MSG_SEND_NOTIFY;

	/* Ensure sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Limit the size of transfer. */
	if (size > SPCI_MSG_PAYLOAD_MAX) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the tx
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the tx mailbox
	 * address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	to_locked = vm_lock(to);

	if (msg_receiver_busy(to_locked, from, notify)) {
		ret = spci_error(SPCI_BUSY);
		goto out;
	}

	/* Copy data. */
	memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX, from_msg, size);
	to->mailbox.recv_size = size;
	to->mailbox.recv_sender = sender_vm_id;
	to->mailbox.recv_func = SPCI_MSG_SEND_32;
	ret = deliver_msg(to_locked, sender_vm_id, current, next);

out:
	vm_unlock(&to_locked);

	return ret;
}
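
/*
 * Minimal sketch of the sender's side of the call above (illustrative only;
 * `get_tx_buffer' and `spci_msg_send' are placeholders for the VM's own TX
 * mapping and whatever vmapi wrapper issues SPCI_MSG_SEND; neither is defined
 * in this file):
 *
 *	void *tx = get_tx_buffer();   // Buffer registered via SPCI_RXTX_MAP.
 *	memcpy_s(tx, SPCI_MSG_PAYLOAD_MAX, payload, size);
 *	struct spci_value ret = spci_msg_send(own_id, target_id, size,
 *					      SPCI_MSG_SEND_NOTIFY);
 *	// On SPCI_ERROR_32 with SPCI_BUSY in ret.arg2 the sender will later be
 *	// notified that the receiver's mailbox is writable, because the NOTIFY
 *	// attribute was set.
 */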

/**
 * Checks whether the vCPU's attempt to block for a message has already been
 * interrupted or whether it is allowed to block.
 */
bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
{
	bool interrupted;

	sl_lock(&current->lock);

	/*
	 * Don't block if there are enabled and pending interrupts, to match
	 * behaviour of wait_for_interrupt.
	 */
	interrupted = (current->interrupts.enabled_and_pending_count > 0);

	sl_unlock(&current->lock);

	return interrupted;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct spci_value api_spci_msg_recv(bool block, struct vcpu *current,
				    struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct spci_value return_code;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vCPUs and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return spci_error(SPCI_NOT_SUPPORTED);
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
		return_code = spci_msg_recv_return(vm);
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		return_code = spci_error(SPCI_RETRY);
		goto out;
	}

	/*
	 * From this point onward this call can only be interrupted or a message
	 * received. If a message is received the return value will be set at
	 * that time to SPCI_SUCCESS.
	 */
	return_code = spci_error(SPCI_INTERRUPTED);
	if (api_spci_msg_recv_block_interrupted(current)) {
		goto out;
	}

	/* Switch back to primary VM to block. */
	{
		struct spci_value run_return = {
			.func = SPCI_MSG_WAIT_32,
			.arg1 = spci_vm_vcpu(vm->id, vcpu_index(current)),
		};

		*next = api_switch_to_primary(current, run_return,
					      VCPU_STATE_BLOCKED_MAILBOX);
	}
out:
	sl_unlock(&vm->lock);

	return return_code;
}
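
/*
 * Minimal sketch of a secondary VM's receive loop built on the call above
 * (illustrative only; `spci_msg_wait' and `spci_rx_release' stand for the
 * vmapi wrappers that issue SPCI_MSG_WAIT and SPCI_RX_RELEASE, while
 * `handle_message' and `rx_buffer' are placeholders for the VM's own code and
 * its registered RX buffer):
 *
 *	for (;;) {
 *		struct spci_value ret = spci_msg_wait();
 *
 *		if (ret.func == SPCI_MSG_SEND_32) {
 *			// Sender ID is in the top half of ret.arg1 and the
 *			// size in ret.arg3; the payload is in the RX buffer.
 *			handle_message(rx_buffer, ret.arg3);
 *		}
 *		spci_rx_release();	// Make the mailbox writable again.
 *	}
 */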

/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_spci_msg_send before with
 * the SPCI_MSG_SEND_NOTIFY attribute, and that call must have failed because
 * the recipient's mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
int64_t api_mailbox_writable_get(const struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct wait_entry *entry;
	int64_t ret;

	sl_lock(&vm->lock);
	if (list_empty(&vm->mailbox.ready_list)) {
		ret = -1;
		goto exit;
	}

	entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
			     ready_links);
	list_remove(&entry->ready_links);
	ret = vm_id_for_wait_entry(vm, entry);

exit:
	sl_unlock(&vm->lock);
	return ret;
}

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
int64_t api_mailbox_waiter_get(spci_vm_id_t vm_id, const struct vcpu *current)
{
	struct vm *vm;
	struct vm_locked locked;
	struct wait_entry *entry;
	struct vm *waiting_vm;

	/* Only primary VMs are allowed to call this function. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return -1;
	}

	/* Check if there are outstanding notifications from given VM. */
	locked = vm_lock(vm);
	entry = api_fetch_waiter(locked);
	vm_unlock(&locked);

	if (entry == NULL) {
		return -1;
	}

	/* Enqueue notification to waiting VM. */
	waiting_vm = entry->waiting_vm;

	sl_lock(&waiting_vm->lock);
	if (list_empty(&entry->ready_links)) {
		list_append(&waiting_vm->mailbox.ready_list,
			    &entry->ready_links);
	}
	sl_unlock(&waiting_vm->lock);

	return waiting_vm->id;
}
1206
1207/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001208 * Releases the caller's mailbox so that a new message can be received. The
1209 * caller must have copied out all data they wish to preserve as new messages
1210 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001211 *
1212 * Returns:
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001213 * - SPCI_ERROR with code SPCI_DENIED on failure, if the mailbox hasn't been read.
1214 * - SPCI_SUCCESS on success if no further action is needed.
1215 * - SPCI_RX_RELEASE if it was called by the primary VM and the primary VM now
1216 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001217 * hf_mailbox_waiter_get.
1218 */
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001219struct spci_value api_spci_rx_release(struct vcpu *current, struct vcpu **next)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001220{
1221 struct vm *vm = current->vm;
1222 struct vm_locked locked;
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001223 struct spci_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001224
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001225 locked = vm_lock(vm);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001226 switch (vm->mailbox.state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001227 case MAILBOX_STATE_EMPTY:
Andrew Sculld6ee1102019-04-05 22:12:42 +01001228 case MAILBOX_STATE_RECEIVED:
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001229 ret = spci_error(SPCI_DENIED);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001230 break;
1231
Andrew Sculld6ee1102019-04-05 22:12:42 +01001232 case MAILBOX_STATE_READ:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001233 ret = api_waiter_result(locked, current, next);
Andrew Sculld6ee1102019-04-05 22:12:42 +01001234 vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001235 break;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001236 }
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001237 vm_unlock(&locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001238
1239 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001240}
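
/*
 * Summary of the mailbox.state transitions relevant to api_spci_rx_release()
 * above, derived from this file (the RECEIVED -> READ step happens when the
 * pending message is read, elsewhere in this API):
 *
 *	EMPTY    -- message delivered --> RECEIVED
 *	RECEIVED -- message read -------> READ
 *	READ     -- rx release ---------> EMPTY (waiters may then be notified)
 *
 * Releasing while EMPTY or RECEIVED fails with SPCI_DENIED, since there is
 * either nothing to release or an unread message that would otherwise be
 * lost.
 */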
Andrew Walbran318f5732018-11-20 16:23:42 +00001241
1242/**
1243 * Enables or disables a given interrupt ID for the calling vCPU.
1244 *
1245 * Returns 0 on success, or -1 if the intid is invalid.
1246 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001247int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001248{
1249 uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
Andrew Walbrane52006c2019-10-22 18:01:28 +01001250 uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001251
Andrew Walbran318f5732018-11-20 16:23:42 +00001252 if (intid >= HF_NUM_INTIDS) {
1253 return -1;
1254 }
1255
1256 sl_lock(&current->lock);
1257 if (enable) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001258 /*
1259 * If it is pending and was not enabled before, increment the
1260 * count.
1261 */
1262 if (current->interrupts.interrupt_pending[intid_index] &
1263 ~current->interrupts.interrupt_enabled[intid_index] &
1264 intid_mask) {
1265 current->interrupts.enabled_and_pending_count++;
1266 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001267 current->interrupts.interrupt_enabled[intid_index] |=
1268 intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001269 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001270 /*
1271 * If it is pending and was enabled before, decrement the count.
1272 */
1273 if (current->interrupts.interrupt_pending[intid_index] &
1274 current->interrupts.interrupt_enabled[intid_index] &
1275 intid_mask) {
1276 current->interrupts.enabled_and_pending_count--;
1277 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001278 current->interrupts.interrupt_enabled[intid_index] &=
1279 ~intid_mask;
1280 }
1281
1282 sl_unlock(&current->lock);
1283 return 0;
1284}
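
/*
 * Worked example for the bookkeeping in api_interrupt_enable() above,
 * assuming INTERRUPT_REGISTER_BITS == 32 (the constant is defined elsewhere,
 * so treat the value as an assumption):
 *
 *	uint32_t intid = 35;
 *	uint32_t intid_index = intid / 32;         becomes 1
 *	uint32_t intid_mask = 1U << (intid % 32);  becomes 1U << 3
 *
 * enabled_and_pending_count only changes when the enable bit flips while the
 * corresponding pending bit is already set, so enabling an interrupt that is
 * not pending, or re-enabling one that is already enabled, leaves the count
 * untouched.
 */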
1285
1286/**
1287 * Returns the ID of the next pending interrupt for the calling vCPU, and
1288 * acknowledges it (i.e. marks it as no longer pending). Returns
1289 * HF_INVALID_INTID if there are no pending interrupts.
1290 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001291uint32_t api_interrupt_get(struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001292{
1293 uint8_t i;
1294 uint32_t first_interrupt = HF_INVALID_INTID;
Andrew Walbran318f5732018-11-20 16:23:42 +00001295
1296 /*
1297 * Find the first enabled and pending interrupt ID, return it, and
1298 * deactivate it.
1299 */
1300 sl_lock(&current->lock);
1301 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
1302 uint32_t enabled_and_pending =
1303 current->interrupts.interrupt_enabled[i] &
1304 current->interrupts.interrupt_pending[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001305
Andrew Walbran318f5732018-11-20 16:23:42 +00001306 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001307 uint8_t bit_index = ctz(enabled_and_pending);
1308 /*
1309 * Mark it as no longer pending and decrement the count.
1310 */
1311 current->interrupts.interrupt_pending[i] &=
Andrew Walbrane52006c2019-10-22 18:01:28 +01001312 ~(1U << bit_index);
Andrew Walbran3d84a262018-12-13 14:41:19 +00001313 current->interrupts.enabled_and_pending_count--;
1314 first_interrupt =
1315 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001316 break;
1317 }
1318 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001319
1320 sl_unlock(&current->lock);
1321 return first_interrupt;
1322}
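
/*
 * Worked example for the scan in api_interrupt_get() above, using made-up
 * register values: with interrupt_enabled[1] == 0x18 and
 * interrupt_pending[1] == 0x10, enabled_and_pending is 0x10 and
 * ctz(0x10) == 4, so bit 4 of interrupt_pending[1] is cleared, the count is
 * decremented and the function returns 1 * INTERRUPT_REGISTER_BITS + 4.
 */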
1323
1324/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001325 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001326 * given VM and vCPU.
1327 */
1328static inline bool is_injection_allowed(uint32_t target_vm_id,
1329 struct vcpu *current)
1330{
1331 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001332
Andrew Walbran318f5732018-11-20 16:23:42 +00001333 /*
1334 * The primary VM is allowed to inject interrupts into any VM. Secondary
1335 * VMs are only allowed to inject interrupts into their own vCPUs.
1336 */
1337 return current_vm_id == HF_PRIMARY_VM_ID ||
1338 current_vm_id == target_vm_id;
1339}
1340
1341/**
1342 * Injects a virtual interrupt of the given ID into the given target vCPU.
1343 * This doesn't cause the vCPU to actually be run immediately; it will be taken
1344 * when the vCPU is next run, which is up to the scheduler.
1345 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001346 * Returns:
1347 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1348 * ID is invalid, or the current VM is not allowed to inject interrupts to
1349 * the target VM.
1350 * - 0 on success if no further action is needed.
1351 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1352 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001353 */
Andrew Walbran42347a92019-05-09 13:59:03 +01001354int64_t api_interrupt_inject(spci_vm_id_t target_vm_id,
Andrew Walbranb037d5b2019-06-25 17:19:41 +01001355 spci_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001356 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001357{
Andrew Walbran318f5732018-11-20 16:23:42 +00001358 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001359 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001360
1361 if (intid >= HF_NUM_INTIDS) {
1362 return -1;
1363 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001364
Andrew Walbran318f5732018-11-20 16:23:42 +00001365 if (target_vm == NULL) {
1366 return -1;
1367 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001368
Andrew Walbran318f5732018-11-20 16:23:42 +00001369 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001370 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00001371 return -1;
1372 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001373
Andrew Walbran318f5732018-11-20 16:23:42 +00001374 if (!is_injection_allowed(target_vm_id, current)) {
1375 return -1;
1376 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001377
Andrew Walbrane1310df2019-04-29 17:28:28 +01001378 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001379
Andrew Walbran17eebf92020-02-05 16:35:49 +00001380	dlog_info("Injecting IRQ %d for VM %d vCPU %d from VM %d CPU %d\n",
1381 intid, target_vm_id, target_vcpu_idx, current->vm->id,
1382 current->cpu->id);
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001383 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001384}
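
/*
 * Illustrative sketch, not part of the original file: how a primary VM
 * scheduler might act on the return value documented for
 * api_interrupt_inject() above. hf_interrupt_inject() is assumed to be the
 * corresponding vmapi wrapper; kick_vcpu() and report_injection_failure()
 * are hypothetical helpers used only for this example.
 *
 *	int64_t rc = hf_interrupt_inject(target_vm_id, target_vcpu_idx, intid);
 *	if (rc == 1) {
 *		kick_vcpu(target_vm_id, target_vcpu_idx);
 *	} else if (rc == -1) {
 *		report_injection_failure(target_vm_id, intid);
 *	}
 */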
Andrew Scull6386f252018-12-06 13:29:10 +00001385
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001386/** Returns the version of the implemented SPCI specification. */
Andrew Walbran9fd29072020-04-22 12:12:14 +01001387struct spci_value api_spci_version(uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001388{
1389 /*
1390	 * Ensure that the major revision representation fits in 15 bits and the
1391	 * minor revision representation fits in 16 bits.
1392 */
1393 static_assert(0x8000 > SPCI_VERSION_MAJOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001394 "Major revision representation takes more than 15 bits.");
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001395 static_assert(0x10000 > SPCI_VERSION_MINOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001396 "Minor revision representation takes more than 16 bits.");
1397 if (requested_version & SPCI_VERSION_RESERVED_BIT) {
1398 /* Invalid encoding, return an error. */
1399 return (struct spci_value){.func = SPCI_NOT_SUPPORTED};
1400 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001401
Andrew Walbran9fd29072020-04-22 12:12:14 +01001402 return (struct spci_value){
1403 .func = (SPCI_VERSION_MAJOR << SPCI_VERSION_MAJOR_OFFSET) |
Andrew Walbran7f920af2019-09-03 17:09:30 +01001404 SPCI_VERSION_MINOR};
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001405}
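
/*
 * Worked example of the version word built by api_spci_version() above,
 * assuming SPCI_VERSION_MAJOR_OFFSET == 16 (the constant is defined
 * elsewhere): major M and minor m are packed as (M << 16) | m, so version
 * 1.0 would be encoded as 0x00010000. A request with
 * SPCI_VERSION_RESERVED_BIT set (presumably bit 31, given the 15-bit major
 * and 16-bit minor fields) is rejected with SPCI_NOT_SUPPORTED rather than
 * decoded.
 */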
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001406
1407int64_t api_debug_log(char c, struct vcpu *current)
1408{
Andrew Sculld54e1be2019-08-20 11:09:42 +01001409 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001410 struct vm *vm = current->vm;
1411 struct vm_locked vm_locked = vm_lock(vm);
1412
Andrew Sculld54e1be2019-08-20 11:09:42 +01001413 if (c == '\n' || c == '\0') {
1414 flush = true;
1415 } else {
1416 vm->log_buffer[vm->log_buffer_length++] = c;
1417 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
1418 }
1419
1420 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01001421 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
1422 vm->log_buffer_length);
1423 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001424 }
1425
1426 vm_unlock(&vm_locked);
1427
1428 return 0;
1429}
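
/*
 * Note on api_debug_log() above: characters are buffered per VM and flushed
 * either when a '\n' or '\0' arrives (the terminator itself is not stored) or
 * when log_buffer fills up, passing the VM id to dlog_flush_vm_buffer() so
 * the output can be attributed to the guest. A guest emitting one character
 * per call therefore still produces whole lines in the log.
 */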
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001430
1431/**
1432 * Discovery function returning information about the implementation of optional
1433 * SPCI interfaces.
1434 */
1435struct spci_value api_spci_features(uint32_t function_id)
1436{
1437 switch (function_id) {
1438 case SPCI_ERROR_32:
1439 case SPCI_SUCCESS_32:
1440 case SPCI_ID_GET_32:
1441 case SPCI_YIELD_32:
1442 case SPCI_VERSION_32:
1443 case SPCI_FEATURES_32:
1444 case SPCI_MSG_SEND_32:
1445 case SPCI_MSG_POLL_32:
1446 case SPCI_MSG_WAIT_32:
1447 return (struct spci_value){.func = SPCI_SUCCESS_32};
1448 default:
1449 return spci_error(SPCI_NOT_SUPPORTED);
1450 }
1451}
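
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to probe for an optional interface via the SPCI_FEATURES ABI
 * implemented by api_spci_features() above. spci_features() is assumed to be
 * the corresponding vmapi wrapper; only the function IDs listed in the switch
 * above report SPCI_SUCCESS_32.
 *
 *	struct spci_value ret = spci_features(SPCI_MSG_POLL_32);
 *	bool msg_poll_supported = (ret.func == SPCI_SUCCESS_32);
 */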
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001452
Andrew Walbrane7ad3c02019-12-24 17:03:04 +00001453struct spci_value api_spci_mem_send(uint32_t share_func, ipaddr_t address,
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001454 uint32_t page_count,
Andrew Walbran0f6cede2020-01-10 15:38:09 +00001455 uint32_t fragment_length, uint32_t length,
Andrew Walbran382e9182020-03-04 11:27:27 +00001456 spci_cookie_t cookie, struct vcpu *current,
Andrew Walbran0f6cede2020-01-10 15:38:09 +00001457 struct vcpu **next)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001458{
1459 struct vm *from = current->vm;
1460 struct vm *to;
1461 const void *from_msg;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001462 struct spci_memory_region *memory_region;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001463 struct spci_value ret;
1464
1465 if (ipa_addr(address) != 0 || page_count != 0) {
1466 /*
1467 * Hafnium only supports passing the descriptor in the TX
1468 * mailbox.
1469 */
1470 return spci_error(SPCI_INVALID_PARAMETERS);
1471 }
1472
Andrew Walbran0f6cede2020-01-10 15:38:09 +00001473 if ((cookie == 0) != (fragment_length == length)) {
1474 /* Cookie is required iff there are multiple fragments. */
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001475 return spci_error(SPCI_INVALID_PARAMETERS);
1476 }
1477
1478 /*
1479 * Check that the sender has configured its send buffer. If the TX
1480 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
1481 * be safely accessed after releasing the lock since the TX mailbox
1482 * address can only be configured once.
1483 */
1484 sl_lock(&from->lock);
1485 from_msg = from->mailbox.send;
1486 sl_unlock(&from->lock);
1487
1488 if (from_msg == NULL) {
1489 return spci_error(SPCI_INVALID_PARAMETERS);
1490 }
1491
1492 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001493 * Copy the memory region descriptor to a fresh page from the memory
1494 * pool. This prevents the sender from changing it underneath us, and
1495 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001496 */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001497 if (length > HF_MAILBOX_SIZE || length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001498 return spci_error(SPCI_INVALID_PARAMETERS);
1499 }
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001500 memory_region =
1501 (struct spci_memory_region *)mpool_alloc(&api_page_pool);
1502 if (memory_region == NULL) {
1503 dlog_verbose("Failed to allocate memory region copy.\n");
1504 return spci_error(SPCI_NO_MEMORY);
1505 }
1506 memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, length);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001507
1508 /* The sender must match the caller. */
1509 if (memory_region->sender != from->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001510 dlog_verbose("Memory region sender doesn't match caller.\n");
1511 ret = spci_error(SPCI_INVALID_PARAMETERS);
1512 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001513 }
1514
1515 if (memory_region->attribute_count != 1) {
1516 /* Hafnium doesn't support multi-way memory sharing for now. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001517 dlog_verbose(
1518 "Multi-way memory sharing not supported (got %d "
1519			"attribute descriptors, expected 1).\n",
1520 memory_region->attribute_count);
1521 ret = spci_error(SPCI_INVALID_PARAMETERS);
1522 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001523 }
1524
1525 /*
1526 * Ensure that the receiver VM exists and isn't the same as the sender.
1527 */
1528 to = vm_find(memory_region->attributes[0].receiver);
1529 if (to == NULL || to == from) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001530 dlog_verbose("Invalid receiver.\n");
1531 ret = spci_error(SPCI_INVALID_PARAMETERS);
1532 goto out;
1533 }
1534
1535 if (to->id == HF_TEE_VM_ID) {
1536 /*
1537 * The 'to' VM lock is only needed in the case that it is the
1538 * TEE VM.
1539 */
1540 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
1541
1542 if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
1543 ret = spci_error(SPCI_BUSY);
1544 goto out_unlock;
1545 }
1546
1547 ret = spci_memory_send(to, vm_to_from_lock.vm2, memory_region,
1548 length, share_func, &api_page_pool);
1549 /*
1550 * spci_memory_send takes ownership of the memory_region, so
1551 * make sure we don't free it.
1552 */
1553 memory_region = NULL;
1554
1555 if (ret.func == SPCI_SUCCESS_32 && to->id == HF_TEE_VM_ID) {
1556 /* Forward memory send message on to TEE. */
1557 memcpy_s(to->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
1558 memory_region, length);
1559 mpool_free(&api_page_pool, memory_region);
1560 memory_region = NULL;
1561 to->mailbox.recv_size = length;
1562 to->mailbox.recv_sender = from->id;
1563 to->mailbox.recv_func = share_func;
1564 ret = deliver_msg(vm_to_from_lock.vm1, from->id,
1565 current, next);
1566 }
1567
1568 out_unlock:
1569 vm_unlock(&vm_to_from_lock.vm1);
1570 vm_unlock(&vm_to_from_lock.vm2);
1571 } else {
1572 struct vm_locked from_locked = vm_lock(from);
1573
1574 ret = spci_memory_send(to, from_locked, memory_region, length,
1575 share_func, &api_page_pool);
1576 /*
1577 * spci_memory_send takes ownership of the memory_region, so
1578 * make sure we don't free it.
1579 */
1580 memory_region = NULL;
1581
1582 vm_unlock(&from_locked);
1583 }
1584
1585out:
1586 if (memory_region != NULL) {
1587 mpool_free(&api_page_pool, memory_region);
1588 }
1589
1590 return ret;
1591}
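
/*
 * The fragmentation check near the top of api_spci_mem_send() above enforces
 * (cookie == 0) != (fragment_length == length), i.e.:
 *
 *	fragment_length == length (single fragment)    -> cookie must be 0
 *	fragment_length != length (multiple fragments) -> cookie must be non-0
 *
 * Any other combination is rejected with SPCI_INVALID_PARAMETERS before the
 * descriptor is copied out of the sender's TX mailbox.
 */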
1592
Andrew Walbran382e9182020-03-04 11:27:27 +00001593struct spci_value api_spci_mem_retrieve_req(
1594 ipaddr_t address, uint32_t page_count, uint32_t fragment_length,
1595 uint32_t length, spci_cookie_t cookie, struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001596{
1597 struct vm *to = current->vm;
1598 struct vm_locked to_locked;
1599 const void *to_msg;
1600 struct spci_memory_retrieve_request *retrieve_request;
1601 uint32_t message_buffer_size;
1602 struct spci_value ret;
1603
1604 if (ipa_addr(address) != 0 || page_count != 0) {
1605 /*
1606 * Hafnium only supports passing the descriptor in the TX
1607 * mailbox.
1608 */
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001609 return spci_error(SPCI_INVALID_PARAMETERS);
1610 }
1611
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001612 if (fragment_length == length && cookie != 0) {
1613 /* Cookie is only allowed if there are multiple fragments. */
1614 dlog_verbose("Unexpected cookie %d.\n", cookie);
1615 return spci_error(SPCI_INVALID_PARAMETERS);
1616 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001617
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001618 retrieve_request =
1619 (struct spci_memory_retrieve_request *)cpu_get_buffer(
1620 current->cpu);
1621 message_buffer_size = cpu_get_buffer_size(current->cpu);
1622 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
1623 dlog_verbose("Retrieve request too long.\n");
1624 return spci_error(SPCI_INVALID_PARAMETERS);
1625 }
1626
1627 to_locked = vm_lock(to);
1628 to_msg = to->mailbox.send;
1629
1630 if (to_msg == NULL) {
1631 dlog_verbose("TX buffer not setup.\n");
1632 ret = spci_error(SPCI_INVALID_PARAMETERS);
1633 goto out;
1634 }
1635
1636 /*
1637 * Copy the retrieve request descriptor to an internal buffer, so that
1638 * the caller can't change it underneath us.
1639 */
1640 memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
1641
1642 if (msg_receiver_busy(to_locked, NULL, false)) {
1643 /*
1644 * Can't retrieve memory information if the mailbox is not
1645 * available.
1646 */
1647 dlog_verbose("RX buffer not ready.\n");
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001648 ret = spci_error(SPCI_BUSY);
1649 goto out;
1650 }
1651
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001652 ret = spci_memory_retrieve(to_locked, retrieve_request, length,
1653 &api_page_pool);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001654
1655out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001656 vm_unlock(&to_locked);
1657 return ret;
1658}
1659
1660struct spci_value api_spci_mem_relinquish(struct vcpu *current)
1661{
1662 struct vm *from = current->vm;
1663 struct vm_locked from_locked;
1664 const void *from_msg;
1665 struct spci_mem_relinquish *relinquish_request;
1666 uint32_t message_buffer_size;
1667 struct spci_value ret;
1668 uint32_t length;
1669
1670 from_locked = vm_lock(from);
1671 from_msg = from->mailbox.send;
1672
1673 if (from_msg == NULL) {
1674 dlog_verbose("TX buffer not setup.\n");
1675 ret = spci_error(SPCI_INVALID_PARAMETERS);
1676 goto out;
1677 }
1678
1679 /*
1680 * Calculate length from relinquish descriptor before copying. We will
1681 * check again later to make sure it hasn't changed.
1682 */
1683 length = sizeof(struct spci_mem_relinquish) +
1684 ((struct spci_mem_relinquish *)from_msg)->endpoint_count *
1685 sizeof(spci_vm_id_t);
1686 /*
1687 * Copy the relinquish descriptor to an internal buffer, so that the
1688 * caller can't change it underneath us.
1689 */
1690 relinquish_request =
1691 (struct spci_mem_relinquish *)cpu_get_buffer(current->cpu);
1692 message_buffer_size = cpu_get_buffer_size(current->cpu);
1693 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
1694 dlog_verbose("Relinquish message too long.\n");
1695 ret = spci_error(SPCI_INVALID_PARAMETERS);
1696 goto out;
1697 }
1698 memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
1699
1700 if (sizeof(struct spci_mem_relinquish) +
1701 relinquish_request->endpoint_count * sizeof(spci_vm_id_t) !=
1702 length) {
1703 dlog_verbose(
1704 "Endpoint count changed while copying to internal "
1705 "buffer.\n");
1706 ret = spci_error(SPCI_INVALID_PARAMETERS);
1707 goto out;
1708 }
1709
1710 ret = spci_memory_relinquish(from_locked, relinquish_request,
1711 &api_page_pool);
1712
1713out:
1714 vm_unlock(&from_locked);
1715 return ret;
1716}
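
/*
 * Note on the copy-then-recheck pattern in api_spci_mem_relinquish() above:
 * the length is first derived from endpoint_count while the descriptor still
 * lives in the caller's TX mailbox, the descriptor is then copied into the
 * per-CPU buffer, and endpoint_count is validated again on the private copy.
 * This guards against the caller changing endpoint_count between the size
 * calculation and the copy (a time-of-check to time-of-use race), since only
 * the copy is used from then on. As a worked example, assuming
 * sizeof(spci_vm_id_t) == 2, a request with endpoint_count == 2 must arrive
 * with length == sizeof(struct spci_mem_relinquish) + 4.
 */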
1717
1718struct spci_value api_spci_mem_reclaim(uint32_t handle, uint32_t flags,
1719 struct vcpu *current)
1720{
1721 struct vm *to = current->vm;
1722 struct vm_locked to_locked;
1723 struct spci_value ret;
1724
1725 to_locked = vm_lock(to);
1726
1727 if ((handle & SPCI_MEMORY_HANDLE_ALLOCATOR_MASK) ==
1728 SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
1729 ret = spci_memory_reclaim(to_locked, handle,
1730 flags & SPCI_MEM_RECLAIM_CLEAR,
1731 &api_page_pool);
1732 } else {
1733 dlog_verbose(
1734 "Tried to reclaim handle %#x not allocated by "
1735			"the hypervisor.\n",
1736 handle);
1737 ret = spci_error(SPCI_INVALID_PARAMETERS);
1738 }
1739
1740 vm_unlock(&to_locked);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001741
1742 return ret;
1743}
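
/*
 * Note on api_spci_mem_reclaim() above: a handle is only reclaimable here if
 * its allocator bits identify the hypervisor, i.e.
 *
 *	(handle & SPCI_MEMORY_HANDLE_ALLOCATOR_MASK) ==
 *		SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR
 *
 * Handles allocated by another component are rejected with
 * SPCI_INVALID_PARAMETERS, and the SPCI_MEM_RECLAIM_CLEAR bit in flags is
 * forwarded to spci_memory_reclaim() as a request to clear the reclaimed
 * memory.
 */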