/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/tee.h"
#include "hf/arch/timer.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */

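/*
 * Illustrative sketch of the ordering above (not compiled; assumes a caller
 * that holds references to a VM and one of its vCPUs): the VM lock is
 * acquired before the vCPU lock and released in the reverse order:
 *
 *	sl_lock(&vm->lock);
 *	sl_lock(&vcpu->lock);
 *	... critical section ...
 *	sl_unlock(&vcpu->lock);
 *	sl_unlock(&vm->lock);
 *
 * api_vcpu_prepare_run() below follows this pattern: when it discovers it
 * needs the VM lock it first drops the vCPU lock, then takes both in order.
 */
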
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause FFA_RUN to return and the primary VM to regain control of the
 * CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct ffa_value primary_ret,
					  enum vcpu_state secondary_state)
{
	struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
	struct vcpu *next = vm_get_vcpu(primary, cpu_index(current->cpu));

	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret.func = FFA_INTERRUPT_32;
				/*
				 * primary_ret.arg1 should already be set to the
				 * current VM ID and vCPU ID.
				 */
				primary_ret.arg2 = 0;
			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	/* Set the return value for the primary VM's call to FFA_RUN. */
	arch_regs_set_retval(&next->regs, primary_ret);

	/* Mark the current vCPU as waiting. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * Returns to the primary VM to allow this CPU to be used for other tasks as the
 * vCPU does not have work to do at this moment. The current vCPU is marked as
 * ready to be scheduled again.
 */
void api_yield(struct vcpu *current, struct vcpu **next)
{
	struct ffa_value primary_ret = {
		.func = FFA_YIELD_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return;
	}

	*next = api_switch_to_primary(current, primary_ret, VCPU_STATE_READY);
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if it
 * is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAKE_UP,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);

	dlog_notice("Aborting VM %u vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

/**
 * Returns the ID of the VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * Returns the number of VMs configured to run.
 */
ffa_vm_count_t api_vm_get_count(void)
{
	return vm_get_count();
}

/**
 * Returns the number of vCPUs configured in the given VM, or 0 if there is no
 * such VM or the caller is not the primary VM.
 */
ffa_vcpu_count_t api_vcpu_get_count(ffa_vm_id_t vm_id,
				    const struct vcpu *current)
{
	struct vm *vm;

	/* Only the primary VM needs to know about vCPUs for scheduling. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return 0;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return 0;
	}

	return vm->vcpu_count;
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the vCPU
 * is next run, which is up to the scheduler.
 *
 * Returns:
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
	int64_t ret = 0;

	sl_lock(&target_vcpu->lock);

	/*
	 * We only need to change state and (maybe) trigger a virtual IRQ if it
	 * is enabled and was not previously pending. Otherwise we can skip
	 * everything except setting the pending bit.
	 *
	 * If you change this logic make sure to update the need_vm_lock logic
	 * above to match.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	target_vcpu->interrupts.enabled_and_pending_count++;

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (target_vcpu->interrupts.enabled_and_pending_count != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	sl_unlock(&target_vcpu->lock);

	return ret;
}

/**
 * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
 * or FFA_MSG_WAIT call.
 */
static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}

/**
 * Prepares the vCPU to run by updating its state and determining whether a
 * return value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct ffa_value *run_ret)
{
	bool need_vm_lock;
	bool ret;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	sl_lock(&vcpu->lock);

	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
	if (need_vm_lock) {
		sl_unlock(&vcpu->lock);
		sl_lock(&vcpu->vm->lock);
		sl_lock(&vcpu->lock);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running then the state should be
	 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
	 * stops running but while Hafnium is in the process of switching back
	 * to the primary there will be a brief period while the state has been
	 * updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for the
	 * purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = ffa_error(FFA_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %u vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED_MAILBOX:
		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     ffa_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}
		/* Fall through. */
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu->interrupts.enabled_and_pending_count > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			uint64_t timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}

			/*
			 * The vCPU is not ready to run, return the appropriate
			 * code to the primary which called vcpu_run.
			 */
			run_ret->func =
				vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
					? FFA_MSG_WAIT_32
					: HF_FFA_RUN_WAIT_FOR_INTERRUPT;
			run_ret->arg1 =
				ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
			run_ret->arg2 = timer_remaining_ns;
		}

		ret = false;
		goto out;

	case VCPU_STATE_READY:
		break;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	sl_unlock(&vcpu->lock);
	if (need_vm_lock) {
		sl_unlock(&vcpu->vm->lock);
	}

	return ret;
}

struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			     const struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);

	/* Only the primary VM can switch vCPUs. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		ret.arg2 = FFA_DENIED;
		goto out;
	}

	/* Only secondary VM vCPUs can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = FFA_INTERRUPT_32;
	ret.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx);
	ret.arg2 = 0;

out:
	return ret;
}

/**
 * Checks that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}
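
/*
 * For example (illustrative, using the MM_MODE_* flags from "hf/mm.h"): a
 * plain read-write mapping such as MM_MODE_R | MM_MODE_W passes this check,
 * while any mode containing MM_MODE_UNOWNED or MM_MODE_SHARED, such as the
 * mailbox mode set up in api_vm_configure_pages() below, fails it.
 */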

/**
 * Determines the value to be returned by api_ffa_rxtx_map and
 * api_ffa_rx_release after they've succeeded. If a secondary VM is running
 * and there are waiters, it also switches back to the primary VM for it to
 * wake waiters up.
 */
static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
					  struct vcpu *current,
					  struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
		VCPU_STATE_READY);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages. The
 * stage-1 page tables must be locked so memory cannot be taken by another core
 * which could result in this transaction being unable to roll back in the case
 * of an error.
 */
static bool api_vm_configure_stage1(struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    struct mpool *local_page_pool)
{
	bool ret;
	struct mm_stage1_locked mm_stage1_locked = mm_lock_stage1();

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	mm_unlock_stage1(&mm_stage1_locked);

	return ret;
}

/**
 * Configures the send and receive pages in the VM stage-2 and hypervisor
 * stage-1 page tables. Locking of the page tables combined with a local memory
 * pool ensures there will always be enough memory to recover from any errors
 * that arise.
 */
static bool api_vm_configure_pages(struct vm_locked vm_locked,
				   paddr_t pa_send_begin, paddr_t pa_send_end,
				   uint32_t orig_send_mode,
				   paddr_t pa_recv_begin, paddr_t pa_recv_end,
				   uint32_t orig_recv_mode)
{
	bool ret;
	struct mpool local_page_pool;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if any
	 * stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	/* Take memory ownership away from the VM and mark as shared. */
	if (!vm_identity_map(
		    vm_locked, pa_send_begin, pa_send_end,
		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
		    &local_page_pool, NULL)) {
		goto fail;
	}

	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			     MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
			     &local_page_pool, NULL)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm_locked.vm->ptable, &local_page_pool);
		goto fail_undo_send;
	}

	if (!api_vm_configure_stage1(vm_locked, pa_send_begin, pa_send_end,
				     pa_recv_begin, pa_recv_end,
				     &local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send_and_recv:
	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			      orig_recv_mode, &local_page_pool, NULL));

fail_undo_send:
	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
			      orig_send_mode, &local_page_pool, NULL));

fail:
	ret = false;

out:
	mpool_fini(&local_page_pool);

	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned or are the same.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *   the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters.
 */
struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
				  uint32_t page_count, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode;
	uint32_t orig_recv_mode;
	struct ffa_value ret;

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);

	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * The hypervisor's memory map must be locked for the duration of this
	 * operation to ensure there will be sufficient memory to recover from
	 * any failures.
	 *
	 * TODO: the scope of the lock can be reduced but will require
	 * restructuring to keep a single unlock point.
	 */
	vm_locked = vm_lock(vm);

	/* We only allow these to be set up once. */
	if (vm->mailbox.send || vm->mailbox.recv) {
		ret = ffa_error(FFA_DENIED);
		goto exit;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!mm_vm_get_mode(&vm->ptable, send, ipa_add(send, PAGE_SIZE),
			    &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		ret = ffa_error(FFA_DENIED);
		goto exit;
	}

	if (!mm_vm_get_mode(&vm->ptable, recv, ipa_add(recv, PAGE_SIZE),
			    &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		ret = ffa_error(FFA_DENIED);
		goto exit;
	}

	if (!api_vm_configure_pages(vm_locked, pa_send_begin, pa_send_end,
				    orig_send_mode, pa_recv_begin, pa_recv_end,
				    orig_recv_mode)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto exit;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(vm_locked, current, next);

exit:
	vm_unlock(&vm_locked);

	return ret;
}

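/*
 * Illustrative guest-side sketch (not part of this file; assumes an
 * ffa_rxtx_map() wrapper in "vmapi/hf/call.h" and the hf_ipaddr_t address
 * type): a VM reserves two page-aligned buffers and registers them once at
 * boot:
 *
 *	static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
 *	static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
 *
 *	struct ffa_value ret = ffa_rxtx_map((hf_ipaddr_t)send_page,
 *					    (hf_ipaddr_t)recv_page);
 *	if (ret.func == FFA_ERROR_32) {
 *		... handle one of the error codes listed above ...
 *	}
 */
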
/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}

/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 */
static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
				    struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct ffa_value primary_ret = {
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (to.vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = ffa_msg_recv_return(to.vm);

		to.vm->mailbox.state = MAILBOX_STATE_READ;
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
		return ret;
	}

	to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct ffa_value call = ffa_msg_recv_return(to.vm);

		ret = arch_tee_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via FF-A.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
	}

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 */
struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
				  ffa_vm_id_t receiver_vm_id, uint32_t size,
				  uint32_t attributes, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	struct vm_locked to_locked;
	const void *from_msg;
	struct ffa_value ret;
	bool notify =
		(attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;

	/* Ensure sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Limit the size of transfer. */
	if (size > FFA_MSG_PAYLOAD_MAX) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the tx
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the tx mailbox
	 * address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	to_locked = vm_lock(to);

	if (msg_receiver_busy(to_locked, from, notify)) {
		ret = ffa_error(FFA_BUSY);
		goto out;
	}

	/* Copy data. */
	memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
	to->mailbox.recv_size = size;
	to->mailbox.recv_sender = sender_vm_id;
	to->mailbox.recv_func = FFA_MSG_SEND_32;
	ret = deliver_msg(to_locked, sender_vm_id, current, next);

out:
	vm_unlock(&to_locked);

	return ret;
}

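/*
 * Illustrative guest-side sketch of the send path (not part of this file;
 * assumes an ffa_msg_send() wrapper in "vmapi/hf/call.h" and a TX buffer
 * send_page already registered via FFA_RXTX_MAP):
 *
 *	memcpy_s(send_page, FFA_MSG_PAYLOAD_MAX, "ping", 5);
 *	struct ffa_value ret = ffa_msg_send(own_id, target_id, 5, 0);
 *	if (ret.func == FFA_ERROR_32 && ret.arg2 == FFA_BUSY) {
 *		... receiver's mailbox is full: retry later, or send with
 *		    FFA_MSG_SEND_NOTIFY to be told when it is freed ...
 *	}
 */
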
/**
 * Checks whether the vCPU's attempt to block for a message has already been
 * interrupted or whether it is allowed to block.
 */
bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
{
	bool interrupted;

	sl_lock(&current->lock);

	/*
	 * Don't block if there are enabled and pending interrupts, to match
	 * behaviour of wait_for_interrupt.
	 */
	interrupted = (current->interrupts.enabled_and_pending_count > 0);

	sl_unlock(&current->lock);

	return interrupted;
}

/**
 * Receives a message from the mailbox. If one isn't available, this function
 * can optionally block the caller until one becomes available.
 *
 * No new messages can be received until the mailbox has been cleared.
 */
struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct ffa_value return_code;

	/*
	 * The primary VM will receive messages as a status code from running
	 * vCPUs and must not call this function.
	 */
	if (vm->id == HF_PRIMARY_VM_ID) {
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	sl_lock(&vm->lock);

	/* Return pending messages without blocking. */
	if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
		return_code = ffa_msg_recv_return(vm);
		goto out;
	}

	/* No pending message so fail if not allowed to block. */
	if (!block) {
		return_code = ffa_error(FFA_RETRY);
		goto out;
	}

	/*
	 * From this point onward this call can only be interrupted or a message
	 * received. If a message is received the return value will be set at
	 * that time to FFA_SUCCESS.
	 */
	return_code = ffa_error(FFA_INTERRUPTED);
	if (api_ffa_msg_recv_block_interrupted(current)) {
		goto out;
	}

	/* Switch back to primary VM to block. */
	{
		struct ffa_value run_return = {
			.func = FFA_MSG_WAIT_32,
			.arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
		};

		*next = api_switch_to_primary(current, run_return,
					      VCPU_STATE_BLOCKED_MAILBOX);
	}
out:
	sl_unlock(&vm->lock);

	return return_code;
}

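/*
 * Illustrative guest-side receive loop matching the above (not part of this
 * file; assumes ffa_msg_wait() and ffa_rx_release() wrappers in
 * "vmapi/hf/call.h"):
 *
 *	for (;;) {
 *		struct ffa_value ret = ffa_msg_wait();
 *
 *		if (ret.func == FFA_MSG_SEND_32) {
 *			... process the message in the RX buffer ...
 *			ffa_rx_release();
 *		}
 *	}
 */
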
/**
 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
 * by this function, the caller must have called api_ffa_msg_send before with
 * the notify argument set to true, and this call must have failed because the
 * mailbox was not available.
 *
 * It should be called repeatedly to retrieve a list of VMs.
 *
 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
 * became writable.
 */
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001133int64_t api_mailbox_writable_get(const struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001134{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001135 struct vm *vm = current->vm;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001136 struct wait_entry *entry;
Andrew Scullc0e569a2018-10-02 18:05:21 +01001137 int64_t ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001138
1139 sl_lock(&vm->lock);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001140 if (list_empty(&vm->mailbox.ready_list)) {
1141 ret = -1;
1142 goto exit;
1143 }
1144
1145 entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
1146 ready_links);
1147 list_remove(&entry->ready_links);
Andrew Walbranaad8f982019-12-04 10:56:39 +00001148 ret = vm_id_for_wait_entry(vm, entry);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001149
1150exit:
1151 sl_unlock(&vm->lock);
1152 return ret;
1153}
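
/*
 * Illustrative sketch, not part of the API: callers are expected to drain
 * the ready list by invoking api_mailbox_writable_get repeatedly until it
 * returns -1. This example function is hypothetical and only logs the ids
 * it retrieves.
 */
static void example_drain_writable_mailboxes(const struct vcpu *current)
{
	int64_t id;

	while ((id = api_mailbox_writable_get(current)) != -1) {
		dlog_info("Mailbox of VM %d is writable again.\n", (int)id);
	}
}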

/**
 * Retrieves the next VM waiting to be notified that the mailbox of the
 * specified VM became writable. Only primary VMs are allowed to call this.
 *
 * Returns -1 on failure or if there are no waiters; the VM id of the next
 * waiter otherwise.
 */
int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
{
	struct vm *vm;
	struct vm_locked locked;
	struct wait_entry *entry;
	struct vm *waiting_vm;

	/* Only primary VMs are allowed to call this function. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		return -1;
	}

	vm = vm_find(vm_id);
	if (vm == NULL) {
		return -1;
	}

	/* Check if there are outstanding notifications from given VM. */
	locked = vm_lock(vm);
	entry = api_fetch_waiter(locked);
	vm_unlock(&locked);

	if (entry == NULL) {
		return -1;
	}

	/* Enqueue notification to waiting VM. */
	waiting_vm = entry->waiting_vm;

	sl_lock(&waiting_vm->lock);
	if (list_empty(&entry->ready_links)) {
		list_append(&waiting_vm->mailbox.ready_list,
			    &entry->ready_links);
	}
	sl_unlock(&waiting_vm->lock);

	return waiting_vm->id;
}
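
/*
 * Illustrative sketch, not part of the API: after api_ffa_rx_release (below)
 * reports that waiters exist, the primary VM is expected to fetch them one
 * by one until -1 is returned. This example function is hypothetical.
 */
static void example_notify_waiters(ffa_vm_id_t vm_id,
				   const struct vcpu *current)
{
	int64_t waiter_id;

	while ((waiter_id = api_mailbox_waiter_get(vm_id, current)) != -1) {
		dlog_info("VM %d is waiting on the mailbox of VM %d.\n",
			  (int)waiter_id, vm_id);
	}
}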

/**
 * Releases the caller's mailbox so that a new message can be received. The
 * caller must have copied out all data they wish to preserve as new messages
 * will overwrite the old and will arrive asynchronously.
 *
 * Returns:
 *  - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
 *  - FFA_SUCCESS on success if no further action is needed.
 *  - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *    needs to wake up or kick waiters. Waiters should be retrieved by calling
 *    hf_mailbox_waiter_get.
 */
struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct vm_locked locked;
	struct ffa_value ret;

	locked = vm_lock(vm);
	switch (vm->mailbox.state) {
	case MAILBOX_STATE_EMPTY:
	case MAILBOX_STATE_RECEIVED:
		ret = ffa_error(FFA_DENIED);
		break;

	case MAILBOX_STATE_READ:
		ret = api_waiter_result(locked, current, next);
		vm->mailbox.state = MAILBOX_STATE_EMPTY;
		break;
	}
	vm_unlock(&locked);

	return ret;
}
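
/*
 * Illustrative sketch of the mailbox state machine enforced above, assuming
 * the state names used in this file: a delivered message moves the mailbox
 * from EMPTY to RECEIVED, reading it (e.g. via FFA_MSG_POLL) moves it to
 * READ, and only then does FFA_RX_RELEASE return it to EMPTY. This helper is
 * hypothetical; a real caller would need to hold vm->lock.
 */
static bool example_rx_release_permitted(const struct vm *vm)
{
	/* Mirrors the switch in api_ffa_rx_release: only a mailbox that has
	 * been read may be released. */
	return vm->mailbox.state == MAILBOX_STATE_READ;
}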

/**
 * Enables or disables a given interrupt ID for the calling vCPU.
 *
 * Returns 0 on success, or -1 if the intid is invalid.
 */
int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	sl_lock(&current->lock);
	if (enable) {
		/*
		 * If it is pending and was not enabled before, increment the
		 * count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    ~current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count++;
		}
		current->interrupts.interrupt_enabled[intid_index] |=
			intid_mask;
	} else {
		/*
		 * If it is pending and was enabled before, decrement the count.
		 */
		if (current->interrupts.interrupt_pending[intid_index] &
		    current->interrupts.interrupt_enabled[intid_index] &
		    intid_mask) {
			current->interrupts.enabled_and_pending_count--;
		}
		current->interrupts.interrupt_enabled[intid_index] &=
			~intid_mask;
	}

	sl_unlock(&current->lock);
	return 0;
}
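
/*
 * Illustrative sketch, not part of the API: enabled_and_pending_count is a
 * cached value that must always equal the population count of
 * (enabled & pending) across all interrupt registers, which the increments
 * and decrements above maintain. A hypothetical consistency check could
 * recompute it as follows (__builtin_popcount is a GCC/Clang builtin,
 * assumed available; the caller would need to hold vcpu->lock).
 */
static uint32_t example_recount_enabled_and_pending(struct vcpu *vcpu)
{
	uint32_t i;
	uint32_t count = 0;

	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		count += __builtin_popcount(
			vcpu->interrupts.interrupt_enabled[i] &
			vcpu->interrupts.interrupt_pending[i]);
	}

	return count;
}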

/**
 * Returns the ID of the next pending interrupt for the calling vCPU, and
 * acknowledges it (i.e. marks it as no longer pending). Returns
 * HF_INVALID_INTID if there are no pending interrupts.
 */
uint32_t api_interrupt_get(struct vcpu *current)
{
	uint8_t i;
	uint32_t first_interrupt = HF_INVALID_INTID;

	/*
	 * Find the first enabled and pending interrupt ID, return it, and
	 * deactivate it.
	 */
	sl_lock(&current->lock);
	for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
		uint32_t enabled_and_pending =
			current->interrupts.interrupt_enabled[i] &
			current->interrupts.interrupt_pending[i];

		if (enabled_and_pending != 0) {
			uint8_t bit_index = ctz(enabled_and_pending);
			/*
			 * Mark it as no longer pending and decrement the count.
			 */
			current->interrupts.interrupt_pending[i] &=
				~(1U << bit_index);
			current->interrupts.enabled_and_pending_count--;
			first_interrupt =
				i * INTERRUPT_REGISTER_BITS + bit_index;
			break;
		}
	}

	sl_unlock(&current->lock);
	return first_interrupt;
}
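
/*
 * Illustrative sketch, not part of the API: a consumer typically
 * acknowledges all pending interrupts in a loop, stopping at
 * HF_INVALID_INTID. Shown against the hypervisor-side api_interrupt_get for
 * brevity; a real guest would use the equivalent vmapi call.
 */
static void example_drain_pending_interrupts(struct vcpu *current)
{
	uint32_t intid;

	while ((intid = api_interrupt_get(current)) != HF_INVALID_INTID) {
		dlog_info("Acknowledged interrupt %d.\n", intid);
	}
}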

/**
 * Returns whether the current vCPU is allowed to inject an interrupt into the
 * given VM and vCPU.
 */
static inline bool is_injection_allowed(uint32_t target_vm_id,
					struct vcpu *current)
{
	uint32_t current_vm_id = current->vm->id;

	/*
	 * The primary VM is allowed to inject interrupts into any VM. Secondary
	 * VMs are only allowed to inject interrupts into their own vCPUs.
	 */
	return current_vm_id == HF_PRIMARY_VM_ID ||
	       current_vm_id == target_vm_id;
}

/**
 * Injects a virtual interrupt of the given ID into the given target vCPU.
 * This doesn't cause the vCPU to actually be run immediately; it will be taken
 * when the vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
 *    ID is invalid, or the current VM is not allowed to inject interrupts to
 *    the target VM.
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
			     ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
			     struct vcpu *current, struct vcpu **next)
{
	struct vcpu *target_vcpu;
	struct vm *target_vm = vm_find(target_vm_id);

	if (intid >= HF_NUM_INTIDS) {
		return -1;
	}

	if (target_vm == NULL) {
		return -1;
	}

	if (target_vcpu_idx >= target_vm->vcpu_count) {
		/* The requested vCPU must exist. */
		return -1;
	}

	if (!is_injection_allowed(target_vm_id, current)) {
		return -1;
	}

	target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);

	dlog_info("Injecting IRQ %d for VM %d vCPU %d from VM %d CPU %d\n",
		  intid, target_vm_id, target_vcpu_idx, current->vm->id,
		  current->cpu->id);
	return internal_interrupt_inject(target_vcpu, intid, current, next);
}
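
/*
 * Illustrative sketch, not part of the API: how a caller might act on the
 * three-way return value of api_interrupt_inject. This example function is
 * hypothetical and only logs the outcome.
 */
static void example_inject_and_dispatch(ffa_vm_id_t vm_id,
					ffa_vcpu_index_t vcpu_idx,
					uint32_t intid, struct vcpu *current,
					struct vcpu **next)
{
	switch (api_interrupt_inject(vm_id, vcpu_idx, intid, current, next)) {
	case 0:
		/* Delivered; nothing further to do. */
		break;
	case 1:
		/* The primary VM must wake up or kick the target vCPU. */
		dlog_info("vCPU %d of VM %d needs kicking.\n", vcpu_idx,
			  vm_id);
		break;
	default:
		dlog_info("Injection of interrupt %d rejected.\n", intid);
		break;
	}
}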

/** Returns the version of the implemented FF-A specification. */
struct ffa_value api_ffa_version(uint32_t requested_version)
{
	/*
	 * Ensure that the major revision representation occupies at most 15
	 * bits and the minor revision representation at most 16 bits.
	 */
	static_assert(0x8000 > FFA_VERSION_MAJOR,
		      "Major revision representation takes more than 15 bits.");
	static_assert(0x10000 > FFA_VERSION_MINOR,
		      "Minor revision representation takes more than 16 bits.");
	if (requested_version & FFA_VERSION_RESERVED_BIT) {
		/* Invalid encoding, return an error. */
		return (struct ffa_value){.func = FFA_NOT_SUPPORTED};
	}

	return (struct ffa_value){
		.func = (FFA_VERSION_MAJOR << FFA_VERSION_MAJOR_OFFSET) |
			FFA_VERSION_MINOR};
}
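
/*
 * Illustrative sketch, not part of the API: decoding the version word built
 * above. The masks follow from the static asserts: 15 bits of major revision
 * above FFA_VERSION_MAJOR_OFFSET and 16 bits of minor revision below it.
 * This helper is hypothetical.
 */
static void example_decode_ffa_version(uint32_t version, uint16_t *major,
				       uint16_t *minor)
{
	*major = (version >> FFA_VERSION_MAJOR_OFFSET) & 0x7fff;
	*minor = version & 0xffff;
}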

int64_t api_debug_log(char c, struct vcpu *current)
{
	bool flush;
	struct vm *vm = current->vm;
	struct vm_locked vm_locked = vm_lock(vm);

	if (c == '\n' || c == '\0') {
		flush = true;
	} else {
		vm->log_buffer[vm->log_buffer_length++] = c;
		flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
	}

	if (flush) {
		dlog_flush_vm_buffer(vm->id, vm->log_buffer,
				     vm->log_buffer_length);
		vm->log_buffer_length = 0;
	}

	vm_unlock(&vm_locked);

	return 0;
}
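
/*
 * Illustrative sketch, not part of the API: characters are buffered per VM
 * and flushed on '\n', '\0' or a full buffer, so logging a whole string is a
 * matter of feeding it one character at a time. This example function is
 * hypothetical.
 */
static void example_debug_log_string(const char *str, struct vcpu *current)
{
	while (*str != '\0') {
		api_debug_log(*str++, current);
	}
	/* Force a flush of anything still buffered. */
	api_debug_log('\0', current);
}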

/**
 * Discovery function returning information about the implementation of
 * optional FF-A interfaces.
 */
struct ffa_value api_ffa_features(uint32_t function_id)
{
	switch (function_id) {
	case FFA_ERROR_32:
	case FFA_SUCCESS_32:
	case FFA_INTERRUPT_32:
	case FFA_VERSION_32:
	case FFA_FEATURES_32:
	case FFA_RX_RELEASE_32:
	case FFA_RXTX_MAP_64:
	case FFA_ID_GET_32:
	case FFA_MSG_POLL_32:
	case FFA_MSG_WAIT_32:
	case FFA_YIELD_32:
	case FFA_RUN_32:
	case FFA_MSG_SEND_32:
	case FFA_MEM_DONATE_32:
	case FFA_MEM_LEND_32:
	case FFA_MEM_SHARE_32:
	case FFA_MEM_RETRIEVE_REQ_32:
	case FFA_MEM_RETRIEVE_RESP_32:
	case FFA_MEM_RELINQUISH_32:
	case FFA_MEM_RECLAIM_32:
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	default:
		return ffa_error(FFA_NOT_SUPPORTED);
	}
}
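
/*
 * Illustrative sketch, not part of the API: probing for an optional
 * interface before relying on it. This helper is hypothetical.
 */
static bool example_ffa_mem_share_supported(void)
{
	return api_ffa_features(FFA_MEM_SHARE_32).func == FFA_SUCCESS_32;
}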

struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
				  uint32_t fragment_length, ipaddr_t address,
				  uint32_t page_count, struct vcpu *current)
{
	struct vm *from = current->vm;
	struct vm *to;
	const void *from_msg;
	struct ffa_memory_region *memory_region;
	struct ffa_value ret;

	if (ipa_addr(address) != 0 || page_count != 0) {
		/*
		 * Hafnium only supports passing the descriptor in the TX
		 * mailbox.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (fragment_length != length) {
		dlog_verbose("Fragmentation not yet supported.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the TX
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the TX mailbox
	 * address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the memory region descriptor to a fresh page from the memory
	 * pool. This prevents the sender from changing it underneath us, and
	 * also lets us keep it around in the share state table if needed.
	 */
	if (fragment_length > HF_MAILBOX_SIZE ||
	    fragment_length > MM_PPOOL_ENTRY_SIZE) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}
	memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
	if (memory_region == NULL) {
		dlog_verbose("Failed to allocate memory region copy.\n");
		return ffa_error(FFA_NO_MEMORY);
	}
	memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);

	/* The sender must match the caller. */
	if (memory_region->sender != from->id) {
		dlog_verbose("Memory region sender doesn't match caller.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (memory_region->receiver_count != 1) {
		/* Hafnium doesn't support multi-way memory sharing for now. */
		dlog_verbose(
			"Multi-way memory sharing not supported (got %d "
			"endpoint memory access descriptors, expected 1).\n",
			memory_region->receiver_count);
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Ensure that the receiver VM exists and isn't the same as the sender.
	 */
	to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
	if (to == NULL || to == from) {
		dlog_verbose("Invalid receiver.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (to->id == HF_TEE_VM_ID) {
		/*
		 * The 'to' VM lock is only needed in the case that it is the
		 * TEE VM.
		 */
		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);

		if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
			ret = ffa_error(FFA_BUSY);
			goto out_unlock;
		}

		ret = ffa_memory_tee_send(
			vm_to_from_lock.vm2, vm_to_from_lock.vm1, memory_region,
			length, fragment_length, share_func, &api_page_pool);
		/*
		 * ffa_memory_tee_send takes ownership of the memory_region, so
		 * make sure we don't free it.
		 */
		memory_region = NULL;

	out_unlock:
		vm_unlock(&vm_to_from_lock.vm1);
		vm_unlock(&vm_to_from_lock.vm2);
	} else {
		struct vm_locked from_locked = vm_lock(from);

		ret = ffa_memory_send(from_locked, memory_region, length,
				      fragment_length, share_func,
				      &api_page_pool);
		/*
		 * ffa_memory_send takes ownership of the memory_region, so
		 * make sure we don't free it.
		 */
		memory_region = NULL;

		vm_unlock(&from_locked);
	}

out:
	if (memory_region != NULL) {
		mpool_free(&api_page_pool, memory_region);
	}

	return ret;
}
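
/*
 * Illustrative sketch, not part of the API: the minimum descriptor fields
 * that api_ffa_mem_send above validates for the single-receiver case. Real
 * descriptors also carry attributes, flags and the composite memory region;
 * see ffa_memory.c. This helper is hypothetical and assumes the region
 * buffer is large enough to hold one receiver entry.
 */
static void example_init_single_receiver(struct ffa_memory_region *region,
					 ffa_vm_id_t sender,
					 ffa_vm_id_t receiver)
{
	region->sender = sender;
	region->receiver_count = 1;
	region->receivers[0].receiver_permissions.receiver = receiver;
}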

struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
					  uint32_t fragment_length,
					  ipaddr_t address, uint32_t page_count,
					  struct vcpu *current)
{
	struct vm *to = current->vm;
	struct vm_locked to_locked;
	const void *to_msg;
	struct ffa_memory_region *retrieve_request;
	uint32_t message_buffer_size;
	struct ffa_value ret;

	if (ipa_addr(address) != 0 || page_count != 0) {
		/*
		 * Hafnium only supports passing the descriptor in the TX
		 * mailbox.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (fragment_length != length) {
		dlog_verbose("Fragmentation not yet supported.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	retrieve_request =
		(struct ffa_memory_region *)cpu_get_buffer(current->cpu);
	message_buffer_size = cpu_get_buffer_size(current->cpu);
	if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
		dlog_verbose("Retrieve request too long.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	to_locked = vm_lock(to);
	to_msg = to->mailbox.send;

	if (to_msg == NULL) {
		dlog_verbose("TX buffer not setup.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Copy the retrieve request descriptor to an internal buffer, so that
	 * the caller can't change it underneath us.
	 */
	memcpy_s(retrieve_request, message_buffer_size, to_msg, length);

	if (msg_receiver_busy(to_locked, NULL, false)) {
		/*
		 * Can't retrieve memory information if the mailbox is not
		 * available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		ret = ffa_error(FFA_BUSY);
		goto out;
	}

	ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
				  &api_page_pool);

out:
	vm_unlock(&to_locked);
	return ret;
}

struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
{
	struct vm *from = current->vm;
	struct vm_locked from_locked;
	const void *from_msg;
	struct ffa_mem_relinquish *relinquish_request;
	uint32_t message_buffer_size;
	struct ffa_value ret;
	uint32_t length;

	from_locked = vm_lock(from);
	from_msg = from->mailbox.send;

	if (from_msg == NULL) {
		dlog_verbose("TX buffer not setup.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Calculate length from relinquish descriptor before copying. We will
	 * check again later to make sure it hasn't changed.
	 */
	length = sizeof(struct ffa_mem_relinquish) +
		 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
			 sizeof(ffa_vm_id_t);
	/*
	 * Copy the relinquish descriptor to an internal buffer, so that the
	 * caller can't change it underneath us.
	 */
	relinquish_request =
		(struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
	message_buffer_size = cpu_get_buffer_size(current->cpu);
	if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
		dlog_verbose("Relinquish message too long.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}
	memcpy_s(relinquish_request, message_buffer_size, from_msg, length);

	if (sizeof(struct ffa_mem_relinquish) +
		    relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
	    length) {
		dlog_verbose(
			"Endpoint count changed while copying to internal "
			"buffer.\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	ret = ffa_memory_relinquish(from_locked, relinquish_request,
				    &api_page_pool);

out:
	vm_unlock(&from_locked);
	return ret;
}
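
/*
 * Illustrative sketch, not part of the API: the wire length of a relinquish
 * request, as computed twice above (before and after the copy) to detect a
 * descriptor changing under our feet. This helper is hypothetical.
 */
static uint32_t example_relinquish_length(const struct ffa_mem_relinquish *req)
{
	return sizeof(struct ffa_mem_relinquish) +
	       req->endpoint_count * sizeof(ffa_vm_id_t);
}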

static struct ffa_value ffa_mem_reclaim_tee(struct vm_locked to_locked,
					    struct vm_locked from_locked,
					    ffa_memory_handle_t handle,
					    ffa_memory_region_flags_t flags,
					    struct cpu *cpu)
{
	struct ffa_value tee_ret;
	uint32_t length;
	uint32_t fragment_length;
	struct ffa_memory_region *memory_region =
		(struct ffa_memory_region *)cpu_get_buffer(cpu);
	uint32_t message_buffer_size = cpu_get_buffer_size(cpu);
	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);

	CHECK(request_length <= HF_MAILBOX_SIZE);

	/* Retrieve memory region information from the TEE. */
	tee_ret = arch_tee_call(
		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
				   .arg1 = request_length,
				   .arg2 = request_length});
	if (tee_ret.func == FFA_ERROR_32) {
		dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2);
		return tee_ret;
	}
	if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
		dlog_verbose(
			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
			tee_ret.func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	length = tee_ret.arg1;
	fragment_length = tee_ret.arg2;

	if (fragment_length > HF_MAILBOX_SIZE ||
	    fragment_length > message_buffer_size) {
		dlog_verbose("Invalid fragment length %d (max %d).\n",
			     fragment_length, HF_MAILBOX_SIZE);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* TODO: Support fragmentation. */
	if (fragment_length != length) {
		dlog_verbose(
			"Message fragmentation not yet supported (fragment "
			"length %d but length %d).\n",
			fragment_length, length);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the memory region descriptor to an internal buffer, so that the
	 * sender can't change it underneath us.
	 */
	memcpy_s(memory_region, message_buffer_size,
		 from_locked.vm->mailbox.send, fragment_length);

	/*
	 * Validate that transition is allowed (e.g. that caller is owner),
	 * forward the reclaim request to the TEE, and update page tables.
	 */
	return ffa_memory_tee_reclaim(to_locked, handle, memory_region,
				      flags & FFA_MEM_RECLAIM_CLEAR,
				      &api_page_pool);
}

struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
				     ffa_memory_region_flags_t flags,
				     struct vcpu *current)
{
	struct vm *to = current->vm;
	struct ffa_value ret;

	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		struct vm_locked to_locked = vm_lock(to);

		ret = ffa_memory_reclaim(to_locked, handle,
					 flags & FFA_MEM_RECLAIM_CLEAR,
					 &api_page_pool);

		vm_unlock(&to_locked);
	} else {
		struct vm *from = vm_find(HF_TEE_VM_ID);
		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);

		ret = ffa_mem_reclaim_tee(vm_to_from_lock.vm1,
					  vm_to_from_lock.vm2, handle, flags,
					  current->cpu);

		vm_unlock(&vm_to_from_lock.vm1);
		vm_unlock(&vm_to_from_lock.vm2);
	}

	return ret;
}
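
/*
 * Illustrative sketch, not part of the API: the dispatch test used above to
 * decide whether a handle was allocated by the hypervisor or by the TEE.
 * This helper is hypothetical.
 */
static bool example_handle_is_hypervisor_allocated(ffa_memory_handle_t handle)
{
	return (handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	       FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
}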