/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/ffa.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

static_assert(sizeof(struct ffa_partition_info) == 8,
	      "Partition information descriptor size doesn't match the one in "
	      "the FF-A 1.0 EAC specification, Table 82.");

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */

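/*
 * Illustrative sketch only (an assumption, not the actual definition): the
 * lowest-address-first rule above is what `sl_lock_both()` is expected to
 * implement, roughly:
 *
 *	void sl_lock_both(struct spinlock *a, struct spinlock *b)
 *	{
 *		if (a < b) {
 *			sl_lock(a);
 *			sl_lock(b);
 *		} else {
 *			sl_lock(b);
 *			sl_lock(a);
 *		}
 *	}
 */
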
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Gets the target VM's vCPU:
 * If the VM is UP then return its single vCPU.
 * If the VM is MP then return the vCPU whose index matches the current CPU
 * index.
 */
static struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
{
	ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
	struct vcpu *vcpu = NULL;

	if (vm->vcpu_count == 1) {
		vcpu = vm_get_vcpu(vm, 0);
	} else if (current_cpu_index < vm->vcpu_count) {
		vcpu = vm_get_vcpu(vm, current_cpu_index);
	}

	return vcpu;
}

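/*
 * For example (illustrative): an MP partition with vcpu_count == 8 queried
 * from physical CPU 3 yields its vCPU 3; a UP partition always yields vCPU 0;
 * an MP partition with fewer vCPUs than the current CPU index yields NULL.
 */
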
/**
 * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
 * is given as argument of the function.
 *
 * Called to change the context between SPs for direct messaging (when Hafnium
 * is the SPMC), and in the context of the remaining 'api_switch_to_*'
 * functions.
 *
 * This function works for partitions that are:
 * - UP migratable.
 * - MP with pinned Execution Contexts.
 */
static struct vcpu *api_switch_to_vm(struct vcpu *current,
				     struct ffa_value to_ret,
				     enum vcpu_state vcpu_state,
				     ffa_vm_id_t to_id)
{
	struct vm *to_vm = vm_find(to_id);
	struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current);

	CHECK(next != NULL);

	/* Set the return value for the target VM. */
	arch_regs_set_retval(&next->regs, to_ret);

	/* Set the current vCPU state. */
	sl_lock(&current->lock);
	current->state = vcpu_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause FFA_RUN to return and the primary VM to regain control of the
 * CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct ffa_value primary_ret,
					  enum vcpu_state secondary_state)
{
	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret.func = FFA_INTERRUPT_32;
				/*
				 * primary_ret.arg1 should already be set to the
				 * current VM ID and vCPU ID.
				 */
				primary_ret.arg2 = 0;
			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	return api_switch_to_vm(current, primary_ret, secondary_state,
				HF_PRIMARY_VM_ID);
}

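/*
 * Caller-side sketch (an assumption about the primary VM's scheduler, for
 * illustration only): on an HF_FFA_RUN_WAIT_FOR_INTERRUPT or FFA_MSG_WAIT
 * return, the scheduler is expected to use arg2 as the sleep bound, e.g.:
 *
 *	if (ret.arg2 == FFA_SLEEP_INDEFINITE) {
 *		block_vcpu(vm_id, vcpu_id);
 *	} else {
 *		block_vcpu_timeout_ns(vm_id, vcpu_id, ret.arg2);
 *	}
 *
 * block_vcpu() and block_vcpu_timeout_ns() are hypothetical scheduler
 * helpers, not Hafnium APIs.
 */
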
/**
 * Choose next vCPU to run to be the counterpart vCPU in the other
 * world (run the normal world if currently running in the secure
 * world). Set current vCPU state to the given vcpu_state parameter.
 * Set FF-A return values to the target vCPU in the other world.
 *
 * Called in context of a direct message response from a secure
 * partition to a VM.
 */
struct vcpu *api_switch_to_other_world(struct vcpu *current,
				       struct ffa_value other_world_ret,
				       enum vcpu_state vcpu_state)
{
	return api_switch_to_vm(current, other_world_ret, vcpu_state,
				HF_OTHER_WORLD_ID);
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}

/**
 * Returns true if the given vCPU is executing in context of an
 * FFA_MSG_SEND_DIRECT_REQ invocation.
 */
static bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
	return locked.vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}

/**
 * Returns true if the VM owning the given vCPU supports managed exit and the
 * vCPU is currently processing a managed exit.
 */
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
	return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
		vcpu_locked.vcpu->processing_managed_exit);
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

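/*
 * Note (assumption about the encoding, for reference): ffa_vm_vcpu() packs
 * the VM ID into the upper 16 bits of arg1 and the vCPU index into the lower
 * 16 bits, i.e. effectively ((uint32_t)vm_id << 16) | vcpu_index, matching
 * the packing used for FFA_MSG_SEND sender/receiver IDs later in this file.
 */
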
/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * Returns to the primary VM to allow this CPU to be used for other tasks as
 * the vCPU does not have work to do at this moment. The current vCPU is marked
 * as ready to be scheduled again.
 */
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return ret;
	}

	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);
	vcpu_unlock(&current_locked);

	if (is_direct_request_ongoing) {
		return ffa_error(FFA_DENIED);
	}

	*next = api_switch_to_primary(
		current,
		(struct ffa_value){.func = FFA_YIELD_32,
				   .arg1 = ffa_vm_vcpu(current->vm->id,
						       vcpu_index(current))},
		VCPU_STATE_READY);

	return ret;
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if
 * it is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAKE_UP,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);

	dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

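/**
 * Returns, in the caller's RX buffer, information about the partitions that
 * match the given UUID; a Null UUID matches all partitions. The number of
 * descriptors written is returned in w2.
 */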
struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
					    const struct ffa_uuid *uuid)
{
	struct vm *current_vm = current->vm;
	struct vm_locked current_vm_locked;
	ffa_vm_count_t vm_count = 0;
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	struct ffa_value ret;
	uint32_t size;
	struct ffa_partition_info partitions[MAX_VMS];

	/*
	 * Iterate through the VMs to find the ones with a matching UUID.
	 * A Null UUID retrieves information for all VMs.
	 */
	for (uint16_t index = 0; index < vm_get_count(); ++index) {
		const struct vm *vm = vm_find_index(index);

		if (uuid_is_null || ffa_uuid_equal(uuid, &vm->uuid)) {
			partitions[vm_count].vm_id = vm->id;
			partitions[vm_count].vcpu_count = vm->vcpu_count;
			partitions[vm_count].properties =
				plat_ffa_partition_properties(current_vm->id,
							      vm);

			++vm_count;
		}
	}

	/* Unrecognized UUID: does not match any of the VMs and is not Null. */
	if (vm_count == 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	size = vm_count * sizeof(partitions[0]);
	if (size > FFA_MSG_PAYLOAD_MAX) {
		dlog_error(
			"Partition information does not fit in the VM's RX "
			"buffer.\n");
		return ffa_error(FFA_NO_MEMORY);
	}

	/*
	 * Partition information is returned in the VM's RX buffer, which is
	 * why the lock is needed.
	 */
	current_vm_locked = vm_lock(current_vm);

	if (msg_receiver_busy(current_vm_locked, NULL, false)) {
		/*
		 * Can't retrieve the partition information if the mailbox is
		 * not available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		ret = ffa_error(FFA_BUSY);
		goto out_unlock;
	}

	/* Populate the VM's RX buffer with the partition information. */
	memcpy_s(current_vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, partitions,
		 size);
	current_vm->mailbox.recv_size = size;
	current_vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	current_vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
	current_vm->mailbox.state = MAILBOX_STATE_READ;

	/* Return the count of partition information descriptors in w2. */
	ret = (struct ffa_value){.func = FFA_SUCCESS_32, .arg2 = vm_count};

out_unlock:
	vm_unlock(&current_vm_locked);

	return ret;
}

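/*
 * Caller-side sketch (illustrative assumption, not part of this file): after
 * a successful FFA_PARTITION_INFO_GET, the caller consumes ret.arg2
 * descriptors from its RX buffer and then releases the buffer, e.g.:
 *
 *	const struct ffa_partition_info *info = rx_buffer;
 *
 *	for (ffa_vm_count_t i = 0; i < ret.arg2; i++) {
 *		handle_partition(info[i].vm_id, info[i].vcpu_count,
 *				 info[i].properties);
 *	}
 *	ffa_rx_release();
 *
 * handle_partition() is a hypothetical consumer; ffa_rx_release() refers to
 * the FFA_RX_RELEASE ABI.
 */
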
/**
 * Returns the ID of the VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * Returns the ID of the SPMC.
 */
struct ffa_value api_ffa_spm_id_get(void)
{
#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
	/*
	 * Return the SPMC ID that was fetched during FF-A
	 * initialization.
	 */
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = arch_ffa_spmc_id_get()};
#else
	return ffa_error(FFA_NOT_SUPPORTED);
#endif
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the
 * vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 * - 0 on success if no further action is needed.
 * - 1 if it was called by the primary VM and the primary VM now needs to wake
 *   up or kick the target vCPU.
 */
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
				    uint32_t intid, struct vcpu *current,
				    struct vcpu **next)
{
	struct vcpu *target_vcpu = target_locked.vcpu;
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << intid_shift;
	int64_t ret = 0;

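	/*
	 * Worked example (illustrative): with INTERRUPT_REGISTER_BITS == 32,
	 * intid 33 gives intid_index = 1, intid_shift = 1 and intid_mask =
	 * 0x2, i.e. bit 1 of the second word of the per-vCPU enabled/pending
	 * interrupt bitmaps.
	 */
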
	/*
	 * We only need to change state and (maybe) trigger a virtual interrupt
	 * if it is enabled and was not previously pending. Otherwise we can
	 * skip everything except setting the pending bit.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	if ((target_vcpu->interrupts.interrupt_type[intid_index] &
	     intid_mask) == (INTERRUPT_TYPE_IRQ << intid_shift)) {
		vcpu_irq_count_increment(target_locked);
	} else {
		vcpu_fiq_count_increment(target_locked);
	}

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (vcpu_interrupt_count_get(target_locked) != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	return ret;
}

/* Wrapper around api_interrupt_inject_locked that locks the target vCPU. */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	int64_t ret;
	struct vcpu_locked target_locked;

	target_locked = vcpu_lock(target_vcpu);
	ret = api_interrupt_inject_locked(target_locked, intid, current, next);
	vcpu_unlock(&target_locked);

	return ret;
}

/**
 * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
 * or FFA_MSG_WAIT call.
 */
static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}

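/*
 * For reference (derived from the encoding above, illustrative only): a
 * recipient of this value can decode it as sender = ret.arg1 >> 16,
 * receiver = ret.arg1 & 0xffff and message size = ret.arg3.
 */
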
/**
 * Prepares the vCPU to run by updating its state and fetching whether a return
 * value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct ffa_value *run_ret)
{
	struct vcpu_locked vcpu_locked;
	struct vm_locked vm_locked;
	bool need_vm_lock;
	bool ret;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	vcpu_locked = vcpu_lock(vcpu);

#if SECURE_WORLD == 1

	if (vcpu_secondary_reset_and_start(vcpu_locked, vcpu->vm->secondary_ep,
					   0)) {
		dlog_verbose("%s secondary cold boot vmid %#x vcpu id %#x\n",
			     __func__, vcpu->vm->id, current->cpu->id);
	}

#endif

	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
	if (need_vm_lock) {
		vcpu_unlock(&vcpu_locked);
		vm_locked = vm_lock(vcpu->vm);
		vcpu_locked = vcpu_lock(vcpu);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running then the state should be
	 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
	 * stops running but while Hafnium is in the process of switching back
	 * to the primary there will be a brief period while the state has been
	 * updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for the
	 * purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = ffa_error(FFA_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %#x vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED_MAILBOX:
		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     ffa_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}
		/* Fall through. */
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
			break;
		}

		uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}
		}

		/*
		 * The vCPU is not ready to run, return the appropriate code to
		 * the primary which called vcpu_run.
		 */
		run_ret->func = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
					? FFA_MSG_WAIT_32
					: HF_FFA_RUN_WAIT_FOR_INTERRUPT;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;

		ret = false;
		goto out;

	case VCPU_STATE_READY:
		break;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	vcpu_unlock(&vcpu_locked);
	if (need_vm_lock) {
		vm_unlock(&vm_locked);
	}

	return ret;
}

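/**
 * Runs the given vCPU of the given VM on behalf of the primary VM's scheduler,
 * switching to the vCPU if it is ready to run or reporting the reason it
 * cannot run via the FF-A return value.
 */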
struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			     const struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);

	/* Only the primary VM can switch vCPUs. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		ret.arg2 = FFA_DENIED;
		goto out;
	}

	/* Only secondary VM vCPUs can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = FFA_INTERRUPT_32;
	ret.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx);
	ret.arg2 = 0;

out:
	return ret;
}

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_ffa_rxtx_map and
 * api_ffa_rx_release after they've succeeded. If a secondary VM is running and
 * there are waiters, it also switches back to the primary VM for it to wake
 * waiters up.
 */
static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
					  struct vcpu *current,
					  struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
		VCPU_STATE_READY);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages.
 */
static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
				    struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    uint32_t extra_attributes,
				    struct mpool *local_page_pool)
{
	bool ret;

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	return ret;
}

/**
 * Sanity checks and configures the send and receive pages in the VM stage-2
 * and hypervisor stage-1 page tables.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned, are the same or have invalid attributes.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
 * - FFA_SUCCESS on success if no further action is needed.
 */
struct ffa_value api_vm_configure_pages(
	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
	ipaddr_t send, ipaddr_t recv, uint32_t page_count,
	struct mpool *local_page_pool)
{
	struct ffa_value ret;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode;
	uint32_t orig_recv_mode;
	uint32_t extra_attributes;

	/* We only allow these to be set up once. */
	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
			     &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
			     &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Take memory ownership away from the VM and mark as shared. */
	uint32_t mode =
		MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W;
	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end, mode,
			     local_page_pool, NULL)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end, mode,
			     local_page_pool, NULL)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm_locked.vm->ptable, local_page_pool);
		goto fail_undo_send;
	}

	/* Get extra send/recv pages mapping attributes for the given VM ID. */
	extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);

	/*
	 * For EL0 partitions, since both the partition and the hypervisor code
	 * use the EL2&0 translation regime, it is critical to mark the mappings
	 * of the send and recv buffers as non-global in the TLB. For one, if we
	 * don't mark them as non-global, it would cause TLB conflicts since
	 * there would be an identity mapping with the non-global attribute in
	 * the partition's page tables, but another identity mapping in the
	 * hypervisor page tables with the global attribute. The other issue is
	 * one of security: we don't want other partitions to be able to access
	 * other partitions' buffers through cached translations.
	 */
	if (vm_locked.vm->el0_partition) {
		extra_attributes |= MM_MODE_NG;
	}

	if (!api_vm_configure_stage1(mm_stage1_locked, vm_locked, pa_send_begin,
				     pa_send_end, pa_recv_begin, pa_recv_end,
				     extra_attributes, local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	goto out;

fail_undo_send_and_recv:
	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			      orig_recv_mode, local_page_pool, NULL));

fail_undo_send:
	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
			      orig_send_mode, local_page_pool, NULL));
	ret = ffa_error(FFA_NO_MEMORY);

out:
	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared. Locking of the page tables combined with a local memory
 * pool ensures there will always be enough memory to recover from any errors
 * that arise. The stage-1 page tables must be locked so memory cannot be taken
 * by another core which could result in this transaction being unable to roll
 * back in the case of an error.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned, are the same or have invalid attributes.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters.
 */
struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
				  uint32_t page_count, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct ffa_value ret;
	struct vm_locked vm_locked;
	struct mm_stage1_locked mm_stage1_locked;
	struct mpool local_page_pool;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if any
	 * stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	vm_locked = vm_lock(vm);
	mm_stage1_locked = mm_lock_stage1();

	ret = api_vm_configure_pages(mm_stage1_locked, vm_locked, send, recv,
				     page_count, &local_page_pool);
	if (ret.func != FFA_SUCCESS_32) {
		goto exit;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(vm_locked, current, next);

exit:
	mpool_fini(&local_page_pool);

	mm_unlock_stage1(&mm_stage1_locked);
	vm_unlock(&vm_locked);

	return ret;
}

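/*
 * Caller-side sketch (illustrative assumption, not part of this file): a
 * partition typically registers its buffers through the FFA_RXTX_MAP ABI,
 * passing the TX base address in w1, the RX base address in w2 and the page
 * count in w3, with page_count == HF_MAILBOX_SIZE / FFA_PAGE_SIZE.
 */
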
/**
 * Unmaps the RX/TX buffer pair of a partition or partition manager from the
 * translation regime of the caller. Unmaps the region from the hypervisor's
 * address space and sets the memory region back to owned and exclusive for
 * the component. Since the memory region was mapped in the page tables when
 * the buffers were originally created, it can safely be remapped.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
 *   behalf of the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 */
struct ffa_value api_ffa_rxtx_unmap(ffa_vm_id_t allocator_id,
				    struct vcpu *current)
{
	struct vm *vm = current->vm;
	struct vm_locked vm_locked;
	struct mm_stage1_locked mm_stage1_locked;
	paddr_t send_pa_begin;
	paddr_t send_pa_end;
	paddr_t recv_pa_begin;
	paddr_t recv_pa_end;

	/*
	 * Check there is a buffer pair registered on behalf of the caller.
	 * Since forwarding is not yet supported the allocator ID MBZ.
	 */
	if (allocator_id != 0) {
		dlog_error(
			"Forwarding MAP/UNMAP from the hypervisor is not yet "
			"supported so vm id must be zero.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Get send and receive buffers. */
	if (vm->mailbox.send == NULL || vm->mailbox.recv == NULL) {
		dlog_error(
			"No buffer pair registered on behalf of the caller.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Currently a mailbox size of 1 page is assumed. */
	send_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.send));
	send_pa_end = pa_add(send_pa_begin, HF_MAILBOX_SIZE);
	recv_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.recv));
	recv_pa_end = pa_add(recv_pa_begin, HF_MAILBOX_SIZE);

	vm_locked = vm_lock(vm);
	mm_stage1_locked = mm_lock_stage1();

	/*
	 * Set the memory region of the buffers back to the default mode
	 * for the VM. Since this memory region was already mapped for the
	 * RXTX buffers we can safely remap them.
	 */
	CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
			      MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
			      NULL));

	CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
			      MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
			      NULL));

	/* Unmap the buffers in the partition manager. */
	CHECK(mm_unmap(mm_stage1_locked, send_pa_begin, send_pa_end,
		       &api_page_pool));
	CHECK(mm_unmap(mm_stage1_locked, recv_pa_begin, recv_pa_end,
		       &api_page_pool));

	vm->mailbox.send = NULL;
	vm->mailbox.recv = NULL;

	mm_unlock_stage1(&mm_stage1_locked);
	vm_unlock(&vm_locked);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 */
static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
				    struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct ffa_value primary_ret = {
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (to.vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = ffa_msg_recv_return(to.vm);

		to.vm->mailbox.state = MAILBOX_STATE_READ;
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
		return ret;
	}

	to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct ffa_value call = ffa_msg_recv_return(to.vm);

		ret = arch_other_world_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via FF-A.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
	}

	return ret;
}

1249/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001250 * Copies data from the sender's send buffer to the recipient's receive buffer
1251 * and notifies the recipient.
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +00001252 *
1253 * If the recipient's receive buffer is busy, it can optionally register the
1254 * caller to be notified when the recipient's receive buffer becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001255 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001256struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
1257 ffa_vm_id_t receiver_vm_id, uint32_t size,
1258 uint32_t attributes, struct vcpu *current,
1259 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001260{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001261 struct vm *from = current->vm;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001262 struct vm *to;
Andrew Walbran82d6d152019-12-24 15:02:06 +00001263 struct vm_locked to_locked;
Andrew Walbran70bc8622019-10-07 14:15:58 +01001264 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001265 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001266 struct vcpu_locked current_locked;
1267 bool is_direct_request_ongoing;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001268 bool notify =
1269 (attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;
Andrew Scull19503262018-09-20 14:48:39 +01001270
Andrew Walbran70bc8622019-10-07 14:15:58 +01001271 /* Ensure sender VM ID corresponds to the current VM. */
1272 if (sender_vm_id != from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001273 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001274 }
1275
1276 /* Disallow reflexive requests as this suggests an error in the VM. */
1277 if (receiver_vm_id == from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001278 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001279 }
1280
1281 /* Limit the size of transfer. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001282 if (size > FFA_MSG_PAYLOAD_MAX) {
1283 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001284 }
1285
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001286 /*
1287 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1288 * invocation.
1289 */
1290 current_locked = vcpu_lock(current);
1291 is_direct_request_ongoing =
1292 is_ffa_direct_msg_request_ongoing(current_locked);
1293 vcpu_unlock(&current_locked);
1294
1295 if (is_direct_request_ongoing) {
1296 return ffa_error(FFA_DENIED);
1297 }
1298
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001299 /* Ensure the receiver VM exists. */
1300 to = vm_find(receiver_vm_id);
1301 if (to == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001302 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001303 }
1304
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001305 /*
Andrew Walbran70bc8622019-10-07 14:15:58 +01001306 * Check that the sender has configured its send buffer. If the tx
1307 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
1308 * be safely accessed after releasing the lock since the tx mailbox
1309 * address can only be configured once.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001310 */
1311 sl_lock(&from->lock);
1312 from_msg = from->mailbox.send;
1313 sl_unlock(&from->lock);
1314
1315 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001316 return ffa_error(FFA_INVALID_PARAMETERS);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001317 }
1318
Andrew Walbran82d6d152019-12-24 15:02:06 +00001319 to_locked = vm_lock(to);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001320
Andrew Walbran82d6d152019-12-24 15:02:06 +00001321 if (msg_receiver_busy(to_locked, from, notify)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001322 ret = ffa_error(FFA_BUSY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001323 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001324 }
1325
Andrew Walbran82d6d152019-12-24 15:02:06 +00001326 /* Copy data. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001327 memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
Andrew Walbran82d6d152019-12-24 15:02:06 +00001328 to->mailbox.recv_size = size;
1329 to->mailbox.recv_sender = sender_vm_id;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001330 to->mailbox.recv_func = FFA_MSG_SEND_32;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001331 ret = deliver_msg(to_locked, sender_vm_id, current, next);
Andrew Scullaa039b32018-10-04 15:02:26 +01001332
1333out:
Andrew Walbran82d6d152019-12-24 15:02:06 +00001334 vm_unlock(&to_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001335
Wedson Almeida Filho80eb4a32018-11-30 17:11:15 +00001336 return ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001337}
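
/*
 * Illustrative sketch (not compiled): the sender-side sequence that
 * reaches api_ffa_msg_send. It assumes a vmapi `ffa_msg_send` wrapper
 * with the same parameter order as the API above, and a previously
 * mapped TX buffer `tx_buffer`; both names are assumptions about the
 * caller's environment.
 */
#if 0
static struct ffa_value example_send(void *tx_buffer, ffa_vm_id_t self,
				     ffa_vm_id_t receiver, const void *msg,
				     uint32_t len)
{
	/* The payload must be copied into the TX mailbox first. */
	memcpy_s(tx_buffer, FFA_MSG_PAYLOAD_MAX, msg, len);

	/* len must not exceed FFA_MSG_PAYLOAD_MAX or the send fails. */
	return ffa_msg_send(self, receiver, len, 0);
}
#endif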
1338
1339/**
Andrew Scullec52ddf2019-08-20 10:41:01 +01001340 * Checks whether the vCPU's attempt to block for a message has already been
1341 * interrupted or whether it is allowed to block.
1342 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001343bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
Andrew Scullec52ddf2019-08-20 10:41:01 +01001344{
Manish Pandey35e452f2021-02-18 21:36:34 +00001345 struct vcpu_locked current_locked;
Andrew Scullec52ddf2019-08-20 10:41:01 +01001346 bool interrupted;
1347
Manish Pandey35e452f2021-02-18 21:36:34 +00001348 current_locked = vcpu_lock(current);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001349
1350 /*
1351 * Don't block if there are enabled and pending interrupts, to match
1352 * behaviour of wait_for_interrupt.
1353 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001354 interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001355
Manish Pandey35e452f2021-02-18 21:36:34 +00001356 vcpu_unlock(&current_locked);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001357
1358 return interrupted;
1359}
1360
1361/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001362 * Receives a message from the mailbox. If one isn't available, this function
1363 * can optionally block the caller until one becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001364 *
Andrew Scullaa039b32018-10-04 15:02:26 +01001365 * No new messages can be received until the mailbox has been cleared.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001366 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001367struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
1368 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001369{
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001370 bool is_direct_request_ongoing;
1371 struct vcpu_locked current_locked;
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001372 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001373 struct ffa_value return_code;
J-Alvesb37fd082020-10-22 12:29:21 +01001374 bool is_from_secure_world =
1375 (current->vm->id & HF_VM_ID_WORLD_MASK) != 0;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001376
Andrew Scullaa039b32018-10-04 15:02:26 +01001377 /*
1378 * The primary VM will receive messages as a status code from running
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001379 * vCPUs and must not call this function.
Andrew Scullaa039b32018-10-04 15:02:26 +01001380 */
J-Alvesb37fd082020-10-22 12:29:21 +01001381 if (!is_from_secure_world && vm->id == HF_PRIMARY_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001382 return ffa_error(FFA_NOT_SUPPORTED);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001383 }
1384
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001385 /*
1386 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1387 * invocation.
1388 */
1389 current_locked = vcpu_lock(current);
1390 is_direct_request_ongoing =
1391 is_ffa_direct_msg_request_ongoing(current_locked);
1392 vcpu_unlock(&current_locked);
1393
1394 if (is_direct_request_ongoing) {
1395 return ffa_error(FFA_DENIED);
1396 }
1397
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001398 sl_lock(&vm->lock);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001399
Andrew Scullaa039b32018-10-04 15:02:26 +01001400 /* Return pending messages without blocking. */
Andrew Sculld6ee1102019-04-05 22:12:42 +01001401 if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
1402 vm->mailbox.state = MAILBOX_STATE_READ;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001403 return_code = ffa_msg_recv_return(vm);
Jose Marinho3e2442f2019-03-12 13:30:37 +00001404 goto out;
1405 }
1406
1407 /* No pending message so fail if not allowed to block. */
1408 if (!block) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001409 return_code = ffa_error(FFA_RETRY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001410 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001411 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001412
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001413 /*
Jose Marinho3e2442f2019-03-12 13:30:37 +00001414	 * From this point onward, this call can only be interrupted or a message
1415	 * received. If a message is received, the return value will be set at
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001416	 * that time to FFA_SUCCESS.
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001417 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001418 return_code = ffa_error(FFA_INTERRUPTED);
1419 if (api_ffa_msg_recv_block_interrupted(current)) {
Andrew Scullaa039b32018-10-04 15:02:26 +01001420 goto out;
1421 }
1422
J-Alvesb37fd082020-10-22 12:29:21 +01001423 if (is_from_secure_world) {
1424 /* Return to other world if caller is a SP. */
1425 *next = api_switch_to_other_world(
1426 current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
1427 VCPU_STATE_BLOCKED_MAILBOX);
1428 } else {
1429 /* Switch back to primary VM to block. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001430 struct ffa_value run_return = {
1431 .func = FFA_MSG_WAIT_32,
1432 .arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
Andrew Walbranb4816552018-12-05 17:35:42 +00001433 };
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001434
Andrew Walbranb4816552018-12-05 17:35:42 +00001435 *next = api_switch_to_primary(current, run_return,
Andrew Sculld6ee1102019-04-05 22:12:42 +01001436 VCPU_STATE_BLOCKED_MAILBOX);
Andrew Walbranb4816552018-12-05 17:35:42 +00001437 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001438out:
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001439 sl_unlock(&vm->lock);
1440
Jose Marinho3e2442f2019-03-12 13:30:37 +00001441 return return_code;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001442}
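
/*
 * Illustrative sketch (not compiled): a secondary VM's blocking receive
 * loop over the API above. `ffa_msg_wait` is assumed to be the vmapi
 * wrapper mapping to block=true; `example_process_message` is a
 * hypothetical consumer of the RX buffer contents.
 */
#if 0
static void example_receive_loop(void)
{
	for (;;) {
		struct ffa_value ret = ffa_msg_wait();

		if (ret.func == FFA_ERROR_32) {
			/* e.g. FFA_INTERRUPTED: service interrupts, retry. */
			continue;
		}

		/* ret now describes the message placed in the RX buffer. */
		example_process_message(ret);
	}
}
#endif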
1443
1444/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001445 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
1446 * by this function, the caller must have called api_mailbox_send before with
1447 * the notify argument set to true, and this call must have failed because the
1448 * mailbox was not available.
1449 *
1450 * It should be called repeatedly to retrieve a list of VMs.
1451 *
1452 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
1453 * became writable.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001454 */
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001455int64_t api_mailbox_writable_get(const struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001456{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001457 struct vm *vm = current->vm;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001458 struct wait_entry *entry;
Andrew Scullc0e569a2018-10-02 18:05:21 +01001459 int64_t ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001460
1461 sl_lock(&vm->lock);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001462 if (list_empty(&vm->mailbox.ready_list)) {
1463 ret = -1;
1464 goto exit;
1465 }
1466
1467 entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
1468 ready_links);
1469 list_remove(&entry->ready_links);
Andrew Walbranaad8f982019-12-04 10:56:39 +00001470 ret = vm_id_for_wait_entry(vm, entry);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001471
1472exit:
1473 sl_unlock(&vm->lock);
1474 return ret;
1475}
1476
1477/**
1478 * Retrieves the next VM waiting to be notified that the mailbox of the
1479 * specified VM became writable. Only primary VMs are allowed to call this.
1480 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +00001481 * Returns -1 on failure or if there are no waiters; the VM id of the next
1482 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001483 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001484int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001485{
1486 struct vm *vm;
1487 struct vm_locked locked;
1488 struct wait_entry *entry;
1489 struct vm *waiting_vm;
1490
1491 /* Only primary VMs are allowed to call this function. */
1492 if (current->vm->id != HF_PRIMARY_VM_ID) {
1493 return -1;
1494 }
1495
Andrew Walbran42347a92019-05-09 13:59:03 +01001496 vm = vm_find(vm_id);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001497 if (vm == NULL) {
1498 return -1;
1499 }
1500
Fuad Tabbaed294af2019-12-20 10:43:01 +00001501 /* Check if there are outstanding notifications from given VM. */
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001502 locked = vm_lock(vm);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001503 entry = api_fetch_waiter(locked);
1504 vm_unlock(&locked);
1505
1506 if (entry == NULL) {
1507 return -1;
1508 }
1509
1510 /* Enqueue notification to waiting VM. */
1511 waiting_vm = entry->waiting_vm;
1512
1513 sl_lock(&waiting_vm->lock);
1514 if (list_empty(&entry->ready_links)) {
1515 list_append(&waiting_vm->mailbox.ready_list,
1516 &entry->ready_links);
1517 }
1518 sl_unlock(&waiting_vm->lock);
1519
1520 return waiting_vm->id;
1521}
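
/*
 * Illustrative sketch (not compiled): the primary VM draining the waiter
 * list after a mailbox becomes writable, calling repeatedly as the doc
 * comment above describes. `hf_mailbox_waiter_get` is assumed to be the
 * hypercall wrapper in vmapi/hf/call.h; `example_schedule_vm` is
 * hypothetical.
 */
#if 0
static void example_kick_waiters(ffa_vm_id_t vm_id)
{
	int64_t waiter;

	/* Fetch waiters until -1 signals that the list is empty. */
	while ((waiter = hf_mailbox_waiter_get(vm_id)) != -1) {
		example_schedule_vm((ffa_vm_id_t)waiter);
	}
}
#endif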
1522
1523/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001524 * Releases the caller's mailbox so that a new message can be received. The
1525 * caller must have copied out all data they wish to preserve as new messages
1526 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001527 *
1528 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001529 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
1530 * - FFA_SUCCESS on success if no further action is needed.
1531 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001532 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001533 * hf_mailbox_waiter_get.
1534 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001535struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001536{
1537 struct vm *vm = current->vm;
1538 struct vm_locked locked;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001539 struct ffa_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001540
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001541 locked = vm_lock(vm);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001542 switch (vm->mailbox.state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001543 case MAILBOX_STATE_EMPTY:
Andrew Sculld6ee1102019-04-05 22:12:42 +01001544 case MAILBOX_STATE_RECEIVED:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001545 ret = ffa_error(FFA_DENIED);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001546 break;
1547
Andrew Sculld6ee1102019-04-05 22:12:42 +01001548 case MAILBOX_STATE_READ:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001549 ret = api_waiter_result(locked, current, next);
Andrew Sculld6ee1102019-04-05 22:12:42 +01001550 vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001551 break;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001552 }
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001553 vm_unlock(&locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001554
1555 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001556}
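
/*
 * Illustrative sketch (not compiled): the receive/read/release cycle a
 * VM is expected to follow so new messages can be delivered. The vmapi
 * wrappers `ffa_msg_wait`, `ffa_msg_send_size` and `ffa_rx_release` are
 * assumed; `example_copy_out` is hypothetical.
 */
#if 0
static void example_consume_one(const void *rx_buffer)
{
	struct ffa_value ret = ffa_msg_wait();
	uint32_t size = ffa_msg_send_size(ret);

	/* Copy out the payload before releasing the buffer. */
	example_copy_out(rx_buffer, size);

	/* The mailbox state moves READ -> EMPTY, unblocking senders. */
	ffa_rx_release();
}
#endif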
Andrew Walbran318f5732018-11-20 16:23:42 +00001557
1558/**
1559 * Enables or disables a given interrupt ID for the calling vCPU.
1560 *
1561 * Returns 0 on success, or -1 if the intid is invalid.
1562 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001563int64_t api_interrupt_enable(uint32_t intid, bool enable,
1564 enum interrupt_type type, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001565{
Manish Pandey35e452f2021-02-18 21:36:34 +00001566 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001567 uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
Manish Pandey35e452f2021-02-18 21:36:34 +00001568 uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
1569 uint32_t intid_mask = 1U << intid_shift;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001570
Andrew Walbran318f5732018-11-20 16:23:42 +00001571 if (intid >= HF_NUM_INTIDS) {
1572 return -1;
1573 }
1574
Manish Pandey35e452f2021-02-18 21:36:34 +00001575 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001576 if (enable) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001577 /*
1578 * If it is pending and was not enabled before, increment the
1579 * count.
1580 */
1581 if (current->interrupts.interrupt_pending[intid_index] &
1582 ~current->interrupts.interrupt_enabled[intid_index] &
1583 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001584 if ((current->interrupts.interrupt_type[intid_index] &
1585 intid_mask) ==
1586 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1587 vcpu_irq_count_increment(current_locked);
1588 } else {
1589 vcpu_fiq_count_increment(current_locked);
1590 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001591 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001592 current->interrupts.interrupt_enabled[intid_index] |=
1593 intid_mask;
Manish Pandey35e452f2021-02-18 21:36:34 +00001594
1595 if (type == INTERRUPT_TYPE_IRQ) {
1596 current->interrupts.interrupt_type[intid_index] &=
1597 ~intid_mask;
1598 } else if (type == INTERRUPT_TYPE_FIQ) {
1599 current->interrupts.interrupt_type[intid_index] |=
1600 intid_mask;
1601 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001602 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001603 /*
1604 * If it is pending and was enabled before, decrement the count.
1605 */
1606 if (current->interrupts.interrupt_pending[intid_index] &
1607 current->interrupts.interrupt_enabled[intid_index] &
1608 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001609 if ((current->interrupts.interrupt_type[intid_index] &
1610 intid_mask) ==
1611 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1612 vcpu_irq_count_decrement(current_locked);
1613 } else {
1614 vcpu_fiq_count_decrement(current_locked);
1615 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001616 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001617 current->interrupts.interrupt_enabled[intid_index] &=
1618 ~intid_mask;
Manish Pandey35e452f2021-02-18 21:36:34 +00001619 current->interrupts.interrupt_type[intid_index] &= ~intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001620 }
1621
Manish Pandey35e452f2021-02-18 21:36:34 +00001622 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001623 return 0;
1624}
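
/*
 * Illustrative sketch (not compiled): enabling a virtual interrupt as an
 * IRQ from a VM, assuming an `hf_interrupt_enable` hypercall wrapper
 * with the same signature as the API above.
 */
#if 0
static bool example_enable_timer_irq(void)
{
	/* Returns 0 on success, -1 if the INTID is out of range. */
	return hf_interrupt_enable(HF_VIRTUAL_TIMER_INTID, true,
				   INTERRUPT_TYPE_IRQ) == 0;
}
#endif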
1625
1626/**
1627 * Returns the ID of the next pending interrupt for the calling vCPU, and
1628 * acknowledges it (i.e. marks it as no longer pending). Returns
1629 * HF_INVALID_INTID if there are no pending interrupts.
1630 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001631uint32_t api_interrupt_get(struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001632{
1633 uint8_t i;
1634 uint32_t first_interrupt = HF_INVALID_INTID;
Manish Pandey35e452f2021-02-18 21:36:34 +00001635 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001636
1637 /*
1638 * Find the first enabled and pending interrupt ID, return it, and
1639 * deactivate it.
1640 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001641 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001642 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
1643 uint32_t enabled_and_pending =
1644 current->interrupts.interrupt_enabled[i] &
1645 current->interrupts.interrupt_pending[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001646
Andrew Walbran318f5732018-11-20 16:23:42 +00001647 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001648 uint8_t bit_index = ctz(enabled_and_pending);
Manish Pandey35e452f2021-02-18 21:36:34 +00001649 uint32_t intid_mask = 1U << bit_index;
1650
Andrew Walbran3d84a262018-12-13 14:41:19 +00001651 /*
1652 * Mark it as no longer pending and decrement the count.
1653 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001654 current->interrupts.interrupt_pending[i] &= ~intid_mask;
1655
1656 if ((current->interrupts.interrupt_type[i] &
1657 intid_mask) == (INTERRUPT_TYPE_IRQ << bit_index)) {
1658 vcpu_irq_count_decrement(current_locked);
1659 } else {
1660 vcpu_fiq_count_decrement(current_locked);
1661 }
1662
Andrew Walbran3d84a262018-12-13 14:41:19 +00001663 first_interrupt =
1664 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001665 break;
1666 }
1667 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001668
Manish Pandey35e452f2021-02-18 21:36:34 +00001669 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001670 return first_interrupt;
1671}
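
/*
 * Illustrative sketch (not compiled): a VM's interrupt handler
 * acknowledging every pending virtual interrupt, assuming an
 * `hf_interrupt_get` wrapper over the API above; `example_dispatch` is
 * hypothetical. HF_INVALID_INTID terminates the loop.
 */
#if 0
static void example_irq_handler(void)
{
	uint32_t intid;

	/* Each call acknowledges one enabled-and-pending interrupt. */
	while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
		example_dispatch(intid);
	}
}
#endif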
1672
1673/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001674 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001675 * given VM and vCPU.
1676 */
1677static inline bool is_injection_allowed(uint32_t target_vm_id,
1678 struct vcpu *current)
1679{
1680 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001681
Andrew Walbran318f5732018-11-20 16:23:42 +00001682 /*
1683 * The primary VM is allowed to inject interrupts into any VM. Secondary
1684 * VMs are only allowed to inject interrupts into their own vCPUs.
1685 */
1686 return current_vm_id == HF_PRIMARY_VM_ID ||
1687 current_vm_id == target_vm_id;
1688}
1689
1690/**
1691 * Injects a virtual interrupt of the given ID into the given target vCPU.
1692 * This doesn't cause the vCPU to actually be run immediately; it will be taken
1693 * when the vCPU is next run, which is up to the scheduler.
1694 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001695 * Returns:
1696 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1697 * ID is invalid, or the current VM is not allowed to inject interrupts to
1698 * the target VM.
1699 * - 0 on success if no further action is needed.
1700 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1701 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001702 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001703int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
1704 ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001705 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001706{
Andrew Walbran318f5732018-11-20 16:23:42 +00001707 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001708 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001709
1710 if (intid >= HF_NUM_INTIDS) {
1711 return -1;
1712 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001713
Andrew Walbran318f5732018-11-20 16:23:42 +00001714 if (target_vm == NULL) {
1715 return -1;
1716 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001717
Andrew Walbran318f5732018-11-20 16:23:42 +00001718 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001719 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00001720 return -1;
1721 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001722
Andrew Walbran318f5732018-11-20 16:23:42 +00001723 if (!is_injection_allowed(target_vm_id, current)) {
1724 return -1;
1725 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001726
Andrew Walbrane1310df2019-04-29 17:28:28 +01001727 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001728
Manish Pandey35e452f2021-02-18 21:36:34 +00001729 dlog_verbose(
1730 "Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
1731 "%u\n",
1732 intid, target_vm_id, target_vcpu_idx, current->vm->id,
1733 vcpu_index(current));
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001734 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001735}
Andrew Scull6386f252018-12-06 13:29:10 +00001736
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001737/** Returns the version of the implemented FF-A specification. */
1738struct ffa_value api_ffa_version(uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001739{
1740 /*
1741 * Ensure that both major and minor revision representation occupies at
1742 * most 15 bits.
1743 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001744 static_assert(0x8000 > FFA_VERSION_MAJOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001745 "Major revision representation takes more than 15 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001746 static_assert(0x10000 > FFA_VERSION_MINOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001747 "Minor revision representation takes more than 16 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001748 if (requested_version & FFA_VERSION_RESERVED_BIT) {
Andrew Walbran9fd29072020-04-22 12:12:14 +01001749 /* Invalid encoding, return an error. */
J-Alves13318e32021-02-22 17:21:00 +00001750 return (struct ffa_value){.func = (uint32_t)FFA_NOT_SUPPORTED};
Andrew Walbran9fd29072020-04-22 12:12:14 +01001751 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001752
Daniel Boulby6e32c612021-02-17 15:09:41 +00001753 return ((struct ffa_value){.func = FFA_VERSION_COMPILED});
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001754}
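
/*
 * Illustrative sketch (not compiled): how a caller-supplied version word
 * decomposes under the checks above. Bit 31 is reserved and MBZ, the
 * major revision sits in bits [30:16] and the minor in bits [15:0], so
 * FF-A v1.0 is encoded as 0x10000.
 */
#if 0
static void example_decode_version(uint32_t version)
{
	uint16_t major = (version >> 16) & 0x7fff;
	uint16_t minor = version & 0xffff;

	/* e.g. version == 0x10000 gives major 1, minor 0. */
	(void)major;
	(void)minor;
}
#endif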
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001755
1756int64_t api_debug_log(char c, struct vcpu *current)
1757{
Andrew Sculld54e1be2019-08-20 11:09:42 +01001758 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001759 struct vm *vm = current->vm;
1760 struct vm_locked vm_locked = vm_lock(vm);
1761
Andrew Sculld54e1be2019-08-20 11:09:42 +01001762 if (c == '\n' || c == '\0') {
1763 flush = true;
1764 } else {
1765 vm->log_buffer[vm->log_buffer_length++] = c;
1766 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
1767 }
1768
1769 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01001770 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
1771 vm->log_buffer_length);
1772 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001773 }
1774
1775 vm_unlock(&vm_locked);
1776
1777 return 0;
1778}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001779
1780/**
1781 * Discovery function returning information about the implementation of optional
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001782 * FF-A interfaces.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001783 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001784struct ffa_value api_ffa_features(uint32_t function_id)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001785{
1786 switch (function_id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001787 case FFA_ERROR_32:
1788 case FFA_SUCCESS_32:
1789 case FFA_INTERRUPT_32:
1790 case FFA_VERSION_32:
1791 case FFA_FEATURES_32:
1792 case FFA_RX_RELEASE_32:
1793 case FFA_RXTX_MAP_64:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001794 case FFA_RXTX_UNMAP_32:
Fuad Tabbae4efcc32020-07-16 15:37:27 +01001795 case FFA_PARTITION_INFO_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001796 case FFA_ID_GET_32:
1797 case FFA_MSG_POLL_32:
1798 case FFA_MSG_WAIT_32:
1799 case FFA_YIELD_32:
1800 case FFA_RUN_32:
1801 case FFA_MSG_SEND_32:
1802 case FFA_MEM_DONATE_32:
1803 case FFA_MEM_LEND_32:
1804 case FFA_MEM_SHARE_32:
1805 case FFA_MEM_RETRIEVE_REQ_32:
1806 case FFA_MEM_RETRIEVE_RESP_32:
1807 case FFA_MEM_RELINQUISH_32:
1808 case FFA_MEM_RECLAIM_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001809 case FFA_MSG_SEND_DIRECT_RESP_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001810 case FFA_MSG_SEND_DIRECT_RESP_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001811 case FFA_MSG_SEND_DIRECT_REQ_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001812 case FFA_MSG_SEND_DIRECT_REQ_32:
J-Alves3829fc02021-03-18 12:49:18 +00001813#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +00001814 /* FF-A v1.1 features. */
1815 case FFA_SPM_ID_GET_32:
J-Alves3829fc02021-03-18 12:49:18 +00001816#endif
1817 return (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001818 default:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001819 return ffa_error(FFA_NOT_SUPPORTED);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001820 }
1821}
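
/*
 * Illustrative sketch (not compiled): probing for an optional interface
 * before relying on it, assuming a vmapi `ffa_features` wrapper that
 * issues FFA_FEATURES_32 with the queried function ID in w1.
 */
#if 0
static bool example_supports_rxtx_unmap(void)
{
	struct ffa_value ret = ffa_features(FFA_RXTX_UNMAP_32);

	return ret.func == FFA_SUCCESS_32;
}
#endif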
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001822
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001823/**
J-Alves645eabe2021-02-22 16:08:27 +00001824 * FF-A specification states that x2/w2 Must Be Zero for direct messaging
1825 * interfaces.
1826 */
1827static inline bool api_ffa_dir_msg_is_arg2_zero(struct ffa_value args)
1828{
1829 return args.arg2 == 0U;
1830}
1831
1832/**
J-Alves76d99af2021-03-10 17:42:11 +00001833 * Limits size of arguments in ffa_value structure to 32-bit.
1834 */
1835static struct ffa_value api_ffa_value_copy32(struct ffa_value args)
1836{
1837 return (struct ffa_value){
1838 .func = (uint32_t)args.func,
1839 .arg1 = (uint32_t)args.arg1,
1840 .arg2 = (uint32_t)0,
1841 .arg3 = (uint32_t)args.arg3,
1842 .arg4 = (uint32_t)args.arg4,
1843 .arg5 = (uint32_t)args.arg5,
1844 .arg6 = (uint32_t)args.arg6,
1845 .arg7 = (uint32_t)args.arg7,
1846 };
1847}
1848
1849/**
1850 * Helper to copy direct message payload, depending on SMC used and expected
1851 * registers size.
1852 */
1853static struct ffa_value api_ffa_dir_msg_value(struct ffa_value args)
1854{
1855 if (args.func == FFA_MSG_SEND_DIRECT_REQ_32 ||
1856 args.func == FFA_MSG_SEND_DIRECT_RESP_32) {
1857 return api_ffa_value_copy32(args);
1858 }
1859
1860 return (struct ffa_value){
1861 .func = args.func,
1862 .arg1 = args.arg1,
1863 .arg2 = 0,
1864 .arg3 = args.arg3,
1865 .arg4 = args.arg4,
1866 .arg5 = args.arg5,
1867 .arg6 = args.arg6,
1868 .arg7 = args.arg7,
1869 };
1870}
1871
1872/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001873 * Send an FF-A direct message request.
1874 */
1875struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
1876 ffa_vm_id_t receiver_vm_id,
1877 struct ffa_value args,
1878 struct vcpu *current,
1879 struct vcpu **next)
1880{
J-Alves17228f72021-04-20 17:13:19 +01001881 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001882 struct vm *receiver_vm;
1883 struct vcpu *receiver_vcpu;
1884 struct two_vcpu_locked vcpus_locked;
1885
J-Alves645eabe2021-02-22 16:08:27 +00001886 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
1887 return ffa_error(FFA_INVALID_PARAMETERS);
1888 }
1889
Olivier Deprez55a189e2021-06-09 15:45:27 +02001890 if (!plat_ffa_is_direct_request_valid(current, sender_vm_id,
1891 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00001892 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001893 }
1894
Olivier Deprez55a189e2021-06-09 15:45:27 +02001895 if (plat_ffa_direct_request_forward(receiver_vm_id, args, &ret)) {
J-Alves17228f72021-04-20 17:13:19 +01001896 return ret;
1897 }
1898
1899 ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
1900
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001901 receiver_vm = vm_find(receiver_vm_id);
1902 if (receiver_vm == NULL) {
1903 return ffa_error(FFA_INVALID_PARAMETERS);
1904 }
1905
1906 /*
1907 * Per PSA FF-A EAC spec section 4.4.1 the firmware framework supports
1908 * UP (migratable) or MP partitions with a number of vCPUs matching the
1909 * number of PEs in the system. It further states that MP partitions
1910 * accepting direct request messages cannot migrate.
1911 */
J-Alvesad6a0432021-04-09 16:06:21 +01001912 receiver_vcpu = api_ffa_get_vm_vcpu(receiver_vm, current);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001913 if (receiver_vcpu == NULL) {
1914 return ffa_error(FFA_INVALID_PARAMETERS);
1915 }
1916
1917 vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
1918
1919 /*
1920 * If destination vCPU is executing or already received an
1921 * FFA_MSG_SEND_DIRECT_REQ then return to caller hinting recipient is
1922 * busy. There is a brief period of time where the vCPU state has
1923 * changed but regs_available is still false thus consider this case as
1924 * the vCPU not yet ready to receive a direct message request.
1925 */
1926 if (is_ffa_direct_msg_request_ongoing(vcpus_locked.vcpu1) ||
1927 receiver_vcpu->state == VCPU_STATE_RUNNING ||
1928 !receiver_vcpu->regs_available) {
1929 ret = ffa_error(FFA_BUSY);
1930 goto out;
1931 }
1932
1933 if (atomic_load_explicit(&receiver_vcpu->vm->aborting,
1934 memory_order_relaxed)) {
1935 if (receiver_vcpu->state != VCPU_STATE_ABORTED) {
Olivier Deprezf92e5d42020-11-13 16:00:54 +01001936 dlog_notice("Aborting VM %#x vCPU %u\n",
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001937 receiver_vcpu->vm->id,
1938 vcpu_index(receiver_vcpu));
1939 receiver_vcpu->state = VCPU_STATE_ABORTED;
1940 }
1941
1942 ret = ffa_error(FFA_ABORTED);
1943 goto out;
1944 }
1945
1946 switch (receiver_vcpu->state) {
1947 case VCPU_STATE_OFF:
1948 case VCPU_STATE_RUNNING:
1949 case VCPU_STATE_ABORTED:
1950 case VCPU_STATE_READY:
1951 case VCPU_STATE_BLOCKED_INTERRUPT:
1952 ret = ffa_error(FFA_BUSY);
1953 goto out;
1954 case VCPU_STATE_BLOCKED_MAILBOX:
1955 /*
1956 * Expect target vCPU to be blocked after having called
1957 * ffa_msg_wait or sent a direct message response.
1958 */
1959 break;
1960 }
1961
1962	/* Inject a timer interrupt if one is pending. */
1963 if (arch_timer_pending(&receiver_vcpu->regs)) {
Manish Pandeya5f39fb2020-09-11 09:47:11 +01001964 api_interrupt_inject_locked(vcpus_locked.vcpu1,
1965 HF_VIRTUAL_TIMER_INTID, current,
1966 NULL);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001967
1968 arch_timer_mask(&receiver_vcpu->regs);
1969 }
1970
1971 /* The receiver vCPU runs upon direct message invocation */
1972 receiver_vcpu->cpu = current->cpu;
1973 receiver_vcpu->state = VCPU_STATE_RUNNING;
1974 receiver_vcpu->regs_available = false;
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001975 receiver_vcpu->direct_request_origin_vm_id = sender_vm_id;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001976
J-Alves76d99af2021-03-10 17:42:11 +00001977 arch_regs_set_retval(&receiver_vcpu->regs, api_ffa_dir_msg_value(args));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001978
1979 current->state = VCPU_STATE_BLOCKED_MAILBOX;
1980
1981 /* Switch to receiver vCPU targeted to by direct msg request */
1982 *next = receiver_vcpu;
1983
1984 /*
1985 * Since this flow will lead to a VM switch, the return value will not
1986 * be applied to current vCPU.
1987 */
1988
1989out:
1990 sl_unlock(&receiver_vcpu->lock);
1991 sl_unlock(&current->lock);
1992
1993 return ret;
1994}
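
/*
 * Illustrative sketch (not compiled): issuing a 32-bit direct request
 * from a VM. Per the FF-A ABI, sender and receiver IDs are packed into
 * w1 as ((sender << 16) | receiver), the payload travels in w3-w7, and
 * w2 MBZ as enforced above. The `ffa_call` helper is assumed.
 */
#if 0
static struct ffa_value example_direct_req(ffa_vm_id_t self,
					   ffa_vm_id_t receiver)
{
	return ffa_call((struct ffa_value){
		.func = FFA_MSG_SEND_DIRECT_REQ_32,
		.arg1 = ((uint32_t)self << 16) | receiver,
		.arg2 = 0,	    /* MBZ, or FFA_INVALID_PARAMETERS. */
		.arg3 = 0x12345678, /* Example payload. */
	});
}
#endif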
1995
1996/**
1997 * Send an FF-A direct message response.
1998 */
1999struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
2000 ffa_vm_id_t receiver_vm_id,
2001 struct ffa_value args,
2002 struct vcpu *current,
2003 struct vcpu **next)
2004{
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002005 struct vcpu_locked current_locked;
J-Alves645eabe2021-02-22 16:08:27 +00002006
2007 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
2008 return ffa_error(FFA_INVALID_PARAMETERS);
2009 }
2010
J-Alves76d99af2021-03-10 17:42:11 +00002011 struct ffa_value to_ret = api_ffa_dir_msg_value(args);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002012
Olivier Deprez55a189e2021-06-09 15:45:27 +02002013 if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
2014 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00002015 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002016 }
2017
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002018 current_locked = vcpu_lock(current);
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002019 if (api_ffa_is_managed_exit_ongoing(current_locked)) {
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002020 /*
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002021 * No need for REQ/RESP state management as managed exit does
2022 * not have corresponding REQ pair.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002023 */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002024 if (receiver_vm_id != HF_PRIMARY_VM_ID) {
2025 vcpu_unlock(&current_locked);
2026 return ffa_error(FFA_DENIED);
2027 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002028
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002029 plat_interrupts_set_priority_mask(0xff);
2030 current->processing_managed_exit = false;
2031 } else {
2032 /*
2033 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
2034 * defined originator.
2035 */
2036 if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
2037 /*
2038 * Sending direct response but direct request origin
2039 * vCPU is not set.
2040 */
2041 vcpu_unlock(&current_locked);
2042 return ffa_error(FFA_DENIED);
2043 }
2044
2045 if (current->direct_request_origin_vm_id != receiver_vm_id) {
2046 vcpu_unlock(&current_locked);
2047 return ffa_error(FFA_DENIED);
2048 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002049 }
2050
2051 /* Clear direct request origin for the caller. */
2052 current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
2053
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002054 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002055
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002056 if (!vm_id_is_current_world(receiver_vm_id)) {
J-Alvesfe7f7372020-11-09 11:32:12 +00002057 *next = api_switch_to_other_world(current, to_ret,
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002058 VCPU_STATE_BLOCKED_MAILBOX);
2059 } else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
J-Alvesfe7f7372020-11-09 11:32:12 +00002060 *next = api_switch_to_primary(current, to_ret,
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002061 VCPU_STATE_BLOCKED_MAILBOX);
J-Alvesfe7f7372020-11-09 11:32:12 +00002062 } else if (vm_id_is_current_world(receiver_vm_id)) {
2063 /*
2064		 * The receiver_vm_id is expected to belong to an SP; otherwise
2065		 * 'plat_ffa_is_direct_response_valid' should have made the
2066		 * function return an error before getting to this point.
2067 */
2068 *next = api_switch_to_vm(current, to_ret,
2069 VCPU_STATE_BLOCKED_MAILBOX,
2070 receiver_vm_id);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002071 } else {
2072 panic("Invalid direct message response invocation");
2073 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002074
2075 return (struct ffa_value){.func = FFA_INTERRUPT_32};
2076}
2077
J-Alves84658fc2021-06-17 14:37:32 +01002078static bool api_memory_region_check_flags(
2079 struct ffa_memory_region *memory_region, uint32_t share_func)
2080{
2081 switch (share_func) {
2082 case FFA_MEM_SHARE_32:
2083 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2084 0U) {
2085 return false;
2086 }
2087 /* Intentional fall-through */
2088 case FFA_MEM_LEND_32:
2089 case FFA_MEM_DONATE_32: {
2090 /* Bits 31:2 Must Be Zero. */
2091 ffa_memory_receiver_flags_t to_mask =
2092 ~(FFA_MEMORY_REGION_FLAG_CLEAR |
2093 FFA_MEMORY_REGION_FLAG_TIME_SLICE);
2094
2095 if ((memory_region->flags & to_mask) != 0U) {
2096 return false;
2097 }
2098 break;
2099 }
2100 default:
2101 panic("Check for mem send calls only.\n");
2102 }
2103
2104	/* All reserved flag bits have been checked to be zero above. */
2105 return true;
2106}
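
/*
 * Illustrative sketch (not compiled): flag words accepted and rejected
 * by the check above. FFA_MEM_SHARE_32 must not request a clear, while
 * FFA_MEM_LEND_32 may; bits other than clear/time-slice MBZ for all
 * three memory send calls.
 */
#if 0
static void example_flag_checks(struct ffa_memory_region *region)
{
	region->flags = 0;
	CHECK(api_memory_region_check_flags(region, FFA_MEM_SHARE_32));

	region->flags = FFA_MEMORY_REGION_FLAG_CLEAR;
	CHECK(!api_memory_region_check_flags(region, FFA_MEM_SHARE_32));
	CHECK(api_memory_region_check_flags(region, FFA_MEM_LEND_32));
}
#endif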
2107
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002108struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
2109 uint32_t fragment_length, ipaddr_t address,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002110 uint32_t page_count, struct vcpu *current)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002111{
2112 struct vm *from = current->vm;
2113 struct vm *to;
2114 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002115 struct ffa_memory_region *memory_region;
2116 struct ffa_value ret;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002117
2118 if (ipa_addr(address) != 0 || page_count != 0) {
2119 /*
2120 * Hafnium only supports passing the descriptor in the TX
2121 * mailbox.
2122 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002123 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002124 }
2125
Andrew Walbranca808b12020-05-15 17:22:28 +01002126 if (fragment_length > length) {
2127 dlog_verbose(
2128 "Fragment length %d greater than total length %d.\n",
2129 fragment_length, length);
2130 return ffa_error(FFA_INVALID_PARAMETERS);
2131 }
2132 if (fragment_length < sizeof(struct ffa_memory_region) +
2133 sizeof(struct ffa_memory_access)) {
2134 dlog_verbose(
2135 "Initial fragment length %d smaller than header size "
2136 "%d.\n",
2137 fragment_length,
2138 sizeof(struct ffa_memory_region) +
2139 sizeof(struct ffa_memory_access));
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002140 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002141 }
2142
2143 /*
2144 * Check that the sender has configured its send buffer. If the TX
2145 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2146 * be safely accessed after releasing the lock since the TX mailbox
2147 * address can only be configured once.
2148 */
2149 sl_lock(&from->lock);
2150 from_msg = from->mailbox.send;
2151 sl_unlock(&from->lock);
2152
2153 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002154 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002155 }
2156
2157 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002158 * Copy the memory region descriptor to a fresh page from the memory
2159 * pool. This prevents the sender from changing it underneath us, and
2160 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002161 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002162 if (fragment_length > HF_MAILBOX_SIZE ||
2163 fragment_length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002164 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002165 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002166 memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002167 if (memory_region == NULL) {
2168 dlog_verbose("Failed to allocate memory region copy.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002169 return ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002170 }
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002171 memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002172
2173 /* The sender must match the caller. */
2174 if (memory_region->sender != from->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002175 dlog_verbose("Memory region sender doesn't match caller.\n");
J-Alves99948662021-07-28 18:07:04 +01002176 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002177 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002178 }
2179
J-Alves84658fc2021-06-17 14:37:32 +01002180 if (!api_memory_region_check_flags(memory_region, share_func)) {
2181 dlog_verbose(
2182 "Memory region reserved arguments must be zero.\n");
2183 ret = ffa_error(FFA_INVALID_PARAMETERS);
2184 goto out;
2185 }
2186
Andrew Walbrana65a1322020-04-06 19:32:32 +01002187 if (memory_region->receiver_count != 1) {
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002188 /* Hafnium doesn't support multi-way memory sharing for now. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002189 dlog_verbose(
2190 "Multi-way memory sharing not supported (got %d "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002191 "endpoint memory access descriptors, expected 1).\n",
2192 memory_region->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002193 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002194 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002195 }
2196
2197 /*
2198 * Ensure that the receiver VM exists and isn't the same as the sender.
2199 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002200 to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002201 if (to == NULL || to == from) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002202 dlog_verbose("Invalid receiver.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002203 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002204 goto out;
2205 }
2206
2207 if (to->id == HF_TEE_VM_ID) {
2208 /*
2209 * The 'to' VM lock is only needed in the case that it is the
2210 * TEE VM.
2211 */
2212 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2213
2214 if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002215 ret = ffa_error(FFA_BUSY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002216 goto out_unlock;
2217 }
2218
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002219 ret = ffa_memory_tee_send(
2220 vm_to_from_lock.vm2, vm_to_from_lock.vm1, memory_region,
2221 length, fragment_length, share_func, &api_page_pool);
2222 /*
2223		 * ffa_memory_tee_send takes ownership of the memory_region, so
2224 * make sure we don't free it.
2225 */
2226 memory_region = NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002227
2228 out_unlock:
2229 vm_unlock(&vm_to_from_lock.vm1);
2230 vm_unlock(&vm_to_from_lock.vm2);
2231 } else {
2232 struct vm_locked from_locked = vm_lock(from);
2233
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002234 ret = ffa_memory_send(from_locked, memory_region, length,
2235 fragment_length, share_func,
2236 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002237 /*
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002238 * ffa_memory_send takes ownership of the memory_region, so
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002239 * make sure we don't free it.
2240 */
2241 memory_region = NULL;
2242
2243 vm_unlock(&from_locked);
2244 }
2245
2246out:
2247 if (memory_region != NULL) {
2248 mpool_free(&api_page_pool, memory_region);
2249 }
2250
2251 return ret;
2252}
2253
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002254struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
2255 uint32_t fragment_length,
2256 ipaddr_t address, uint32_t page_count,
2257 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002258{
2259 struct vm *to = current->vm;
2260 struct vm_locked to_locked;
2261 const void *to_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002262 struct ffa_memory_region *retrieve_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002263 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002264 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002265
2266 if (ipa_addr(address) != 0 || page_count != 0) {
2267 /*
2268 * Hafnium only supports passing the descriptor in the TX
2269 * mailbox.
2270 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002271 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002272 }
2273
Andrew Walbrana65a1322020-04-06 19:32:32 +01002274 if (fragment_length != length) {
2275 dlog_verbose("Fragmentation not yet supported.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002276 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002277 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002278
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002279 retrieve_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002280 (struct ffa_memory_region *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002281 message_buffer_size = cpu_get_buffer_size(current->cpu);
2282 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2283 dlog_verbose("Retrieve request too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002284 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002285 }
2286
2287 to_locked = vm_lock(to);
2288 to_msg = to->mailbox.send;
2289
2290 if (to_msg == NULL) {
2291 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002292 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002293 goto out;
2294 }
2295
2296 /*
2297 * Copy the retrieve request descriptor to an internal buffer, so that
2298 * the caller can't change it underneath us.
2299 */
2300 memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
2301
2302 if (msg_receiver_busy(to_locked, NULL, false)) {
2303 /*
2304 * Can't retrieve memory information if the mailbox is not
2305 * available.
2306 */
2307 dlog_verbose("RX buffer not ready.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002308 ret = ffa_error(FFA_BUSY);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002309 goto out;
2310 }
2311
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002312 ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
2313 &api_page_pool);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002314
2315out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002316 vm_unlock(&to_locked);
2317 return ret;
2318}
2319
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002320struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002321{
2322 struct vm *from = current->vm;
2323 struct vm_locked from_locked;
2324 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002325 struct ffa_mem_relinquish *relinquish_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002326 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002327 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002328 uint32_t length;
2329
2330 from_locked = vm_lock(from);
2331 from_msg = from->mailbox.send;
2332
2333 if (from_msg == NULL) {
2334 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002335 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002336 goto out;
2337 }
2338
2339 /*
2340 * Calculate length from relinquish descriptor before copying. We will
2341 * check again later to make sure it hasn't changed.
2342 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002343 length = sizeof(struct ffa_mem_relinquish) +
2344 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
2345 sizeof(ffa_vm_id_t);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002346 /*
2347 * Copy the relinquish descriptor to an internal buffer, so that the
2348 * caller can't change it underneath us.
2349 */
2350 relinquish_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002351 (struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002352 message_buffer_size = cpu_get_buffer_size(current->cpu);
2353 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2354 dlog_verbose("Relinquish message too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002355 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002356 goto out;
2357 }
2358 memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
2359
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002360 if (sizeof(struct ffa_mem_relinquish) +
2361 relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002362 length) {
2363 dlog_verbose(
2364 "Endpoint count changed while copying to internal "
2365 "buffer.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002366 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002367 goto out;
2368 }
2369
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002370 ret = ffa_memory_relinquish(from_locked, relinquish_request,
2371 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002372
2373out:
2374 vm_unlock(&from_locked);
2375 return ret;
2376}
2377
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002378struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
2379 ffa_memory_region_flags_t flags,
2380 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002381{
2382 struct vm *to = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002383 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002384
Olivier Deprez55a189e2021-06-09 15:45:27 +02002385 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00002386 struct vm_locked to_locked = vm_lock(to);
2387
Andrew Walbranca808b12020-05-15 17:22:28 +01002388 ret = ffa_memory_reclaim(to_locked, handle, flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002389 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002390
Andrew Walbran290b0c92020-02-03 16:37:14 +00002391 vm_unlock(&to_locked);
2392 } else {
2393 struct vm *from = vm_find(HF_TEE_VM_ID);
2394 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2395
Andrew Walbranca808b12020-05-15 17:22:28 +01002396 ret = ffa_memory_tee_reclaim(vm_to_from_lock.vm1,
2397 vm_to_from_lock.vm2, handle, flags,
2398 &api_page_pool);
2399
2400 vm_unlock(&vm_to_from_lock.vm1);
2401 vm_unlock(&vm_to_from_lock.vm2);
2402 }
2403
2404 return ret;
2405}
2406
2407struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
2408 uint32_t fragment_offset,
2409 ffa_vm_id_t sender_vm_id,
2410 struct vcpu *current)
2411{
2412 struct vm *to = current->vm;
2413 struct vm_locked to_locked;
2414 struct ffa_value ret;
2415
2416 /* Sender ID MBZ at virtual instance. */
2417 if (sender_vm_id != 0) {
2418 return ffa_error(FFA_INVALID_PARAMETERS);
2419 }
2420
2421 to_locked = vm_lock(to);
2422
2423 if (msg_receiver_busy(to_locked, NULL, false)) {
2424 /*
2425 * Can't retrieve memory information if the mailbox is not
2426 * available.
2427 */
2428 dlog_verbose("RX buffer not ready.\n");
2429 ret = ffa_error(FFA_BUSY);
2430 goto out;
2431 }
2432
2433 ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
2434 &api_page_pool);
2435
2436out:
2437 vm_unlock(&to_locked);
2438 return ret;
2439}
2440
struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
				     uint32_t fragment_length,
				     ffa_vm_id_t sender_vm_id,
				     struct vcpu *current)
{
	struct vm *from = current->vm;
	const void *from_msg;
	void *fragment_copy;
	struct ffa_value ret;

	/* Sender ID MBZ at virtual instance. */
	if (sender_vm_id != 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the TX
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it
	 * can be safely accessed after releasing the lock since the TX
	 * mailbox address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Copy the fragment to a fresh page from the memory pool. This
	 * prevents the sender from changing it underneath us, and also lets
	 * us keep it around in the share state table if needed.
	 */
	if (fragment_length > HF_MAILBOX_SIZE ||
	    fragment_length > MM_PPOOL_ENTRY_SIZE) {
		dlog_verbose(
			"Fragment length %d larger than mailbox size %d.\n",
			fragment_length, HF_MAILBOX_SIZE);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}
	if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
	    fragment_length % sizeof(struct ffa_memory_region_constituent) !=
		    0) {
		dlog_verbose("Invalid fragment length %d.\n", fragment_length);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}
	fragment_copy = mpool_alloc(&api_page_pool);
	if (fragment_copy == NULL) {
		dlog_verbose("Failed to allocate fragment copy.\n");
		return ffa_error(FFA_NO_MEMORY);
	}
	memcpy_s(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);

	/*
	 * Hafnium doesn't support fragmentation of memory retrieve requests
	 * (because it doesn't support caller-specified mappings, so a request
	 * will never be larger than a single page), so this must be part of a
	 * memory send (i.e. donate, lend or share) request.
	 *
	 * We can tell from the handle whether the memory transaction is for
	 * the TEE or not.
	 */
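	/*
	 * FFA_MEMORY_HANDLE_ALLOCATOR_MASK isolates the allocator field that
	 * is encoded in every FF-A memory handle; a handle whose field equals
	 * FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR was allocated by this
	 * hypervisor, so the transaction never involved the TEE.
	 */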
	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		struct vm_locked from_locked = vm_lock(from);

		ret = ffa_memory_send_continue(from_locked, fragment_copy,
					       fragment_length, handle,
					       &api_page_pool);
		/*
		 * `ffa_memory_send_continue` takes ownership of the
		 * fragment_copy, so we don't need to free it here.
		 */
		vm_unlock(&from_locked);
	} else {
		struct vm *to = vm_find(HF_TEE_VM_ID);
		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);

		/*
		 * The TEE RX buffer state is checked in
		 * `ffa_memory_tee_send_continue` rather than here, as we need
		 * to return `FFA_MEM_FRAG_RX` with the current offset rather
		 * than `FFA_ERROR` with `FFA_BUSY` when it is busy.
		 */

		ret = ffa_memory_tee_send_continue(
			vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy,
			fragment_length, handle, &api_page_pool);
		/*
		 * `ffa_memory_tee_send_continue` takes ownership of the
		 * fragment_copy, so we don't need to free it here.
		 */

		vm_unlock(&vm_to_from_lock.vm1);
		vm_unlock(&vm_to_from_lock.vm2);
	}

	return ret;
}

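/**
 * Handles the FFA_SECONDARY_EP_REGISTER ABI: records the entry point at
 * which the calling VM's secondary vCPUs should begin execution. The address
 * is stored as given; any validation of it is assumed to happen elsewhere
 * (e.g. when a secondary vCPU is booted).
 */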
struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
					       struct vcpu *current)
{
	struct vm_locked vm_locked;

	vm_locked = vm_lock(current->vm);
	vm_locked.vm->secondary_ep = entry_point;
	vm_unlock(&vm_locked);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

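/**
 * Handles the FFA_NOTIFICATION_BITMAP_CREATE ABI: allocates the
 * notifications bitmap for the given VM so that it can subsequently bind
 * and receive notifications. Whether the caller may create a bitmap for
 * `vm_id` is delegated to `plat_ffa_is_notifications_create_valid`.
 */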
struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
						    ffa_vcpu_count_t vcpu_count,
						    struct vcpu *current)
{
	if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
		dlog_verbose("Bitmap create for NWd VM IDs only (%x).\n",
			     vm_id);
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
}

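/**
 * Handles the FFA_NOTIFICATION_BITMAP_DESTROY ABI: frees the notifications
 * bitmap previously created for the given VM.
 */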
struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
						     struct vcpu *current)
{
	/*
	 * Validity of use of this interface is the same as for bitmap create.
	 */
	if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
		dlog_verbose("Bitmap destroy for NWd VM IDs only (%x).\n",
			     vm_id);
		return ffa_error(FFA_NOT_SUPPORTED);
	}

	return plat_ffa_notifications_bitmap_destroy(vm_id);
}

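/**
 * Handles the FFA_NOTIFICATION_BIND and FFA_NOTIFICATION_UNBIND ABIs: binds
 * or unbinds the notifications given in the bitmap between the sender and
 * the receiver, optionally making them per-vCPU.
 *
 * An illustrative caller-side sketch (the IDs, the bitmap value and
 * `my_vcpu` are made-up for the example, not defined in this file):
 *
 *	ffa_notifications_bitmap_t bitmap = UINT64_C(1) << 3;
 *	struct ffa_value ret = api_ffa_notification_update_bindings(
 *		sender_id, receiver_id, FFA_NOTIFICATION_FLAG_PER_VCPU,
 *		bitmap, true, my_vcpu);
 */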
struct ffa_value api_ffa_notification_update_bindings(
	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
	ffa_notifications_bitmap_t notifications, bool is_bind,
	struct vcpu *current)
{
	struct ffa_value ret = {.func = FFA_SUCCESS_32};
	struct vm_locked receiver_locked;
	const bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
	const ffa_vm_id_t id_to_update =
		is_bind ? sender_vm_id : HF_INVALID_VM_ID;
	const ffa_vm_id_t id_to_validate =
		is_bind ? HF_INVALID_VM_ID : sender_vm_id;

	if (!plat_ffa_is_notifications_bind_valid(current, sender_vm_id,
						  receiver_vm_id)) {
		dlog_verbose("Invalid use of notifications bind interface.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (notifications == 0U) {
		dlog_verbose("No notifications have been specified.\n");
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * This lookup assumes the receiver is the current VM, which has been
	 * enforced by `plat_ffa_is_notifications_bind_valid`.
	 */
	receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);

	if (receiver_locked.vm == NULL) {
		dlog_verbose("Receiver doesn't exist!\n");
		return ffa_error(FFA_DENIED);
	}

	if (!vm_are_notifications_enabled(receiver_locked)) {
		dlog_verbose("Notifications are not enabled.\n");
		ret = ffa_error(FFA_NOT_SUPPORTED);
		goto out;
	}

	if (is_bind && vm_id_is_current_world(sender_vm_id) &&
	    vm_find(sender_vm_id) == NULL) {
		dlog_verbose("Sender VM does not exist!\n");
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Can't bind/unbind notifications if at least one is bound to a
	 * different sender.
	 */
	if (!vm_notifications_validate_bound_sender(
		    receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
		    id_to_validate, notifications)) {
		dlog_verbose("Notifications are bound to another sender.\n");
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/*
	 * Check if there is a pending notification within those specified in
	 * the bitmap. Bindings can't be updated for notifications that are
	 * still pending.
	 */
	if (vm_are_notifications_pending(receiver_locked,
					 plat_ffa_is_vm_id(sender_vm_id),
					 notifications)) {
		dlog_verbose("Notifications within '%x' pending.\n",
			     notifications);
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	vm_notifications_update_bindings(
		receiver_locked, plat_ffa_is_vm_id(sender_vm_id), id_to_update,
		notifications, is_per_vcpu && is_bind);

out:
	vm_unlock(&receiver_locked);
	return ret;
}