/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/ffa.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

static_assert(sizeof(struct ffa_partition_info) == 8,
	      "Partition information descriptor size doesn't match the one in "
	      "the FF-A 1.0 EAC specification, Table 82.");

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */
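
/*
 * Illustrative example (not part of the original code): when a VM lock and
 * one of its vCPU locks are both needed, they must be acquired following the
 * partial order above and released in reverse, using the helpers already
 * used throughout this file:
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
 *	...critical section touching both structures...
 *	vcpu_unlock(&vcpu_locked);
 *	vm_unlock(&vm_locked);
 */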

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +010069/**
J-Alvesad6a0432021-04-09 16:06:21 +010070 * Get target VM vCPU:
71 * If VM is UP then return first vCPU.
72 * If VM is MP then return vCPU whose index matches current CPU index.
73 */
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -050074struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
J-Alvesad6a0432021-04-09 16:06:21 +010075{
76 ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
77 struct vcpu *vcpu = NULL;
78
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -050079 CHECK((vm != NULL) && (current != NULL));
80
J-Alvesad6a0432021-04-09 16:06:21 +010081 if (vm->vcpu_count == 1) {
82 vcpu = vm_get_vcpu(vm, 0);
83 } else if (current_cpu_index < vm->vcpu_count) {
84 vcpu = vm_get_vcpu(vm, current_cpu_index);
85 }
86
87 return vcpu;
88}
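
/*
 * Illustrative example (not part of the original code): for a UP partition
 * (vcpu_count == 1) the lookup above always resolves to vCPU 0, regardless of
 * which physical CPU the call is made from. For an MP partition with vCPUs
 * pinned to physical CPUs, a call made from the physical CPU with index 2
 * resolves to vCPU 2 provided the partition has at least three vCPUs;
 * otherwise NULL is returned.
 */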

/**
 * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
 * is given as argument of the function.
 *
 * Called to change the context between SPs for direct messaging (when Hafnium
 * is SPMC), and in the context of the remaining 'api_switch_to_*' functions.
 *
 * This function works for partitions that are:
 * - UP migratable.
 * - MP with pinned Execution Contexts.
 */
static struct vcpu *api_switch_to_vm(struct vcpu *current,
				     struct ffa_value to_ret,
				     enum vcpu_state vcpu_state,
				     ffa_vm_id_t to_id)
{
	struct vm *to_vm = vm_find(to_id);
	struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current);

	CHECK(next != NULL);

	/* Set the return value for the target VM. */
	arch_regs_set_retval(&next->regs, to_ret);

	/* Set the current vCPU state. */
	sl_lock(&current->lock);
	current->state = vcpu_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause FFA_RUN to return and the primary VM to regain control of the
 * CPU.
 */
struct vcpu *api_switch_to_primary(struct vcpu *current,
				   struct ffa_value primary_ret,
				   enum vcpu_state secondary_state)
{
	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret.func = FFA_INTERRUPT_32;
				/*
				 * primary_ret.arg1 should already be set to the
				 * current VM ID and vCPU ID.
				 */
				primary_ret.arg2 = 0;
			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	return api_switch_to_vm(current, primary_ret, secondary_state,
				HF_PRIMARY_VM_ID);
}
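
/*
 * Illustrative example (not part of the original code): if a secondary vCPU
 * blocks in FFA_MSG_WAIT with its arch timer enabled and 5 ms remaining, the
 * primary receives FFA_MSG_WAIT with arg2 = 5000000 (nanoseconds). If the
 * timer has already expired, the return value is rewritten to
 * FFA_INTERRUPT_32 with arg2 = 0 so the scheduler runs the vCPU again right
 * away.
 */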

/**
 * Choose next vCPU to run to be the counterpart vCPU in the other
 * world (run the normal world if currently running in the secure
 * world). Set current vCPU state to the given vcpu_state parameter.
 * Set FF-A return values to the target vCPU in the other world.
 *
 * Called in context of a direct message response from a secure
 * partition to a VM.
 */
struct vcpu *api_switch_to_other_world(struct vcpu *current,
				       struct ffa_value other_world_ret,
				       enum vcpu_state vcpu_state)
{
	return api_switch_to_vm(current, other_world_ret, vcpu_state,
				HF_OTHER_WORLD_ID);
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}

/**
 * Returns true if the given vCPU is executing in context of an
 * FFA_MSG_SEND_DIRECT_REQ invocation.
 */
static bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
	return locked.vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}

/**
 * Returns true if the VM owning the given vCPU supports managed exit and the
 * vCPU is currently processing a managed exit.
 */
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
	return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
		vcpu_locked.vcpu->processing_managed_exit);
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_PREEMPTED);
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * The current vCPU is blocked on some resource and needs to relinquish
 * control back to the execution context of the endpoint that originally
 * allocated cycles to it.
 */
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return ret;
	}

	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);
	vcpu_unlock(&current_locked);

	if (is_direct_request_ongoing) {
		return ffa_error(FFA_DENIED);
	}

	*next = api_switch_to_primary(
		current,
		(struct ffa_value){.func = FFA_YIELD_32,
				   .arg1 = ffa_vm_vcpu(current->vm->id,
						       vcpu_index(current))},
		VCPU_STATE_BLOCKED);

	return ret;
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if it
 * is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAKE_UP,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);

	dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
					    const struct ffa_uuid *uuid,
					    const uint32_t flags)
{
	struct vm *current_vm = current->vm;
	struct vm_locked current_vm_locked;
	ffa_vm_count_t vm_count = 0;
	bool count_flag = (flags & FFA_PARTITION_COUNT_FLAG_MASK) ==
			  FFA_PARTITION_COUNT_FLAG;
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	struct ffa_value ret;
	uint32_t size;
	struct ffa_partition_info partitions[2 * MAX_VMS];

	/* Bits 31:1 must be zero. */
	if ((flags & ~FFA_PARTITION_COUNT_FLAG) != 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * No need to count if we are only returning the number of partitions,
	 * as we already know it.
	 */
	if (uuid_is_null && count_flag) {
		vm_count = vm_get_count();
	} else {
		/*
		 * Iterate through the VMs to find the ones with a matching
		 * UUID. A Null UUID retrieves information for all VMs.
		 */
		for (uint16_t index = 0; index < vm_get_count(); ++index) {
			struct vm *vm = vm_find_index(index);

			if (uuid_is_null || ffa_uuid_equal(uuid, &vm->uuid)) {
				uint16_t array_index = vm_count;

				++vm_count;
				if (count_flag) {
					continue;
				}

				partitions[array_index].vm_id = vm->id;
				partitions[array_index].vcpu_count =
					vm->vcpu_count;
				partitions[array_index].properties =
					plat_ffa_partition_properties(
						current_vm->id, vm);
				partitions[array_index].properties |=
					vm_are_notifications_enabled(vm)
						? FFA_PARTITION_NOTIFICATION
						: 0;
			}
		}
	}

	/* If UUID is Null, vm_count must not be zero at this stage. */
	CHECK(!uuid_is_null || vm_count != 0);

	/*
	 * When running the Hypervisor:
	 * - If UUID is Null the Hypervisor forwards the query to the SPMC for
	 *   it to fill with secure partitions information.
	 * - If UUID is non-Null vm_count may be zero because the UUID matches
	 *   a secure partition and the query is forwarded to the SPMC.
	 * When running the SPMC:
	 * - If UUID is non-Null and vm_count is zero it means there is no such
	 *   partition identified in the system.
	 */
	plat_ffa_partition_info_get_forward(uuid, flags, partitions, &vm_count);

	/*
	 * Unrecognized UUID: does not match any of the VMs (or SPs)
	 * and is not Null.
	 */
	if (vm_count == 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * If the count flag is set we don't need to return the partition info
	 * descriptors.
	 */
	if (count_flag) {
		return (struct ffa_value){.func = FFA_SUCCESS_32,
					  .arg2 = vm_count};
	}

	size = vm_count * sizeof(partitions[0]);
	if (size > FFA_MSG_PAYLOAD_MAX) {
		dlog_error(
			"Partition information does not fit in the VM's RX "
			"buffer.\n");
		return ffa_error(FFA_NO_MEMORY);
	}

	/*
	 * Partition information is returned in the VM's RX buffer, which is why
	 * the lock is needed.
	 */
	current_vm_locked = vm_lock(current_vm);

	if (msg_receiver_busy(current_vm_locked, NULL, false)) {
		/*
		 * Can't retrieve memory information if the mailbox is not
		 * available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		ret = ffa_error(FFA_BUSY);
		goto out_unlock;
	}

	/* Populate the VM's RX buffer with the partition information. */
	memcpy_s(current_vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, partitions,
		 size);
	current_vm->mailbox.recv_size = size;

	/* Sender is Hypervisor in the normal world (TEE in secure world). */
	current_vm->mailbox.recv_sender = HF_VM_ID_BASE;
	current_vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
	current_vm->mailbox.state = MAILBOX_STATE_READ;

	/* Return the count of partition information descriptors in w2. */
	ret = (struct ffa_value){.func = FFA_SUCCESS_32, .arg2 = vm_count};

out_unlock:
	vm_unlock(&current_vm_locked);

	return ret;
}
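
/*
 * Illustrative example (not part of the original code, and assuming
 * FFA_PARTITION_COUNT_FLAG is bit 0 of the flags field, as the bits-31:1
 * check above implies): a caller passing a Null UUID with flags == 0x1 gets
 * back only the number of partitions in w2/arg2, with nothing written to its
 * RX buffer. With flags == 0x0 the same query additionally fills the caller's
 * RX buffer with one 8-byte ffa_partition_info descriptor per partition,
 * while w2 still carries the descriptor count.
 */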
/**
 * Returns the ID of the VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
struct ffa_value api_ffa_spm_id_get(void)
{
#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
	/*
	 * Return the SPMC ID that was fetched during FF-A
	 * initialization.
	 */
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = arch_ffa_spmc_id_get()};
#else
	return ffa_error(FFA_NOT_SUPPORTED);
#endif
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the vCPU
 * is next run, which is up to the scheduler.
 *
 * Returns:
 * - 0 on success if no further action is needed.
 * - 1 if it was called by the primary VM and the primary VM now needs to wake
 *   up or kick the target vCPU.
 */
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
				    uint32_t intid, struct vcpu *current,
				    struct vcpu **next)
{
	struct vcpu *target_vcpu = target_locked.vcpu;
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << intid_shift;
	int64_t ret = 0;

	/*
	 * We only need to change state and (maybe) trigger a virtual interrupt
	 * if it is enabled and was not previously pending. Otherwise we can
	 * skip everything except setting the pending bit.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	if ((target_vcpu->interrupts.interrupt_type[intid_index] &
	     intid_mask) == (INTERRUPT_TYPE_IRQ << intid_shift)) {
		vcpu_irq_count_increment(target_locked);
	} else {
		vcpu_fiq_count_increment(target_locked);
	}

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (vcpu_interrupt_count_get(target_locked) != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	return ret;
}
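
/*
 * Illustrative example (not part of the original code, assuming
 * INTERRUPT_REGISTER_BITS is 32): for intid = 35, intid_index = 35 / 32 = 1,
 * intid_shift = 35 % 32 = 3 and intid_mask = 1U << 3 = 0x8, i.e. the
 * interrupt is tracked by bit 3 of the second word of the enabled, pending
 * and type bitmaps.
 */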

/*
 * Wrapper around api_interrupt_inject_locked that takes and releases the lock
 * of the target vCPU.
 */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	int64_t ret;
	struct vcpu_locked target_locked;

	target_locked = vcpu_lock(target_vcpu);
	ret = api_interrupt_inject_locked(target_locked, intid, current, next);
	vcpu_unlock(&target_locked);

	return ret;
}

/**
 * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
 * or FFA_MSG_WAIT call.
 */
static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}

/**
 * Prepares the vCPU to run by updating its state and fetching whether a return
 * value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct ffa_value *run_ret)
{
	struct vcpu_locked vcpu_locked;
	struct vm_locked vm_locked;
	bool need_vm_lock;
	bool ret;
	uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	vcpu_locked = vcpu_lock(vcpu);

#if SECURE_WORLD == 1

	if (vcpu_secondary_reset_and_start(vcpu_locked, vcpu->vm->secondary_ep,
					   0)) {
		dlog_verbose("%s secondary cold boot vmid %#x vcpu id %#x\n",
			     __func__, vcpu->vm->id, current->cpu->id);
	}

#endif

	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_WAITING;
	if (need_vm_lock) {
		vcpu_unlock(&vcpu_locked);
		vm_locked = vm_lock(vcpu->vm);
		vcpu_locked = vcpu_lock(vcpu);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running then the state should be
	 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
	 * stops running but while Hafnium is in the process of switching back
	 * to the primary there will be a brief period while the state has been
	 * updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for the
	 * purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = ffa_error(FFA_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %#x vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_WAITING:
		/*
		 * An initial FFA_RUN is necessary for secondary VM/SP to reach
		 * the message wait loop.
		 */
		if (!vcpu->is_bootstrapped) {
			vcpu->is_bootstrapped = true;
			break;
		}

		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     ffa_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}

		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);
			if (timer_remaining_ns == 0) {
				break;
			}
		} else {
			dlog_verbose("Timer disabled\n");
		}
		run_ret->func = FFA_MSG_WAIT_32;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;
		ret = false;
		goto out;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}
		}

		/*
		 * The vCPU is not ready to run, return the appropriate code to
		 * the primary which called vcpu_run.
		 */
		run_ret->func = HF_FFA_RUN_WAIT_FOR_INTERRUPT;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;

		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED:
		/* A blocked vCPU is run unconditionally. Fall through. */
	case VCPU_STATE_PREEMPTED:
		break;
	default:
		/*
		 * Execution not expected to reach here. Deny the request
		 * gracefully.
		 */
		*run_ret = ffa_error(FFA_DENIED);
		ret = false;
		goto out;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	vcpu_unlock(&vcpu_locked);
	if (need_vm_lock) {
		vm_unlock(&vm_locked);
	}

	return ret;
}
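
/*
 * Illustrative summary (not part of the original code) of the decisions made
 * above: a vCPU in VCPU_STATE_WAITING whose VM has a received message pending
 * is run immediately with the FFA_MSG_SEND value forced onto its registers,
 * whereas one that is merely waiting with an enabled, not-yet-expired timer
 * is not run and FFA_MSG_WAIT with the remaining time in arg2 is handed back
 * to the caller's scheduler instead.
 */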

struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			     struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);

	if (!plat_ffa_run_checks(current, vm_id, &ret, next)) {
		return ret;
	}

	if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
		return ret;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = FFA_INTERRUPT_32;
	ret.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx);
	ret.arg2 = 0;

out:
	return ret;
}
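
/*
 * Illustrative example (not part of the original code, assuming ffa_vm_vcpu()
 * packs its arguments the same way as the (recv_sender << 16) | receiver->id
 * encoding used in ffa_msg_recv_return() above): a successful
 * api_ffa_run(vm_id, 3, ...) reports the target in the placeholder
 * FFA_INTERRUPT_32 value with the VM ID in the upper 16 bits of arg1 and
 * vCPU index 3 in the lower 16 bits.
 */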

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_ffa_rxtx_map and
 * api_ffa_rx_release after they've succeeded. If a secondary VM is running and
 * there are waiters, it also switches back to the primary VM for it to wake
 * waiters up.
 */
static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
					  struct vcpu *current,
					  struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
		VCPU_STATE_WAITING);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages.
 */
static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
				    struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    uint32_t extra_attributes,
				    struct mpool *local_page_pool)
{
	bool ret;

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	return ret;
}

/**
 * Sanity checks and configures the send and receive pages in the VM stage-2
 * and hypervisor stage-1 page tables.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned, are the same or have invalid attributes.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
 * - FFA_SUCCESS on success if no further action is needed.
 */
struct ffa_value api_vm_configure_pages(
	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
	ipaddr_t send, ipaddr_t recv, uint32_t page_count,
	struct mpool *local_page_pool)
{
	struct ffa_value ret;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode;
	uint32_t orig_recv_mode;
	uint32_t extra_attributes;

	/* We only allow these to be setup once. */
	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
			     &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
			     &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Take memory ownership away from the VM and mark as shared. */
	uint32_t mode =
		MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W;
	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end, mode,
			     local_page_pool, NULL)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
	if (vm_locked.vm->el0_partition) {
		mode |= MM_MODE_USER | MM_MODE_NG;
	}

	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end, mode,
			     local_page_pool, NULL)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		vm_ptable_defrag(vm_locked, local_page_pool);
		goto fail_undo_send;
	}

	/* Get extra send/recv pages mapping attributes for the given VM ID. */
	extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);

	/*
	 * For EL0 partitions, since both the partition and the hypervisor code
	 * use the EL2&0 translation regime, it is critical to mark the mappings
	 * of the send and recv buffers as non-global in the TLB. For one, if we
	 * don't mark them as non-global, it would cause TLB conflicts since
	 * there would be an identity mapping with the non-global attribute in
	 * the partition's page tables, but another identity mapping in the
	 * hypervisor page tables with the global attribute. The other issue is
	 * one of security: we don't want other partitions to be able to access
	 * other partitions' buffers through cached translations.
	 */
	if (vm_locked.vm->el0_partition) {
		extra_attributes |= MM_MODE_NG;
	}

	if (!api_vm_configure_stage1(mm_stage1_locked, vm_locked, pa_send_begin,
				     pa_send_end, pa_recv_begin, pa_recv_end,
				     extra_attributes, local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	goto out;

fail_undo_send_and_recv:
	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			      orig_send_mode, local_page_pool, NULL));

fail_undo_send:
	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
			      orig_send_mode, local_page_pool, NULL));
	ret = ffa_error(FFA_NO_MEMORY);

out:
	return ret;
}
1148
1149/**
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001150 * Configures the VM to send/receive data through the specified pages. The pages
Manish Pandeyd34f8892020-06-19 17:41:07 +01001151 * must not be shared. Locking of the page tables combined with a local memory
1152 * pool ensures there will always be enough memory to recover from any errors
1153 * that arise. The stage-1 page tables must be locked so memory cannot be taken
1154 * by another core which could result in this transaction being unable to roll
1155 * back in the case of an error.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001156 *
1157 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001158 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001159 * aligned, are the same or have invalid attributes.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001160 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001161 * due to insuffient page table memory.
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001162 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001163 * - FFA_SUCCESS on success if no further action is needed.
1164 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001165 * needs to wake up or kick waiters.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001166 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001167struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
1168 uint32_t page_count, struct vcpu *current,
1169 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001170{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001171 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001172 struct ffa_value ret;
Manish Pandeyd34f8892020-06-19 17:41:07 +01001173 struct vm_locked vm_locked;
1174 struct mm_stage1_locked mm_stage1_locked;
1175 struct mpool local_page_pool;
Andrew Scull220e6212018-12-21 18:09:00 +00001176
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001177 /*
Manish Pandeyd34f8892020-06-19 17:41:07 +01001178 * Create a local pool so any freed memory can't be used by another
1179 * thread. This is to ensure the original mapping can be restored if any
1180 * stage of the process fails.
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001181 */
Manish Pandeyd34f8892020-06-19 17:41:07 +01001182 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
1183
Andrew Sculle1322792019-07-01 17:46:10 +01001184 vm_locked = vm_lock(vm);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001185 mm_stage1_locked = mm_lock_stage1();
Andrew Scull220e6212018-12-21 18:09:00 +00001186
Manish Pandeyd34f8892020-06-19 17:41:07 +01001187 ret = api_vm_configure_pages(mm_stage1_locked, vm_locked, send, recv,
1188 page_count, &local_page_pool);
1189 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001190 goto exit;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001191 }
1192
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001193 /* Tell caller about waiters, if any. */
Andrew Sculle1322792019-07-01 17:46:10 +01001194 ret = api_waiter_result(vm_locked, current, next);
Andrew Scull220e6212018-12-21 18:09:00 +00001195
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001196exit:
Manish Pandeyd34f8892020-06-19 17:41:07 +01001197 mpool_fini(&local_page_pool);
1198
1199 mm_unlock_stage1(&mm_stage1_locked);
Andrew Sculle1322792019-07-01 17:46:10 +01001200 vm_unlock(&vm_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001201
1202 return ret;
1203}
1204
1205/**
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001206 * Unmaps the RX/TX buffer pair with a partition or partition manager from the
1207 * translation regime of the caller. Unmaps the region for the hypervisor and
1208 * sets the memory region to owned and exclusive for the component. Since the
1209 * memory region was already mapped in the page tables when the buffers were
1210 * originally created, it can safely be remapped.
1211 *
1212 * Returns:
1213 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
1214 * behalf of the caller.
1215 * - FFA_SUCCESS on success if no further action is needed.
1216 */
1217struct ffa_value api_ffa_rxtx_unmap(ffa_vm_id_t allocator_id,
1218 struct vcpu *current)
1219{
1220 struct vm *vm = current->vm;
1221 struct vm_locked vm_locked;
1222 struct mm_stage1_locked mm_stage1_locked;
1223 paddr_t send_pa_begin;
1224 paddr_t send_pa_end;
1225 paddr_t recv_pa_begin;
1226 paddr_t recv_pa_end;
1227
1228 /*
1229 * Check there is a buffer pair registered on behalf of the caller.
1230 * Since forwarding is not yet supported the allocator ID MBZ.
1231 */
1232 if (allocator_id != 0) {
1233 dlog_error(
1234 "Forwarding MAP/UNMAP from the hypervisor is not yet "
1235 "supported so vm id must be zero.\n");
1236 return ffa_error(FFA_INVALID_PARAMETERS);
1237 }
1238
1239 /* Get send and receive buffers. */
1240 if (vm->mailbox.send == NULL || vm->mailbox.recv == NULL) {
Olivier Deprez86d87ae2021-08-19 14:27:46 +02001241 dlog_verbose(
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001242 "No buffer pair registered on behalf of the caller.\n");
1243 return ffa_error(FFA_INVALID_PARAMETERS);
1244 }
1245
1246 /* Currently a mailbox size of 1 page is assumed. */
1247 send_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.send));
1248 send_pa_end = pa_add(send_pa_begin, HF_MAILBOX_SIZE);
1249 recv_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.recv));
1250 recv_pa_end = pa_add(recv_pa_begin, HF_MAILBOX_SIZE);
1251
1252 vm_locked = vm_lock(vm);
1253 mm_stage1_locked = mm_lock_stage1();
1254
1255 /*
1256 * Set the memory region of the buffers back to the default mode
1257 * for the VM. Since this memory region was already mapped for the
1258 * RXTX buffers, we can safely remap them.
1259 */
1260 CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
1261 MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
1262 NULL));
1263
1264 CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
1265 MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
1266 NULL));
1267
1268 /* Unmap the buffers in the partition manager. */
1269 CHECK(mm_unmap(mm_stage1_locked, send_pa_begin, send_pa_end,
1270 &api_page_pool));
1271 CHECK(mm_unmap(mm_stage1_locked, recv_pa_begin, recv_pa_end,
1272 &api_page_pool));
1273
1274 vm->mailbox.send = NULL;
1275 vm->mailbox.recv = NULL;
1276
1277 mm_unlock_stage1(&mm_stage1_locked);
1278 vm_unlock(&vm_locked);
1279
1280 return (struct ffa_value){.func = FFA_SUCCESS_32};
1281}
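
/*
 * Illustrative caller-side sketch (not part of the original sources):
 * releasing a previously mapped buffer pair. Forwarding is not supported
 * yet, so the ID in w1 must be zero here; ffa_call() is the assumed vmapi
 * SMC wrapper.
 *
 *	struct ffa_value ret = ffa_call(
 *		(struct ffa_value){.func = FFA_RXTX_UNMAP_32, .arg1 = 0});
 *	if (ret.func != FFA_SUCCESS_32) {
 *		// No buffer pair was registered on behalf of this caller.
 *	}
 */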
1282
1283/**
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001284 * Notifies the `to` VM about the message currently in its mailbox, possibly
1285 * with the help of the primary VM.
1286 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001287static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
1288 struct vcpu *current, struct vcpu **next)
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001289{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001290 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
1291 struct ffa_value primary_ret = {
1292 .func = FFA_MSG_SEND_32,
Andrew Walbranf76f5752019-12-03 18:33:08 +00001293 .arg1 = ((uint32_t)from_id << 16) | to.vm->id,
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001294 };
1295
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001296 /* Messages for the primary VM are delivered directly. */
1297 if (to.vm->id == HF_PRIMARY_VM_ID) {
1298 /*
Andrew Walbrane7ad3c02019-12-24 17:03:04 +00001299 * Only tell the primary VM the size and other details if the
1300 * message is for it, to avoid leaking data about messages for
1301 * other VMs.
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001302 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001303 primary_ret = ffa_msg_recv_return(to.vm);
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001304
1305 to.vm->mailbox.state = MAILBOX_STATE_READ;
1306 *next = api_switch_to_primary(current, primary_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001307 VCPU_STATE_BLOCKED);
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001308 return ret;
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001309 }
1310
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001311 to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
1312
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001313 /* Messages for the TEE are sent on via the dispatcher. */
1314 if (to.vm->id == HF_TEE_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001315 struct ffa_value call = ffa_msg_recv_return(to.vm);
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001316
Olivier Deprez112d2b52020-09-30 07:39:23 +02001317 ret = arch_other_world_call(call);
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001318 /*
1319 * After the call to the TEE completes it must have finished
1320 * reading its RX buffer, so it is ready for another message.
1321 */
1322 to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001323 /*
1324 * Don't return to the primary VM in this case, as the TEE is
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001325 * not (yet) scheduled via FF-A.
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001326 */
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001327 return ret;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001328 }
1329
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001330 /* Return to the primary VM directly or with a switch. */
Andrew Walbranf76f5752019-12-03 18:33:08 +00001331 if (from_id != HF_PRIMARY_VM_ID) {
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001332 *next = api_switch_to_primary(current, primary_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001333 VCPU_STATE_BLOCKED);
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001334 }
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001335
1336 return ret;
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001337}
1338
1339/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001340 * Copies data from the sender's send buffer to the recipient's receive buffer
1341 * and notifies the recipient.
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +00001342 *
1343 * If the recipient's receive buffer is busy, it can optionally register the
1344 * caller to be notified when the recipient's receive buffer becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001345 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001346struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
1347 ffa_vm_id_t receiver_vm_id, uint32_t size,
1348 uint32_t attributes, struct vcpu *current,
1349 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001350{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001351 struct vm *from = current->vm;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001352 struct vm *to;
Andrew Walbran82d6d152019-12-24 15:02:06 +00001353 struct vm_locked to_locked;
Andrew Walbran70bc8622019-10-07 14:15:58 +01001354 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001355 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001356 struct vcpu_locked current_locked;
1357 bool is_direct_request_ongoing;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001358 bool notify =
1359 (attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;
Andrew Scull19503262018-09-20 14:48:39 +01001360
Andrew Walbran70bc8622019-10-07 14:15:58 +01001361 /* Ensure sender VM ID corresponds to the current VM. */
1362 if (sender_vm_id != from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001363 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001364 }
1365
1366 /* Disallow reflexive requests as this suggests an error in the VM. */
1367 if (receiver_vm_id == from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001368 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001369 }
1370
1371 /* Limit the size of transfer. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001372 if (size > FFA_MSG_PAYLOAD_MAX) {
1373 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001374 }
1375
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001376 /*
1377 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1378 * invocation.
1379 */
1380 current_locked = vcpu_lock(current);
1381 is_direct_request_ongoing =
1382 is_ffa_direct_msg_request_ongoing(current_locked);
1383 vcpu_unlock(&current_locked);
1384
1385 if (is_direct_request_ongoing) {
1386 return ffa_error(FFA_DENIED);
1387 }
1388
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001389 /* Ensure the receiver VM exists. */
1390 to = vm_find(receiver_vm_id);
1391 if (to == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001392 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001393 }
1394
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001395 /*
Andrew Walbran70bc8622019-10-07 14:15:58 +01001396 * Check that the sender has configured its send buffer. If the tx
1397 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
1398 * be safely accessed after releasing the lock since the tx mailbox
1399 * address can only be configured once.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001400 */
1401 sl_lock(&from->lock);
1402 from_msg = from->mailbox.send;
1403 sl_unlock(&from->lock);
1404
1405 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001406 return ffa_error(FFA_INVALID_PARAMETERS);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001407 }
1408
Andrew Walbran82d6d152019-12-24 15:02:06 +00001409 to_locked = vm_lock(to);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001410
Andrew Walbran82d6d152019-12-24 15:02:06 +00001411 if (msg_receiver_busy(to_locked, from, notify)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001412 ret = ffa_error(FFA_BUSY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001413 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001414 }
1415
Andrew Walbran82d6d152019-12-24 15:02:06 +00001416 /* Copy data. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001417 memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
Andrew Walbran82d6d152019-12-24 15:02:06 +00001418 to->mailbox.recv_size = size;
1419 to->mailbox.recv_sender = sender_vm_id;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001420 to->mailbox.recv_func = FFA_MSG_SEND_32;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001421 ret = deliver_msg(to_locked, sender_vm_id, current, next);
Andrew Scullaa039b32018-10-04 15:02:26 +01001422
1423out:
Andrew Walbran82d6d152019-12-24 15:02:06 +00001424 vm_unlock(&to_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001425
Wedson Almeida Filho80eb4a32018-11-30 17:11:15 +00001426 return ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001427}
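
/*
 * Illustrative caller-side sketch (not part of the original sources):
 * sending an indirect message. The payload is written to the caller's own TX
 * buffer first, then FFA_MSG_SEND is invoked. The w1 packing mirrors the one
 * built in deliver_msg() above (sender ID in bits [31:16], receiver ID in
 * bits [15:0]); ffa_call(), own_id, receiver, tx_buffer and the exact
 * register used for the size are assumptions.
 *
 *	memcpy_s(tx_buffer, FFA_MSG_PAYLOAD_MAX, "ping", sizeof("ping"));
 *	struct ffa_value ret = ffa_call((struct ffa_value){
 *		.func = FFA_MSG_SEND_32,
 *		.arg1 = ((uint32_t)own_id << 16) | receiver,
 *		.arg3 = sizeof("ping"),	// payload size in bytes
 *	});
 *	if (ret.func == FFA_ERROR_32) {
 *		// FFA_BUSY means the receiver's mailbox is not yet free.
 *	}
 */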
1428
1429/**
Andrew Scullec52ddf2019-08-20 10:41:01 +01001430 * Checks whether the vCPU's attempt to block for a message has already been
1431 * interrupted or whether it is allowed to block.
1432 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001433bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
Andrew Scullec52ddf2019-08-20 10:41:01 +01001434{
Manish Pandey35e452f2021-02-18 21:36:34 +00001435 struct vcpu_locked current_locked;
Andrew Scullec52ddf2019-08-20 10:41:01 +01001436 bool interrupted;
1437
Manish Pandey35e452f2021-02-18 21:36:34 +00001438 current_locked = vcpu_lock(current);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001439
1440 /*
1441 * Don't block if there are enabled and pending interrupts, to match
1442 * behaviour of wait_for_interrupt.
1443 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001444 interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001445
Manish Pandey35e452f2021-02-18 21:36:34 +00001446 vcpu_unlock(&current_locked);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001447
1448 return interrupted;
1449}
1450
1451/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001452 * Receives a message from the mailbox. If one isn't available, this function
1453 * can optionally block the caller until one becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001454 *
Andrew Scullaa039b32018-10-04 15:02:26 +01001455 * No new messages can be received until the mailbox has been cleared.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001456 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001457struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
1458 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001459{
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001460 bool is_direct_request_ongoing;
1461 struct vcpu_locked current_locked;
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001462 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001463 struct ffa_value return_code;
J-Alvesb37fd082020-10-22 12:29:21 +01001464 bool is_from_secure_world =
1465 (current->vm->id & HF_VM_ID_WORLD_MASK) != 0;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001466
Andrew Scullaa039b32018-10-04 15:02:26 +01001467 /*
1468 * The primary VM will receive messages as a status code from running
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001469 * vCPUs and must not call this function.
Andrew Scullaa039b32018-10-04 15:02:26 +01001470 */
J-Alvesb37fd082020-10-22 12:29:21 +01001471 if (!is_from_secure_world && vm->id == HF_PRIMARY_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001472 return ffa_error(FFA_NOT_SUPPORTED);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001473 }
1474
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001475 /*
1476 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1477 * invocation.
1478 */
1479 current_locked = vcpu_lock(current);
1480 is_direct_request_ongoing =
1481 is_ffa_direct_msg_request_ongoing(current_locked);
1482 vcpu_unlock(&current_locked);
1483
1484 if (is_direct_request_ongoing) {
1485 return ffa_error(FFA_DENIED);
1486 }
1487
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001488 sl_lock(&vm->lock);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001489
Andrew Scullaa039b32018-10-04 15:02:26 +01001490 /* Return pending messages without blocking. */
Andrew Sculld6ee1102019-04-05 22:12:42 +01001491 if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
1492 vm->mailbox.state = MAILBOX_STATE_READ;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001493 return_code = ffa_msg_recv_return(vm);
Jose Marinho3e2442f2019-03-12 13:30:37 +00001494 goto out;
1495 }
1496
1497 /* No pending message so fail if not allowed to block. */
1498 if (!block) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001499 return_code = ffa_error(FFA_RETRY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001500 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001501 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001502
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001503 /*
Jose Marinho3e2442f2019-03-12 13:30:37 +00001504 * From this point onward this call can only be interrupted or a message
1505 * received. If a message is received the return value will be set at
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001506 * that time to FFA_SUCCESS.
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001507 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001508 return_code = ffa_error(FFA_INTERRUPTED);
1509 if (api_ffa_msg_recv_block_interrupted(current)) {
Andrew Scullaa039b32018-10-04 15:02:26 +01001510 goto out;
1511 }
1512
J-Alvesb37fd082020-10-22 12:29:21 +01001513 if (is_from_secure_world) {
1514 /* Return to other world if caller is a SP. */
1515 *next = api_switch_to_other_world(
1516 current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001517 VCPU_STATE_WAITING);
J-Alvesb37fd082020-10-22 12:29:21 +01001518 } else {
1519 /* Switch back to primary VM to block. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001520 struct ffa_value run_return = {
1521 .func = FFA_MSG_WAIT_32,
1522 .arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
Andrew Walbranb4816552018-12-05 17:35:42 +00001523 };
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001524
Andrew Walbranb4816552018-12-05 17:35:42 +00001525 *next = api_switch_to_primary(current, run_return,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001526 VCPU_STATE_WAITING);
Andrew Walbranb4816552018-12-05 17:35:42 +00001527 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001528out:
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001529 sl_unlock(&vm->lock);
1530
Jose Marinho3e2442f2019-03-12 13:30:37 +00001531 return return_code;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001532}
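
/*
 * Illustrative receive loop for a secondary VM (not part of the original
 * sources). Blocking reception is typically reached via FFA_MSG_WAIT; the
 * helper names ffa_call(), ffa_msg_send_size() and process(), as well as
 * rx_buffer, are assumptions.
 *
 *	for (;;) {
 *		struct ffa_value ret = ffa_call(
 *			(struct ffa_value){.func = FFA_MSG_WAIT_32});
 *		if (ret.func == FFA_MSG_SEND_32) {
 *			// The payload is now in the RX buffer; process it,
 *			// then free the mailbox so new messages can arrive.
 *			process(rx_buffer, ffa_msg_send_size(ret));
 *			ffa_call((struct ffa_value){
 *				.func = FFA_RX_RELEASE_32});
 *		}
 *	}
 */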
1533
1534/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001535 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
1536 * by this function, the caller must have called api_mailbox_send before with
1537 * the notify argument set to true, and this call must have failed because the
1538 * mailbox was not available.
1539 *
1540 * It should be called repeatedly to retrieve a list of VMs.
1541 *
1542 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
1543 * became writable.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001544 */
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001545int64_t api_mailbox_writable_get(const struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001546{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001547 struct vm *vm = current->vm;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001548 struct wait_entry *entry;
Andrew Scullc0e569a2018-10-02 18:05:21 +01001549 int64_t ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001550
1551 sl_lock(&vm->lock);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001552 if (list_empty(&vm->mailbox.ready_list)) {
1553 ret = -1;
1554 goto exit;
1555 }
1556
1557 entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
1558 ready_links);
1559 list_remove(&entry->ready_links);
Andrew Walbranaad8f982019-12-04 10:56:39 +00001560 ret = vm_id_for_wait_entry(vm, entry);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001561
1562exit:
1563 sl_unlock(&vm->lock);
1564 return ret;
1565}
1566
1567/**
1568 * Retrieves the next VM waiting to be notified that the mailbox of the
1569 * specified VM became writable. Only primary VMs are allowed to call this.
1570 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +00001571 * Returns -1 on failure or if there are no waiters; the VM id of the next
1572 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001573 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001574int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001575{
1576 struct vm *vm;
1577 struct vm_locked locked;
1578 struct wait_entry *entry;
1579 struct vm *waiting_vm;
1580
1581 /* Only primary VMs are allowed to call this function. */
1582 if (current->vm->id != HF_PRIMARY_VM_ID) {
1583 return -1;
1584 }
1585
Andrew Walbran42347a92019-05-09 13:59:03 +01001586 vm = vm_find(vm_id);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001587 if (vm == NULL) {
1588 return -1;
1589 }
1590
Fuad Tabbaed294af2019-12-20 10:43:01 +00001591 /* Check if there are outstanding notifications from given VM. */
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001592 locked = vm_lock(vm);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001593 entry = api_fetch_waiter(locked);
1594 vm_unlock(&locked);
1595
1596 if (entry == NULL) {
1597 return -1;
1598 }
1599
1600 /* Enqueue notification to waiting VM. */
1601 waiting_vm = entry->waiting_vm;
1602
1603 sl_lock(&waiting_vm->lock);
1604 if (list_empty(&entry->ready_links)) {
1605 list_append(&waiting_vm->mailbox.ready_list,
1606 &entry->ready_links);
1607 }
1608 sl_unlock(&waiting_vm->lock);
1609
1610 return waiting_vm->id;
1611}
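
/*
 * Illustrative primary-VM sketch (not part of the original sources): after a
 * mailbox owner releases its RX buffer, the primary drains the waiter list
 * and kicks each waiting VM. hf_mailbox_waiter_get() is assumed to be the
 * vmapi wrapper for the function above, and released_vm_id is hypothetical.
 *
 *	for (;;) {
 *		int64_t waiter = hf_mailbox_waiter_get(released_vm_id);
 *		if (waiter == -1) {
 *			break;	// No more waiters for this mailbox.
 *		}
 *		// Notify VM `waiter` (e.g. by injecting an interrupt or
 *		// scheduling it) that the mailbox is writable again.
 *	}
 */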
1612
1613/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001614 * Releases the caller's mailbox so that a new message can be received. The
1615 * caller must have copied out all data they wish to preserve as new messages
1616 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001617 *
1618 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001619 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
1620 * - FFA_SUCCESS on success if no further action is needed.
1621 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001622 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001623 * hf_mailbox_waiter_get.
1624 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001625struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001626{
1627 struct vm *vm = current->vm;
1628 struct vm_locked locked;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001629 struct ffa_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001630
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001631 locked = vm_lock(vm);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001632 switch (vm->mailbox.state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001633 case MAILBOX_STATE_EMPTY:
Andrew Sculld6ee1102019-04-05 22:12:42 +01001634 case MAILBOX_STATE_RECEIVED:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001635 ret = ffa_error(FFA_DENIED);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001636 break;
1637
Andrew Sculld6ee1102019-04-05 22:12:42 +01001638 case MAILBOX_STATE_READ:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001639 ret = api_waiter_result(locked, current, next);
Andrew Sculld6ee1102019-04-05 22:12:42 +01001640 vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001641 break;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001642 }
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001643 vm_unlock(&locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001644
1645 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001646}
Andrew Walbran318f5732018-11-20 16:23:42 +00001647
1648/**
1649 * Enables or disables a given interrupt ID for the calling vCPU.
1650 *
1651 * Returns 0 on success, or -1 if the intid is invalid.
1652 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001653int64_t api_interrupt_enable(uint32_t intid, bool enable,
1654 enum interrupt_type type, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001655{
Manish Pandey35e452f2021-02-18 21:36:34 +00001656 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001657 uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
Manish Pandey35e452f2021-02-18 21:36:34 +00001658 uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
1659 uint32_t intid_mask = 1U << intid_shift;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001660
Andrew Walbran318f5732018-11-20 16:23:42 +00001661 if (intid >= HF_NUM_INTIDS) {
1662 return -1;
1663 }
1664
Manish Pandey35e452f2021-02-18 21:36:34 +00001665 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001666 if (enable) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001667 /*
1668 * If it is pending and was not enabled before, increment the
1669 * count.
1670 */
1671 if (current->interrupts.interrupt_pending[intid_index] &
1672 ~current->interrupts.interrupt_enabled[intid_index] &
1673 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001674 if ((current->interrupts.interrupt_type[intid_index] &
1675 intid_mask) ==
1676 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1677 vcpu_irq_count_increment(current_locked);
1678 } else {
1679 vcpu_fiq_count_increment(current_locked);
1680 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001681 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001682 current->interrupts.interrupt_enabled[intid_index] |=
1683 intid_mask;
Manish Pandey35e452f2021-02-18 21:36:34 +00001684
1685 if (type == INTERRUPT_TYPE_IRQ) {
1686 current->interrupts.interrupt_type[intid_index] &=
1687 ~intid_mask;
1688 } else if (type == INTERRUPT_TYPE_FIQ) {
1689 current->interrupts.interrupt_type[intid_index] |=
1690 intid_mask;
1691 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001692 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001693 /*
1694 * If it is pending and was enabled before, decrement the count.
1695 */
1696 if (current->interrupts.interrupt_pending[intid_index] &
1697 current->interrupts.interrupt_enabled[intid_index] &
1698 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001699 if ((current->interrupts.interrupt_type[intid_index] &
1700 intid_mask) ==
1701 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1702 vcpu_irq_count_decrement(current_locked);
1703 } else {
1704 vcpu_fiq_count_decrement(current_locked);
1705 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001706 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001707 current->interrupts.interrupt_enabled[intid_index] &=
1708 ~intid_mask;
Manish Pandey35e452f2021-02-18 21:36:34 +00001709 current->interrupts.interrupt_type[intid_index] &= ~intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001710 }
1711
Manish Pandey35e452f2021-02-18 21:36:34 +00001712 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001713 return 0;
1714}
1715
1716/**
1717 * Returns the ID of the next pending interrupt for the calling vCPU, and
1718 * acknowledges it (i.e. marks it as no longer pending). Returns
1719 * HF_INVALID_INTID if there are no pending interrupts.
1720 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001721uint32_t api_interrupt_get(struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001722{
1723 uint8_t i;
1724 uint32_t first_interrupt = HF_INVALID_INTID;
Manish Pandey35e452f2021-02-18 21:36:34 +00001725 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001726
1727 /*
1728 * Find the first enabled and pending interrupt ID, return it, and
1729 * deactivate it.
1730 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001731 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001732 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
1733 uint32_t enabled_and_pending =
1734 current->interrupts.interrupt_enabled[i] &
1735 current->interrupts.interrupt_pending[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001736
Andrew Walbran318f5732018-11-20 16:23:42 +00001737 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001738 uint8_t bit_index = ctz(enabled_and_pending);
Manish Pandey35e452f2021-02-18 21:36:34 +00001739 uint32_t intid_mask = 1U << bit_index;
1740
Andrew Walbran3d84a262018-12-13 14:41:19 +00001741 /*
1742 * Mark it as no longer pending and decrement the count.
1743 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001744 current->interrupts.interrupt_pending[i] &= ~intid_mask;
1745
1746 if ((current->interrupts.interrupt_type[i] &
1747 intid_mask) == (INTERRUPT_TYPE_IRQ << bit_index)) {
1748 vcpu_irq_count_decrement(current_locked);
1749 } else {
1750 vcpu_fiq_count_decrement(current_locked);
1751 }
1752
Andrew Walbran3d84a262018-12-13 14:41:19 +00001753 first_interrupt =
1754 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001755 break;
1756 }
1757 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001758
Manish Pandey35e452f2021-02-18 21:36:34 +00001759 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001760 return first_interrupt;
1761}
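
/*
 * Illustrative sketch (not part of the original sources): a VM enabling a
 * virtual interrupt and later acknowledging it. hf_interrupt_enable() and
 * hf_interrupt_get() are assumed vmapi wrappers mirroring the two functions
 * above, and MY_DEVICE_INTID is a hypothetical interrupt ID.
 *
 *	hf_interrupt_enable(MY_DEVICE_INTID, true, INTERRUPT_TYPE_IRQ);
 *
 *	// Later, in the VM's IRQ handler:
 *	uint32_t intid = hf_interrupt_get();
 *	if (intid != HF_INVALID_INTID) {
 *		// Handle the interrupt identified by intid; it has already
 *		// been acknowledged (marked no longer pending) above.
 *	}
 */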
1762
1763/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001764 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001765 * given VM and vCPU.
1766 */
1767static inline bool is_injection_allowed(uint32_t target_vm_id,
1768 struct vcpu *current)
1769{
1770 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001771
Andrew Walbran318f5732018-11-20 16:23:42 +00001772 /*
1773 * The primary VM is allowed to inject interrupts into any VM. Secondary
1774 * VMs are only allowed to inject interrupts into their own vCPUs.
1775 */
1776 return current_vm_id == HF_PRIMARY_VM_ID ||
1777 current_vm_id == target_vm_id;
1778}
1779
1780/**
1781 * Injects a virtual interrupt of the given ID into the given target vCPU.
1782 * This doesn't cause the vCPU to actually be run immediately; it will be taken
1783 * when the vCPU is next run, which is up to the scheduler.
1784 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001785 * Returns:
1786 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1787 * ID is invalid, or the current VM is not allowed to inject interrupts to
1788 * the target VM.
1789 * - 0 on success if no further action is needed.
1790 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1791 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001792 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001793int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
1794 ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001795 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001796{
Andrew Walbran318f5732018-11-20 16:23:42 +00001797 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001798 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001799
1800 if (intid >= HF_NUM_INTIDS) {
1801 return -1;
1802 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001803
Andrew Walbran318f5732018-11-20 16:23:42 +00001804 if (target_vm == NULL) {
1805 return -1;
1806 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001807
Andrew Walbran318f5732018-11-20 16:23:42 +00001808 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001809 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00001810 return -1;
1811 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001812
Andrew Walbran318f5732018-11-20 16:23:42 +00001813 if (!is_injection_allowed(target_vm_id, current)) {
1814 return -1;
1815 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001816
Andrew Walbrane1310df2019-04-29 17:28:28 +01001817 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001818
Manish Pandey35e452f2021-02-18 21:36:34 +00001819 dlog_verbose(
1820 "Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
1821 "%u\n",
1822 intid, target_vm_id, target_vcpu_idx, current->vm->id,
1823 vcpu_index(current));
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001824 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001825}
Andrew Scull6386f252018-12-06 13:29:10 +00001826
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001827/** Returns the version of the implemented FF-A specification. */
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00001828struct ffa_value api_ffa_version(struct vcpu *current,
1829 uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001830{
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00001831 struct vm_locked current_vm_locked;
1832
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001833 /*
1834 * Ensure that the major revision representation fits in at most 15 bits
1835 * and the minor revision representation in at most 16 bits.
1836 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001837 static_assert(0x8000 > FFA_VERSION_MAJOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001838 "Major revision representation takes more than 15 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001839 static_assert(0x10000 > FFA_VERSION_MINOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001840 "Minor revision representation takes more than 16 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001841 if (requested_version & FFA_VERSION_RESERVED_BIT) {
Andrew Walbran9fd29072020-04-22 12:12:14 +01001842 /* Invalid encoding, return an error. */
J-Alves13318e32021-02-22 17:21:00 +00001843 return (struct ffa_value){.func = (uint32_t)FFA_NOT_SUPPORTED};
Andrew Walbran9fd29072020-04-22 12:12:14 +01001844 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001845
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00001846 current_vm_locked = vm_lock(current->vm);
1847 current_vm_locked.vm->ffa_version = requested_version;
1848 vm_unlock(&current_vm_locked);
1849
Daniel Boulby6e32c612021-02-17 15:09:41 +00001850 return ((struct ffa_value){.func = FFA_VERSION_COMPILED});
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001851}
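
/*
 * Worked example of the version encoding checked above (a sketch based on
 * the layout implied by the static_asserts): bit 31 must be zero, bits
 * [30:16] hold the major revision and bits [15:0] the minor revision.
 *
 *	// FF-A v1.1 would be requested as:
 *	uint32_t requested = (1U << 16) | 1U;	// 0x00010001
 *	// Setting FFA_VERSION_RESERVED_BIT (bit 31) makes the request
 *	// invalid and api_ffa_version() returns FFA_NOT_SUPPORTED.
 */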
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001852
1853int64_t api_debug_log(char c, struct vcpu *current)
1854{
Andrew Sculld54e1be2019-08-20 11:09:42 +01001855 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001856 struct vm *vm = current->vm;
1857 struct vm_locked vm_locked = vm_lock(vm);
1858
Andrew Sculld54e1be2019-08-20 11:09:42 +01001859 if (c == '\n' || c == '\0') {
1860 flush = true;
1861 } else {
1862 vm->log_buffer[vm->log_buffer_length++] = c;
1863 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
1864 }
1865
1866 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01001867 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
1868 vm->log_buffer_length);
1869 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001870 }
1871
1872 vm_unlock(&vm_locked);
1873
1874 return 0;
1875}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001876
1877/**
J-Alves6f72ca82021-11-01 12:34:58 +00001878 * Helper for success return of FFA_FEATURES, for when it is used to query
1879 * an interrupt ID.
1880 */
1881struct ffa_value api_ffa_feature_success(uint32_t arg2)
1882{
1883 return (struct ffa_value){
1884 .func = FFA_SUCCESS_32, .arg1 = 0U, .arg2 = arg2};
1885}
1886
1887/**
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001888 * Discovery function returning information about the implementation of optional
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001889 * FF-A interfaces.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001890 */
J-Alves6f72ca82021-11-01 12:34:58 +00001891struct ffa_value api_ffa_features(uint32_t feature_function_id)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001892{
J-Alves6f72ca82021-11-01 12:34:58 +00001893 /*
1894 * According to table 13.8 of FF-A v1.1 Beta 0 spec, bits [30:8] MBZ
1895 * if using a feature ID.
1896 */
1897 if ((feature_function_id & FFA_FEATURES_FUNC_ID_MASK) == 0U &&
1898 (feature_function_id & ~FFA_FEATURES_FEATURE_ID_MASK) != 0) {
1899 return ffa_error(FFA_NOT_SUPPORTED);
1900 }
1901
1902 switch (feature_function_id) {
1903 /* Check support of the given Function ID. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001904 case FFA_ERROR_32:
1905 case FFA_SUCCESS_32:
1906 case FFA_INTERRUPT_32:
1907 case FFA_VERSION_32:
1908 case FFA_FEATURES_32:
1909 case FFA_RX_RELEASE_32:
1910 case FFA_RXTX_MAP_64:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001911 case FFA_RXTX_UNMAP_32:
Fuad Tabbae4efcc32020-07-16 15:37:27 +01001912 case FFA_PARTITION_INFO_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001913 case FFA_ID_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001914 case FFA_MSG_WAIT_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001915 case FFA_RUN_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001916 case FFA_MEM_DONATE_32:
1917 case FFA_MEM_LEND_32:
1918 case FFA_MEM_SHARE_32:
1919 case FFA_MEM_RETRIEVE_REQ_32:
1920 case FFA_MEM_RETRIEVE_RESP_32:
1921 case FFA_MEM_RELINQUISH_32:
1922 case FFA_MEM_RECLAIM_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001923 case FFA_MSG_SEND_DIRECT_RESP_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001924 case FFA_MSG_SEND_DIRECT_RESP_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001925 case FFA_MSG_SEND_DIRECT_REQ_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001926 case FFA_MSG_SEND_DIRECT_REQ_32:
J-Alves3829fc02021-03-18 12:49:18 +00001927#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +00001928 /* FF-A v1.1 features. */
1929 case FFA_SPM_ID_GET_32:
J-Alves6f72ca82021-11-01 12:34:58 +00001930 case FFA_NOTIFICATION_BITMAP_CREATE_32:
1931 case FFA_NOTIFICATION_BITMAP_DESTROY_32:
1932 case FFA_NOTIFICATION_BIND_32:
1933 case FFA_NOTIFICATION_UNBIND_32:
1934 case FFA_NOTIFICATION_SET_32:
1935 case FFA_NOTIFICATION_GET_32:
1936 case FFA_NOTIFICATION_INFO_GET_64:
Raghu Krishnamurthy6764b072021-10-18 12:54:24 -07001937 case FFA_MEM_PERM_GET_32:
1938 case FFA_MEM_PERM_SET_32:
1939 case FFA_MEM_PERM_GET_64:
1940 case FFA_MEM_PERM_SET_64:
J-Alves3829fc02021-03-18 12:49:18 +00001941#endif
1942 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves6f72ca82021-11-01 12:34:58 +00001943
1944#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
1945 /* Check support of a feature, given the respective feature ID. */
1946 case FFA_FEATURE_NPI:
1947 return api_ffa_feature_success(HF_NOTIFICATION_PENDING_INTID);
1948 case FFA_FEATURE_SRI:
1949 return api_ffa_feature_success(HF_SCHEDULE_RECEIVER_INTID);
1950#endif
1951 /* Platform specific feature support. */
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001952 default:
J-Alves6f72ca82021-11-01 12:34:58 +00001953 return arch_ffa_features(feature_function_id);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001954 }
1955}
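
/*
 * Illustrative query sketch (not part of the original sources): probing for
 * an optional interface and for a feature ID. ffa_call() is the assumed
 * vmapi SMC wrapper.
 *
 *	// Is FFA_MEM_LEND implemented?
 *	struct ffa_value r = ffa_call((struct ffa_value){
 *		.func = FFA_FEATURES_32, .arg1 = FFA_MEM_LEND_32});
 *	bool mem_lend_supported = (r.func == FFA_SUCCESS_32);
 *
 *	// Which virtual interrupt ID is the notification pending interrupt?
 *	r = ffa_call((struct ffa_value){
 *		.func = FFA_FEATURES_32, .arg1 = FFA_FEATURE_NPI});
 *	uint32_t npi_intid = (uint32_t)r.arg2;	// via api_ffa_feature_success()
 */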
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001956
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001957/**
J-Alves645eabe2021-02-22 16:08:27 +00001958 * FF-A specification states that x2/w2 Must Be Zero for direct messaging
1959 * interfaces.
1960 */
1961static inline bool api_ffa_dir_msg_is_arg2_zero(struct ffa_value args)
1962{
1963 return args.arg2 == 0U;
1964}
1965
1966/**
J-Alves76d99af2021-03-10 17:42:11 +00001967 * Limits size of arguments in ffa_value structure to 32-bit.
1968 */
1969static struct ffa_value api_ffa_value_copy32(struct ffa_value args)
1970{
1971 return (struct ffa_value){
1972 .func = (uint32_t)args.func,
1973 .arg1 = (uint32_t)args.arg1,
1974 .arg2 = (uint32_t)0,
1975 .arg3 = (uint32_t)args.arg3,
1976 .arg4 = (uint32_t)args.arg4,
1977 .arg5 = (uint32_t)args.arg5,
1978 .arg6 = (uint32_t)args.arg6,
1979 .arg7 = (uint32_t)args.arg7,
1980 };
1981}
1982
1983/**
1984 * Helper to copy direct message payload, depending on SMC used and expected
1985 * registers size.
1986 */
1987static struct ffa_value api_ffa_dir_msg_value(struct ffa_value args)
1988{
1989 if (args.func == FFA_MSG_SEND_DIRECT_REQ_32 ||
1990 args.func == FFA_MSG_SEND_DIRECT_RESP_32) {
1991 return api_ffa_value_copy32(args);
1992 }
1993
1994 return (struct ffa_value){
1995 .func = args.func,
1996 .arg1 = args.arg1,
1997 .arg2 = 0,
1998 .arg3 = args.arg3,
1999 .arg4 = args.arg4,
2000 .arg5 = args.arg5,
2001 .arg6 = args.arg6,
2002 .arg7 = args.arg7,
2003 };
2004}
2005
2006/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002007 * Send an FF-A direct message request.
2008 */
2009struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
2010 ffa_vm_id_t receiver_vm_id,
2011 struct ffa_value args,
2012 struct vcpu *current,
2013 struct vcpu **next)
2014{
J-Alves17228f72021-04-20 17:13:19 +01002015 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002016 struct vm *receiver_vm;
2017 struct vcpu *receiver_vcpu;
2018 struct two_vcpu_locked vcpus_locked;
2019
J-Alves645eabe2021-02-22 16:08:27 +00002020 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
2021 return ffa_error(FFA_INVALID_PARAMETERS);
2022 }
2023
Olivier Deprez55a189e2021-06-09 15:45:27 +02002024 if (!plat_ffa_is_direct_request_valid(current, sender_vm_id,
2025 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00002026 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002027 }
2028
Olivier Deprez55a189e2021-06-09 15:45:27 +02002029 if (plat_ffa_direct_request_forward(receiver_vm_id, args, &ret)) {
J-Alves17228f72021-04-20 17:13:19 +01002030 return ret;
2031 }
2032
2033 ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
2034
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002035 receiver_vm = vm_find(receiver_vm_id);
2036 if (receiver_vm == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002037 dlog_verbose("Invalid Receiver!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002038 return ffa_error(FFA_INVALID_PARAMETERS);
2039 }
2040
2041 /*
2042 * Per PSA FF-A EAC spec section 4.4.1 the firmware framework supports
2043 * UP (migratable) or MP partitions with a number of vCPUs matching the
2044 * number of PEs in the system. It further states that MP partitions
2045 * accepting direct request messages cannot migrate.
2046 */
J-Alvesad6a0432021-04-09 16:06:21 +01002047 receiver_vcpu = api_ffa_get_vm_vcpu(receiver_vm, current);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002048 if (receiver_vcpu == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002049 dlog_verbose("Invalid vCPU!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002050 return ffa_error(FFA_INVALID_PARAMETERS);
2051 }
2052
2053 vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
2054
2055 /*
2056 * If the destination vCPU is executing or has already received an
2057 * FFA_MSG_SEND_DIRECT_REQ, then return to the caller hinting that the
2058 * recipient is busy. There is a brief period where the vCPU state has
2059 * changed but regs_available is still false, so treat this case as the
2060 * vCPU not yet being ready to receive a direct message request.
2061 */
2062 if (is_ffa_direct_msg_request_ongoing(vcpus_locked.vcpu1) ||
2063 receiver_vcpu->state == VCPU_STATE_RUNNING ||
2064 !receiver_vcpu->regs_available) {
J-Alves88a13542021-12-14 15:39:52 +00002065 dlog_verbose("Receiver is busy with another request.\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002066 ret = ffa_error(FFA_BUSY);
2067 goto out;
2068 }
2069
2070 if (atomic_load_explicit(&receiver_vcpu->vm->aborting,
2071 memory_order_relaxed)) {
2072 if (receiver_vcpu->state != VCPU_STATE_ABORTED) {
Olivier Deprezf92e5d42020-11-13 16:00:54 +01002073 dlog_notice("Aborting VM %#x vCPU %u\n",
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002074 receiver_vcpu->vm->id,
2075 vcpu_index(receiver_vcpu));
2076 receiver_vcpu->state = VCPU_STATE_ABORTED;
2077 }
2078
2079 ret = ffa_error(FFA_ABORTED);
2080 goto out;
2081 }
2082
2083 switch (receiver_vcpu->state) {
2084 case VCPU_STATE_OFF:
2085 case VCPU_STATE_RUNNING:
2086 case VCPU_STATE_ABORTED:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002087 case VCPU_STATE_BLOCKED_INTERRUPT:
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002088 case VCPU_STATE_BLOCKED:
2089 case VCPU_STATE_PREEMPTED:
J-Alves88a13542021-12-14 15:39:52 +00002090 dlog_verbose("Receiver's vCPU can't receive request (%u)!\n",
2091 vcpu_index(receiver_vcpu));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002092 ret = ffa_error(FFA_BUSY);
2093 goto out;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002094 case VCPU_STATE_WAITING:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002095 /*
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002096 * We expect target vCPU to be in WAITING state after either
2097 * having called ffa_msg_wait or sent a direct message response.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002098 */
2099 break;
2100 }
2101
2102 /* Inject timer interrupt if any pending */
2103 if (arch_timer_pending(&receiver_vcpu->regs)) {
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002104 api_interrupt_inject_locked(vcpus_locked.vcpu1,
2105 HF_VIRTUAL_TIMER_INTID, current,
2106 NULL);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002107
2108 arch_timer_mask(&receiver_vcpu->regs);
2109 }
2110
2111 /* The receiver vCPU runs upon direct message invocation */
2112 receiver_vcpu->cpu = current->cpu;
2113 receiver_vcpu->state = VCPU_STATE_RUNNING;
2114 receiver_vcpu->regs_available = false;
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002115 receiver_vcpu->direct_request_origin_vm_id = sender_vm_id;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002116
J-Alves76d99af2021-03-10 17:42:11 +00002117 arch_regs_set_retval(&receiver_vcpu->regs, api_ffa_dir_msg_value(args));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002118
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002119 current->state = VCPU_STATE_BLOCKED;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002120
2121 /* Switch to receiver vCPU targeted to by direct msg request */
2122 *next = receiver_vcpu;
2123
2124 /*
2125 * Since this flow will lead to a VM switch, the return value will not
2126 * be applied to current vCPU.
2127 */
2128
2129out:
2130 sl_unlock(&receiver_vcpu->lock);
2131 sl_unlock(&current->lock);
2132
2133 return ret;
2134}
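
/*
 * Illustrative caller-side sketch (not part of the original sources):
 * issuing a direct request and consuming the paired response. The w1
 * packing follows the FF-A direct messaging convention (sender ID in bits
 * [31:16], receiver ID in bits [15:0]) and w2 must be zero, as enforced
 * above; ffa_call(), own_id and target are assumptions.
 *
 *	struct ffa_value resp = ffa_call((struct ffa_value){
 *		.func = FFA_MSG_SEND_DIRECT_REQ_32,
 *		.arg1 = ((uint32_t)own_id << 16) | target,
 *		.arg2 = 0,		// MBZ
 *		.arg3 = 0x1234,		// implementation-defined payload
 *	});
 *	if (resp.func == FFA_MSG_SEND_DIRECT_RESP_32) {
 *		// resp.arg3..arg7 carry the callee's payload.
 *	} else if (resp.func == FFA_ERROR_32) {
 *		// e.g. FFA_BUSY or FFA_INVALID_PARAMETERS, as returned above.
 *	}
 */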
2135
2136/**
2137 * Send an FF-A direct message response.
2138 */
2139struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
2140 ffa_vm_id_t receiver_vm_id,
2141 struct ffa_value args,
2142 struct vcpu *current,
2143 struct vcpu **next)
2144{
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002145 struct vcpu_locked current_locked;
J-Alves645eabe2021-02-22 16:08:27 +00002146
2147 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
2148 return ffa_error(FFA_INVALID_PARAMETERS);
2149 }
2150
J-Alves76d99af2021-03-10 17:42:11 +00002151 struct ffa_value to_ret = api_ffa_dir_msg_value(args);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002152
Olivier Deprez55a189e2021-06-09 15:45:27 +02002153 if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
2154 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00002155 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002156 }
2157
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002158 current_locked = vcpu_lock(current);
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002159 if (api_ffa_is_managed_exit_ongoing(current_locked)) {
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002160 /*
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002161 * No need for REQ/RESP state management as managed exit does
2162 * not have a corresponding REQ pair.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002163 */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002164 if (receiver_vm_id != HF_PRIMARY_VM_ID) {
2165 vcpu_unlock(&current_locked);
2166 return ffa_error(FFA_DENIED);
2167 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002168
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05002169 /*
2170 * Per FF-A v1.1 Beta section 8.4.1.2 bullet 6, the SPMC can signal
2171 * a secure interrupt to an SP that is performing managed exit.
2172 * We have taken an implementation-defined choice to not allow
2173 * managed exit while an SP is processing a secure interrupt.
2174 */
2175 CHECK(!current->processing_secure_interrupt);
2176
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002177 plat_interrupts_set_priority_mask(0xff);
2178 current->processing_managed_exit = false;
2179 } else {
2180 /*
2181 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
2182 * defined originator.
2183 */
2184 if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
2185 /*
2186 * Sending direct response but direct request origin
2187 * vCPU is not set.
2188 */
2189 vcpu_unlock(&current_locked);
2190 return ffa_error(FFA_DENIED);
2191 }
2192
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05002193 /* Refer to FF-A v1.1 Beta0 section 7.3 bullet 3. */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002194 if (current->direct_request_origin_vm_id != receiver_vm_id) {
2195 vcpu_unlock(&current_locked);
2196 return ffa_error(FFA_DENIED);
2197 }
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05002198
2199 /*
2200 * Per FF-A v1.1 Beta0 section 7.4, if a secure interrupt is
2201 * handled by an SP in the RUNNING state, the existing runtime model
2202 * is preserved. Hence, per section 7.3 bullet 3, the SP can use
2203 * FFA_MSG_SEND_DIRECT_RESP to return a response after
2204 * interrupt completion.
2205 */
2206 if (current->processing_secure_interrupt) {
2207 /* There is no preempted vCPU to resume. */
2208 CHECK(current->preempted_vcpu == NULL);
2209
2210 /* Unmask interrupts. */
2211 plat_interrupts_set_priority_mask(0xff);
2212
2213 /*
2214 * Clear fields corresponding to secure interrupt
2215 * handling.
2216 */
2217 current->processing_secure_interrupt = false;
2218 current->secure_interrupt_deactivated = false;
2219 current->current_sec_interrupt_id = 0;
2220 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002221 }
2222
2223 /* Clear direct request origin for the caller. */
2224 current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
2225
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002226 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002227
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002228 if (!vm_id_is_current_world(receiver_vm_id)) {
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002229 *next = api_switch_to_other_world(
2230 current, to_ret,
2231 /*
2232 * The current vCPU sent a direct response. It moves to
2233 * the waiting state.
2234 */
2235 VCPU_STATE_WAITING);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002236 } else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002237 *next = api_switch_to_primary(
2238 current, to_ret,
2239 /*
2240 * The current vCPU sent a direct response. It moves to
2241 * the waiting state.
2242 */
2243 VCPU_STATE_WAITING);
J-Alvesfe7f7372020-11-09 11:32:12 +00002244 } else if (vm_id_is_current_world(receiver_vm_id)) {
2245 /*
2246 * It is expected that receiver_vm_id belongs to an SP; otherwise
J-Alvesaa79c012021-07-09 14:29:45 +01002247 * 'plat_ffa_is_direct_response_valid' should have
J-Alvesfe7f7372020-11-09 11:32:12 +00002248 * made the function return an error before getting to this point.
2249 */
2250 *next = api_switch_to_vm(current, to_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002251 /*
2252 * The current vCPU sent a direct response.
2253 * It moves to the waiting state.
2254 */
2255 VCPU_STATE_WAITING, receiver_vm_id);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002256 } else {
2257 panic("Invalid direct message response invocation");
2258 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002259
2260 return (struct ffa_value){.func = FFA_INTERRUPT_32};
2261}
2262
J-Alves84658fc2021-06-17 14:37:32 +01002263static bool api_memory_region_check_flags(
2264 struct ffa_memory_region *memory_region, uint32_t share_func)
2265{
2266 switch (share_func) {
2267 case FFA_MEM_SHARE_32:
2268 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2269 0U) {
2270 return false;
2271 }
2272 /* Intentional fall-through */
2273 case FFA_MEM_LEND_32:
2274 case FFA_MEM_DONATE_32: {
2275 /* Bits 31:2 Must Be Zero. */
2276 ffa_memory_receiver_flags_t to_mask =
2277 ~(FFA_MEMORY_REGION_FLAG_CLEAR |
2278 FFA_MEMORY_REGION_FLAG_TIME_SLICE);
2279
2280 if ((memory_region->flags & to_mask) != 0U) {
2281 return false;
2282 }
2283 break;
2284 }
2285 default:
2286 panic("Check for mem send calls only.\n");
2287 }
2288
2289 /* Last check reserved values are 0 */
2290 return true;
2291}
2292
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002293struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
2294 uint32_t fragment_length, ipaddr_t address,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002295 uint32_t page_count, struct vcpu *current)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002296{
2297 struct vm *from = current->vm;
2298 struct vm *to;
2299 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002300 struct ffa_memory_region *memory_region;
2301 struct ffa_value ret;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002302
2303 if (ipa_addr(address) != 0 || page_count != 0) {
2304 /*
2305 * Hafnium only supports passing the descriptor in the TX
2306 * mailbox.
2307 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002308 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002309 }
2310
Andrew Walbranca808b12020-05-15 17:22:28 +01002311 if (fragment_length > length) {
2312 dlog_verbose(
2313 "Fragment length %d greater than total length %d.\n",
2314 fragment_length, length);
2315 return ffa_error(FFA_INVALID_PARAMETERS);
2316 }
2317 if (fragment_length < sizeof(struct ffa_memory_region) +
2318 sizeof(struct ffa_memory_access)) {
2319 dlog_verbose(
2320 "Initial fragment length %d smaller than header size "
2321 "%d.\n",
2322 fragment_length,
2323 sizeof(struct ffa_memory_region) +
2324 sizeof(struct ffa_memory_access));
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002325 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002326 }
2327
2328 /*
2329 * Check that the sender has configured its send buffer. If the TX
2330 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2331 * be safely accessed after releasing the lock since the TX mailbox
2332 * address can only be configured once.
2333 */
2334 sl_lock(&from->lock);
2335 from_msg = from->mailbox.send;
2336 sl_unlock(&from->lock);
2337
2338 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002339 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002340 }
2341
2342 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002343 * Copy the memory region descriptor to a fresh page from the memory
2344 * pool. This prevents the sender from changing it underneath us, and
2345 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002346 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002347 if (fragment_length > HF_MAILBOX_SIZE ||
2348 fragment_length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002349 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002350 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002351 memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002352 if (memory_region == NULL) {
2353 dlog_verbose("Failed to allocate memory region copy.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002354 return ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002355 }
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002356 memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002357
2358 /* The sender must match the caller. */
2359 if (memory_region->sender != from->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002360 dlog_verbose("Memory region sender doesn't match caller.\n");
J-Alves99948662021-07-28 18:07:04 +01002361 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002362 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002363 }
2364
J-Alves84658fc2021-06-17 14:37:32 +01002365 if (!api_memory_region_check_flags(memory_region, share_func)) {
2366 dlog_verbose(
2367 "Memory region reserved arguments must be zero.\n");
2368 ret = ffa_error(FFA_INVALID_PARAMETERS);
2369 goto out;
2370 }
2371
Andrew Walbrana65a1322020-04-06 19:32:32 +01002372 if (memory_region->receiver_count != 1) {
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002373 /* Hafnium doesn't support multi-way memory sharing for now. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002374 dlog_verbose(
2375 "Multi-way memory sharing not supported (got %d "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002376 "endpoint memory access descriptors, expected 1).\n",
2377 memory_region->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002378 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002379 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002380 }
2381
2382 /*
2383 * Ensure that the receiver VM exists and isn't the same as the sender.
2384 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002385 to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002386 if (to == NULL || to == from) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002387 dlog_verbose("Invalid receiver.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002388 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002389 goto out;
2390 }
2391
2392 if (to->id == HF_TEE_VM_ID) {
2393 /*
2394 * The 'to' VM lock is only needed in the case that it is the
2395 * TEE VM.
2396 */
2397 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2398
2399 if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002400 ret = ffa_error(FFA_BUSY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002401 goto out_unlock;
2402 }
2403
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002404 ret = ffa_memory_tee_send(
2405 vm_to_from_lock.vm2, vm_to_from_lock.vm1, memory_region,
2406 length, fragment_length, share_func, &api_page_pool);
2407 /*
2408	 * ffa_memory_tee_send takes ownership of the memory_region, so
2409 * make sure we don't free it.
2410 */
2411 memory_region = NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002412
2413 out_unlock:
2414 vm_unlock(&vm_to_from_lock.vm1);
2415 vm_unlock(&vm_to_from_lock.vm2);
2416 } else {
2417 struct vm_locked from_locked = vm_lock(from);
2418
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002419 ret = ffa_memory_send(from_locked, memory_region, length,
2420 fragment_length, share_func,
2421 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002422 /*
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002423 * ffa_memory_send takes ownership of the memory_region, so
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002424 * make sure we don't free it.
2425 */
2426 memory_region = NULL;
2427
2428 vm_unlock(&from_locked);
2429 }
2430
2431out:
2432 if (memory_region != NULL) {
2433 mpool_free(&api_page_pool, memory_region);
2434 }
2435
2436 return ret;
2437}
2438
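/**
 * Handles an FFA_MEM_RETRIEVE_REQ call. The retrieve request descriptor is
 * expected in the caller's TX mailbox; it is copied to an internal CPU buffer
 * so the caller can't change it under us, and the caller's RX mailbox must be
 * free to receive the response.
 */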
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002439struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
2440 uint32_t fragment_length,
2441 ipaddr_t address, uint32_t page_count,
2442 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002443{
2444 struct vm *to = current->vm;
2445 struct vm_locked to_locked;
2446 const void *to_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002447 struct ffa_memory_region *retrieve_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002448 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002449 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002450
2451 if (ipa_addr(address) != 0 || page_count != 0) {
2452 /*
2453 * Hafnium only supports passing the descriptor in the TX
2454 * mailbox.
2455 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002456 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002457 }
2458
Andrew Walbrana65a1322020-04-06 19:32:32 +01002459 if (fragment_length != length) {
2460 dlog_verbose("Fragmentation not yet supported.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002461 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002462 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002463
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002464 retrieve_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002465 (struct ffa_memory_region *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002466 message_buffer_size = cpu_get_buffer_size(current->cpu);
2467 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2468 dlog_verbose("Retrieve request too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002469 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002470 }
2471
2472 to_locked = vm_lock(to);
2473 to_msg = to->mailbox.send;
2474
2475 if (to_msg == NULL) {
2476		dlog_verbose("TX buffer not set up.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002477 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002478 goto out;
2479 }
2480
2481 /*
2482 * Copy the retrieve request descriptor to an internal buffer, so that
2483 * the caller can't change it underneath us.
2484 */
2485 memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
2486
2487 if (msg_receiver_busy(to_locked, NULL, false)) {
2488 /*
2489 * Can't retrieve memory information if the mailbox is not
2490 * available.
2491 */
2492 dlog_verbose("RX buffer not ready.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002493 ret = ffa_error(FFA_BUSY);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002494 goto out;
2495 }
2496
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002497 ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
2498 &api_page_pool);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002499
2500out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002501 vm_unlock(&to_locked);
2502 return ret;
2503}
2504
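/**
 * Handles an FFA_MEM_RELINQUISH call. The relinquish descriptor is read from
 * the caller's TX mailbox into an internal CPU buffer, re-validating its
 * length after the copy so a concurrent change by the caller can't cause an
 * inconsistent read.
 */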
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002505struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002506{
2507 struct vm *from = current->vm;
2508 struct vm_locked from_locked;
2509 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002510 struct ffa_mem_relinquish *relinquish_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002511 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002512 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002513 uint32_t length;
2514
2515 from_locked = vm_lock(from);
2516 from_msg = from->mailbox.send;
2517
2518 if (from_msg == NULL) {
2519		dlog_verbose("TX buffer not set up.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002520 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002521 goto out;
2522 }
2523
2524 /*
2525 * Calculate length from relinquish descriptor before copying. We will
2526 * check again later to make sure it hasn't changed.
2527 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002528 length = sizeof(struct ffa_mem_relinquish) +
2529 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
2530 sizeof(ffa_vm_id_t);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002531 /*
2532 * Copy the relinquish descriptor to an internal buffer, so that the
2533 * caller can't change it underneath us.
2534 */
2535 relinquish_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002536 (struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002537 message_buffer_size = cpu_get_buffer_size(current->cpu);
2538 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2539 dlog_verbose("Relinquish message too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002540 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002541 goto out;
2542 }
2543 memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
2544
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002545 if (sizeof(struct ffa_mem_relinquish) +
2546 relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002547 length) {
2548 dlog_verbose(
2549 "Endpoint count changed while copying to internal "
2550 "buffer.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002551 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002552 goto out;
2553 }
2554
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002555 ret = ffa_memory_relinquish(from_locked, relinquish_request,
2556 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002557
2558out:
2559 vm_unlock(&from_locked);
2560 return ret;
2561}
2562
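/**
 * Handles an FFA_MEM_RECLAIM call. Reclaims the memory region identified by
 * the given handle: locally if the handle was allocated by the current world,
 * otherwise via the TEE.
 */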
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002563struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
2564 ffa_memory_region_flags_t flags,
2565 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002566{
2567 struct vm *to = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002568 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002569
Olivier Deprez55a189e2021-06-09 15:45:27 +02002570 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00002571 struct vm_locked to_locked = vm_lock(to);
2572
Andrew Walbranca808b12020-05-15 17:22:28 +01002573 ret = ffa_memory_reclaim(to_locked, handle, flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002574 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002575
Andrew Walbran290b0c92020-02-03 16:37:14 +00002576 vm_unlock(&to_locked);
2577 } else {
2578 struct vm *from = vm_find(HF_TEE_VM_ID);
2579 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2580
Andrew Walbranca808b12020-05-15 17:22:28 +01002581 ret = ffa_memory_tee_reclaim(vm_to_from_lock.vm1,
2582 vm_to_from_lock.vm2, handle, flags,
2583 &api_page_pool);
2584
2585 vm_unlock(&vm_to_from_lock.vm1);
2586 vm_unlock(&vm_to_from_lock.vm2);
2587 }
2588
2589 return ret;
2590}
2591
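/**
 * Handles an FFA_MEM_FRAG_RX call: returns the next fragment of a memory
 * region description to the caller, provided its RX mailbox is available.
 * The sender ID must be zero at a virtual FF-A instance.
 */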
2592struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
2593 uint32_t fragment_offset,
2594 ffa_vm_id_t sender_vm_id,
2595 struct vcpu *current)
2596{
2597 struct vm *to = current->vm;
2598 struct vm_locked to_locked;
2599 struct ffa_value ret;
2600
2601 /* Sender ID MBZ at virtual instance. */
2602 if (sender_vm_id != 0) {
2603 return ffa_error(FFA_INVALID_PARAMETERS);
2604 }
2605
2606 to_locked = vm_lock(to);
2607
2608 if (msg_receiver_busy(to_locked, NULL, false)) {
2609 /*
2610 * Can't retrieve memory information if the mailbox is not
2611 * available.
2612 */
2613 dlog_verbose("RX buffer not ready.\n");
2614 ret = ffa_error(FFA_BUSY);
2615 goto out;
2616 }
2617
2618 ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
2619 &api_page_pool);
2620
2621out:
2622 vm_unlock(&to_locked);
2623 return ret;
2624}
2625
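/**
 * Handles an FFA_MEM_FRAG_TX call: continues a fragmented memory send
 * operation. The fragment is copied from the caller's TX mailbox to a fresh
 * page from the memory pool and then handed either to the local memory
 * sharing code or to the TEE, depending on which world allocated the handle.
 */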
2626struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
2627 uint32_t fragment_length,
2628 ffa_vm_id_t sender_vm_id,
2629 struct vcpu *current)
2630{
2631 struct vm *from = current->vm;
2632 const void *from_msg;
2633 void *fragment_copy;
2634 struct ffa_value ret;
2635
2636 /* Sender ID MBZ at virtual instance. */
2637 if (sender_vm_id != 0) {
2638 return ffa_error(FFA_INVALID_PARAMETERS);
2639 }
2640
2641 /*
2642 * Check that the sender has configured its send buffer. If the TX
2643 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2644 * be safely accessed after releasing the lock since the TX mailbox
2645 * address can only be configured once.
2646 */
2647 sl_lock(&from->lock);
2648 from_msg = from->mailbox.send;
2649 sl_unlock(&from->lock);
2650
2651 if (from_msg == NULL) {
2652 return ffa_error(FFA_INVALID_PARAMETERS);
2653 }
2654
2655 /*
2656 * Copy the fragment to a fresh page from the memory pool. This prevents
2657 * the sender from changing it underneath us, and also lets us keep it
2658 * around in the share state table if needed.
2659 */
2660 if (fragment_length > HF_MAILBOX_SIZE ||
2661 fragment_length > MM_PPOOL_ENTRY_SIZE) {
2662 dlog_verbose(
2663 "Fragment length %d larger than mailbox size %d.\n",
2664 fragment_length, HF_MAILBOX_SIZE);
2665 return ffa_error(FFA_INVALID_PARAMETERS);
2666 }
2667 if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
2668 fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2669 0) {
2670 dlog_verbose("Invalid fragment length %d.\n", fragment_length);
2671 return ffa_error(FFA_INVALID_PARAMETERS);
2672 }
2673 fragment_copy = mpool_alloc(&api_page_pool);
2674 if (fragment_copy == NULL) {
2675 dlog_verbose("Failed to allocate fragment copy.\n");
2676 return ffa_error(FFA_NO_MEMORY);
2677 }
2678 memcpy_s(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
2679
2680 /*
2681 * Hafnium doesn't support fragmentation of memory retrieve requests
2682 * (because it doesn't support caller-specified mappings, so a request
2683 * will never be larger than a single page), so this must be part of a
2684 * memory send (i.e. donate, lend or share) request.
2685 *
2686 * We can tell from the handle whether the memory transaction is for the
2687 * TEE or not.
2688 */
2689 if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
2690 FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
2691 struct vm_locked from_locked = vm_lock(from);
2692
2693 ret = ffa_memory_send_continue(from_locked, fragment_copy,
2694 fragment_length, handle,
2695 &api_page_pool);
2696 /*
2697 * `ffa_memory_send_continue` takes ownership of the
2698 * fragment_copy, so we don't need to free it here.
2699 */
2700 vm_unlock(&from_locked);
2701 } else {
2702 struct vm *to = vm_find(HF_TEE_VM_ID);
2703 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2704
2705 /*
2706 * The TEE RX buffer state is checked in
2707 * `ffa_memory_tee_send_continue` rather than here, as we need
2708 * to return `FFA_MEM_FRAG_RX` with the current offset rather
2709 * than FFA_ERROR FFA_BUSY in case it is busy.
2710 */
2711
2712 ret = ffa_memory_tee_send_continue(
2713 vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy,
2714 fragment_length, handle, &api_page_pool);
2715 /*
2716 * `ffa_memory_tee_send_continue` takes ownership of the
2717 * fragment_copy, so we don't need to free it here.
2718 */
Andrew Walbran290b0c92020-02-03 16:37:14 +00002719
2720 vm_unlock(&vm_to_from_lock.vm1);
2721 vm_unlock(&vm_to_from_lock.vm2);
2722 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002723
2724 return ret;
2725}
Max Shvetsov40108e72020-08-27 12:39:50 +01002726
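/**
 * Handles an FFA_SECONDARY_EP_REGISTER call: records the given address as the
 * secondary entry point of the calling VM.
 */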
2727struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
2728 struct vcpu *current)
2729{
2730 struct vm_locked vm_locked;
2731
2732 vm_locked = vm_lock(current->vm);
2733 vm_locked.vm->secondary_ep = entry_point;
2734 vm_unlock(&vm_locked);
2735
2736 return (struct ffa_value){.func = FFA_SUCCESS_32};
2737}
J-Alvesa0f317d2021-06-09 13:31:59 +01002738
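/**
 * Handles an FFA_NOTIFICATION_BITMAP_CREATE call: creates the notifications
 * bitmap of the given VM, which must be a normal world VM, for the given
 * number of vCPUs.
 */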
2739struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
2740 ffa_vcpu_count_t vcpu_count,
2741 struct vcpu *current)
2742{
2743 if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
2744 dlog_verbose("Bitmap create for NWd VM IDs only (%x).\n",
2745 vm_id);
2746 return ffa_error(FFA_NOT_SUPPORTED);
2747 }
2748
2749 return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
2750}
2751
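/**
 * Handles an FFA_NOTIFICATION_BITMAP_DESTROY call: destroys the notifications
 * bitmap of the given VM.
 */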
2752struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
2753 struct vcpu *current)
2754{
2755 /*
2756	 * Validity checks for this interface are the same as for bitmap create.
2757 */
2758 if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
2759 dlog_verbose("Bitmap destroy for NWd VM IDs only (%x).\n",
2760 vm_id);
2761 return ffa_error(FFA_NOT_SUPPORTED);
2762 }
2763
2764 return plat_ffa_notifications_bitmap_destroy(vm_id);
2765}
J-Alvesc003a7a2021-03-18 13:06:53 +00002766
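/**
 * Handles FFA_NOTIFICATION_BIND and FFA_NOTIFICATION_UNBIND calls: binds the
 * given set of the receiver's notifications to the sender, or clears the
 * binding, provided none of them is currently bound to another sender or
 * pending.
 */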
2767struct ffa_value api_ffa_notification_update_bindings(
2768 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
2769 ffa_notifications_bitmap_t notifications, bool is_bind,
2770 struct vcpu *current)
2771{
2772 struct ffa_value ret = {.func = FFA_SUCCESS_32};
2773 struct vm_locked receiver_locked;
2774 const bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
2775 const ffa_vm_id_t id_to_update =
2776 is_bind ? sender_vm_id : HF_INVALID_VM_ID;
2777 const ffa_vm_id_t id_to_validate =
2778 is_bind ? HF_INVALID_VM_ID : sender_vm_id;
2779
2780 if (!plat_ffa_is_notifications_bind_valid(current, sender_vm_id,
2781 receiver_vm_id)) {
2782 dlog_verbose("Invalid use of notifications bind interface.\n");
2783 return ffa_error(FFA_INVALID_PARAMETERS);
2784 }
2785
J-Alvesb15e9402021-09-08 11:44:42 +01002786 if (plat_ffa_notifications_update_bindings_forward(
2787 receiver_vm_id, sender_vm_id, flags, notifications, is_bind,
2788 &ret)) {
J-Alvesb15e9402021-09-08 11:44:42 +01002789 return ret;
2790 }
2791
J-Alvesc003a7a2021-03-18 13:06:53 +00002792 if (notifications == 0U) {
2793 dlog_verbose("No notifications have been specified.\n");
2794 return ffa_error(FFA_INVALID_PARAMETERS);
2795 }
2796
2797	/*
2798	 * This check assumes the receiver is the current VM, and has been
2799	 * enforced by 'plat_ffa_is_notifications_bind_valid'.
2800 */
2801 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
2802
2803 if (receiver_locked.vm == NULL) {
2804 dlog_verbose("Receiver doesn't exist!\n");
2805 return ffa_error(FFA_DENIED);
2806 }
2807
J-Alves09ff9d82021-11-02 11:55:20 +00002808 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesc003a7a2021-03-18 13:06:53 +00002809 dlog_verbose("Notifications are not enabled.\n");
2810 ret = ffa_error(FFA_NOT_SUPPORTED);
2811 goto out;
2812 }
2813
2814 if (is_bind && vm_id_is_current_world(sender_vm_id) &&
2815 vm_find(sender_vm_id) == NULL) {
2816 dlog_verbose("Sender VM does not exist!\n");
2817 ret = ffa_error(FFA_INVALID_PARAMETERS);
2818 goto out;
2819 }
2820
2821 /*
2822 * Can't bind/unbind notifications if at least one is bound to a
2823 * different sender.
2824 */
2825 if (!vm_notifications_validate_bound_sender(
2826 receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
2827 id_to_validate, notifications)) {
2828 dlog_verbose("Notifications are bound to other sender.\n");
2829 ret = ffa_error(FFA_DENIED);
2830 goto out;
2831 }
2832
2833	/*
2834 * Check if there is a pending notification within those specified in
2835 * the bitmap.
2836 */
2837 if (vm_are_notifications_pending(receiver_locked,
2838 plat_ffa_is_vm_id(sender_vm_id),
2839 notifications)) {
2840 dlog_verbose("Notifications within '%x' pending.\n",
2841 notifications);
2842 ret = ffa_error(FFA_DENIED);
2843 goto out;
2844 }
2845
2846 vm_notifications_update_bindings(
2847 receiver_locked, plat_ffa_is_vm_id(sender_vm_id), id_to_update,
2848 notifications, is_per_vcpu && is_bind);
2849
2850out:
2851 vm_unlock(&receiver_locked);
2852 return ret;
2853}
J-Alvesaa79c012021-07-09 14:29:45 +01002854
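/**
 * Handles an FFA_NOTIFICATION_SET call: marks the given notifications as
 * pending for the receiver, provided they are bound to the sender, and
 * triggers or delays the Schedule Receiver Interrupt according to the flags.
 */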
2855struct ffa_value api_ffa_notification_set(
2856 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
2857 ffa_notifications_bitmap_t notifications, struct vcpu *current)
2858{
2859 struct ffa_value ret;
2860 struct vm_locked receiver_locked;
2861
2862 /*
2863	 * Check whether the notification is per-vCPU or global, and extract
2864	 * the vCPU ID according to table 17.19 of the FF-A v1.1 Beta 0 spec.
2865 */
2866 bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
2867 ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
2868
J-Alvesaa79c012021-07-09 14:29:45 +01002869 if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
2870 receiver_vm_id)) {
2871 dlog_verbose("Invalid use of notifications set interface.\n");
2872 return ffa_error(FFA_INVALID_PARAMETERS);
2873 }
2874
2875 if (notifications == 0U) {
2876 dlog_verbose("No notifications have been specified.\n");
2877 return ffa_error(FFA_INVALID_PARAMETERS);
2878 }
2879
J-Alvesde7bd2f2021-09-09 19:54:35 +01002880 if (plat_ffa_notification_set_forward(sender_vm_id, receiver_vm_id,
2881 flags, notifications, &ret)) {
2882 return ret;
2883 }
2884
J-Alvesaa79c012021-07-09 14:29:45 +01002885 /*
2886 * This check assumes receiver is the current VM, and has been enforced
2887 * by 'plat_ffa_is_notification_set_valid'.
2888 */
2889 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
2890
2891 if (receiver_locked.vm == NULL) {
2892 dlog_verbose("Receiver ID is not valid.\n");
2893 return ffa_error(FFA_INVALID_PARAMETERS);
2894 }
2895
J-Alves09ff9d82021-11-02 11:55:20 +00002896 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesaa79c012021-07-09 14:29:45 +01002897 dlog_verbose("Receiver's notifications not enabled.\n");
2898 ret = ffa_error(FFA_DENIED);
2899 goto out;
2900 }
2901
2902 /*
2903	 * If the notifications are not bound to the sender, they won't be
2904	 * enabled for the receiver either.
2905 */
2906 if (!vm_notifications_validate_binding(
2907 receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
2908 sender_vm_id, notifications, is_per_vcpu)) {
2909 dlog_verbose("Notifications bindings not valid.\n");
2910 ret = ffa_error(FFA_DENIED);
2911 goto out;
2912 }
2913
2914 if (is_per_vcpu && vcpu_id >= receiver_locked.vm->vcpu_count) {
2915 dlog_verbose("Invalid VCPU ID!\n");
2916 ret = ffa_error(FFA_INVALID_PARAMETERS);
2917 goto out;
2918 }
2919
J-Alves7461ef22021-10-18 17:21:33 +01002920 /* Set notifications pending. */
J-Alvesaa79c012021-07-09 14:29:45 +01002921 vm_notifications_set(receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
2922 notifications, vcpu_id, is_per_vcpu);
2923 dlog_verbose("Set the notifications: %x.\n", notifications);
2924
J-Alves13394022021-06-30 13:48:49 +01002925 if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
2926 dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
2927 vcpu_index(current));
2928 plat_ffa_sri_trigger_not_delayed(current->cpu);
2929 } else {
2930 plat_ffa_sri_state_set(DELAYED);
2931 }
J-Alvesaa79c012021-07-09 14:29:45 +01002932
J-Alves7461ef22021-10-18 17:21:33 +01002933 /*
2934	 * If the notifications set are per-vCPU and the receiver is an SP, the
2935	 * Notification Pending Interrupt can be injected now.
2936	 * If not, it should be injected when the scheduler gives CPU cycles to
2937	 * that vCPU.
2938 */
2939 if (is_per_vcpu && vm_id_is_current_world(receiver_vm_id)) {
2940 struct vcpu *target_vcpu =
2941 vm_get_vcpu(receiver_locked.vm, vcpu_id);
2942
2943 dlog_verbose("Per-vCPU notification, pending NPI.\n");
J-Alves6f72ca82021-11-01 12:34:58 +00002944 internal_interrupt_inject(target_vcpu,
2945 HF_NOTIFICATION_PENDING_INTID,
2946 current, NULL);
J-Alves7461ef22021-10-18 17:21:33 +01002947 }
2948
J-Alves13394022021-06-30 13:48:49 +01002949 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alvesaa79c012021-07-09 14:29:45 +01002950out:
2951 vm_unlock(&receiver_locked);
2952
2953 return ret;
2954}
2955
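/**
 * Packs the pending notification bitmaps into the successful return value of
 * FFA_NOTIFICATION_GET.
 */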
2956static struct ffa_value api_ffa_notification_get_success_return(
2957 ffa_notifications_bitmap_t from_sp, ffa_notifications_bitmap_t from_vm,
2958 ffa_notifications_bitmap_t from_framework)
2959{
2960 return (struct ffa_value){
2961 .func = FFA_SUCCESS_32,
2962 .arg1 = 0U,
2963 .arg2 = (uint32_t)from_sp,
2964 .arg3 = (uint32_t)(from_sp >> 32),
2965 .arg4 = (uint32_t)from_vm,
2966 .arg5 = (uint32_t)(from_vm >> 32),
2967 .arg6 = (uint32_t)from_framework,
2968 .arg7 = (uint32_t)(from_framework >> 32),
2969 };
2970}
2971
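/**
 * Handles an FFA_NOTIFICATION_GET call: retrieves and clears the pending
 * notifications of the given receiver vCPU, from SPs and/or VMs as selected
 * by the flags.
 */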
2972struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
2973 ffa_vcpu_index_t vcpu_id,
2974 uint32_t flags, struct vcpu *current)
2975{
2976 /* TODO: get framework notifications, when these are supported. */
2977 ffa_notifications_bitmap_t sp_notifications = 0;
2978 ffa_notifications_bitmap_t vm_notifications = 0;
2979 struct vm_locked receiver_locked;
2980 struct ffa_value ret;
2981
2982 /*
2983	 * The following check should capture wrong uses of the interface,
2984	 * depending on whether Hafnium is the SPMC or the hypervisor.
2985	 * The rest of the function assumes this condition is met.
2986 */
2987 if (!plat_ffa_is_notification_get_valid(current, receiver_vm_id)) {
2988 dlog_verbose("Invalid use of notifications get interface.\n");
2989 return ffa_error(FFA_INVALID_PARAMETERS);
2990 }
2991
2992 /*
2993	 * This check assumes the receiver is the current VM, and has been
2994	 * enforced by `plat_ffa_is_notification_get_valid`.
2995 */
2996 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
2997
2998 /*
2999	 * `plat_ffa_is_notification_get_valid` ensures the following is never
3000	 * true.
3001 */
3002 CHECK(receiver_locked.vm != NULL);
3003
3004 if (receiver_locked.vm->vcpu_count <= vcpu_id ||
3005 (receiver_locked.vm->vcpu_count != 1 &&
3006 cpu_index(current->cpu) != vcpu_id)) {
3007 dlog_verbose("Invalid VCPU ID!\n");
3008 ret = ffa_error(FFA_INVALID_PARAMETERS);
3009 goto out;
3010 }
3011
3012 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_SP) != 0U) {
J-Alves98ff9562021-09-09 14:39:41 +01003013 if (!plat_ffa_notifications_get_from_sp(
3014 receiver_locked, vcpu_id, &sp_notifications,
3015 &ret)) {
3016			dlog_verbose("Failed to get notifications from SPs.\n");
3017 goto out;
3018 }
J-Alvesaa79c012021-07-09 14:29:45 +01003019 }
3020
3021 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U) {
3022 vm_notifications = vm_notifications_get_pending_and_clear(
3023 receiver_locked, true, vcpu_id);
3024 }
3025
3026 ret = api_ffa_notification_get_success_return(sp_notifications,
3027 vm_notifications, 0);
3028
J-Alvesfe23ebe2021-10-13 16:07:07 +01003029 /*
3030 * If there are no more pending notifications, change `sri_state` to
3031 * handled.
3032 */
3033 if (vm_is_notifications_pending_count_zero()) {
3034 plat_ffa_sri_state_set(HANDLED);
3035 }
3036
J-Alvesaa79c012021-07-09 14:29:45 +01003037out:
3038 vm_unlock(&receiver_locked);
3039
3040 return ret;
3041}
J-Alvesc8e8a222021-06-08 17:33:52 +01003042
3043/**
3044 * Prepares successful return for FFA_NOTIFICATION_INFO_GET, as described by
3045 * the section 17.7.1 of the FF-A v1.1 Beta0 specification.
3046 */
3047static struct ffa_value api_ffa_notification_info_get_success_return(
3048 const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
J-Alvesfe23ebe2021-10-13 16:07:07 +01003049 uint32_t lists_count)
J-Alvesc8e8a222021-06-08 17:33:52 +01003050{
3051 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
3052
3053 /*
3054	 * Copy the content of 'ids' into the 'ret' structure. Use 5 registers
3055	 * (x3-x7) to hold the list of IDs.
3056 */
3057 memcpy_s(&ret.arg3,
3058 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET, ids,
3059 sizeof(ids[0]) * ids_count);
3060
3061 /*
3062	 * According to the spec, x2 should contain:
3063	 * - A bit flagging whether there are more notifications pending;
3064 * - The total number of elements (i.e. total list size);
3065 * - The number of VCPU IDs within each VM specific list.
3066 */
J-Alvesfe23ebe2021-10-13 16:07:07 +01003067 ret.arg2 = vm_notifications_pending_not_retrieved_by_scheduler()
3068 ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING
3069 : 0;
J-Alvesc8e8a222021-06-08 17:33:52 +01003070
3071 ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
3072 << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
3073
3074 for (unsigned int i = 0; i < lists_count; i++) {
3075 ret.arg2 |= (lists_sizes[i] & FFA_NOTIFICATIONS_LIST_SIZE_MASK)
3076 << FFA_NOTIFICATIONS_LIST_SHIFT(i + 1);
3077 }
3078
3079 return ret;
3080}
3081
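/**
 * Handles an FFA_NOTIFICATION_INFO_GET call: returns the list of endpoints
 * and vCPUs with pending notifications so that the primary scheduler can
 * allocate CPU cycles to them. Only callable by the primary VM, or via a call
 * forwarded from the hypervisor.
 */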
3082struct ffa_value api_ffa_notification_info_get(struct vcpu *current)
3083{
3084 /*
3085	 * The following set of variables should be populated with the return
3086	 * info. On successful handling of this interface, they are used to
3087	 * populate the 'ret' structure in accordance with table 17.29 of the
3088	 * FF-A v1.1 Beta0 specification.
3089 */
3090 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
3091 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
3092 uint32_t lists_count = 0;
3093 uint32_t ids_count = 0;
3094 bool list_is_full = false;
J-Alves13394022021-06-30 13:48:49 +01003095 struct ffa_value result;
J-Alvesc8e8a222021-06-08 17:33:52 +01003096
3097 /*
3098	 * This interface can only be called at the NS virtual/physical FF-A
3099	 * instance by the endpoint implementing the primary scheduler and the
3100	 * Hypervisor/OS kernel.
3101	 * In the SPM, the following check passes if the call has been
3102	 * forwarded from the hypervisor.
3103 */
3104 if (current->vm->id != HF_PRIMARY_VM_ID) {
3105 dlog_verbose(
3106 "Only the receiver's scheduler can use this "
3107 "interface\n");
3108 return ffa_error(FFA_NOT_SUPPORTED);
3109 }
3110
J-Alvesca058c22021-09-10 14:02:07 +01003111 /*
3112	 * Forward the call to the other world, and fill the arrays used to
3113	 * assemble the return value.
3114 */
3115 plat_ffa_notification_info_get_forward(
3116 ids, &ids_count, lists_sizes, &lists_count,
3117 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3118
3119 list_is_full = ids_count == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
3120
J-Alvesc8e8a222021-06-08 17:33:52 +01003121 /* Get notifications' info from this world */
3122 for (ffa_vm_count_t index = 0; index < vm_get_count() && !list_is_full;
3123 ++index) {
3124 struct vm_locked vm_locked = vm_lock(vm_find_index(index));
3125
3126 list_is_full = vm_notifications_info_get(
3127 vm_locked, ids, &ids_count, lists_sizes, &lists_count,
3128 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3129
3130 vm_unlock(&vm_locked);
3131 }
3132
3133 if (!list_is_full) {
3134 /* Grab notifications info from other world */
J-Alvesfe23ebe2021-10-13 16:07:07 +01003135 plat_ffa_vm_notifications_info_get(
J-Alvesc8e8a222021-06-08 17:33:52 +01003136 ids, &ids_count, lists_sizes, &lists_count,
3137 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3138 }
3139
3140 if (ids_count == 0) {
J-Alvesca058c22021-09-10 14:02:07 +01003141 dlog_verbose(
3142 "Notification info get has no data to retrieve.\n");
J-Alves13394022021-06-30 13:48:49 +01003143 result = ffa_error(FFA_NO_DATA);
3144 } else {
3145 result = api_ffa_notification_info_get_success_return(
J-Alvesfe23ebe2021-10-13 16:07:07 +01003146 ids, ids_count, lists_sizes, lists_count);
J-Alvesc8e8a222021-06-08 17:33:52 +01003147 }
3148
J-Alvesfe23ebe2021-10-13 16:07:07 +01003149 plat_ffa_sri_state_set(HANDLED);
3150
J-Alves13394022021-06-30 13:48:49 +01003151 return result;
J-Alvesc8e8a222021-06-08 17:33:52 +01003152}
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -07003153
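/**
 * Handles an FFA_MEM_PERM_GET call: returns the access permissions (RO, RW or
 * RX) of the page at the given address in the calling EL0 partition's page
 * table.
 */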
3154struct ffa_value api_ffa_mem_perm_get(vaddr_t base_addr, struct vcpu *current)
3155{
3156 struct vm_locked vm_locked;
3157 struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
3158 bool mode_ret = false;
3159 uint32_t mode = 0;
3160
3161 if (!plat_ffa_is_mem_perm_get_valid(current)) {
3162 return ffa_error(FFA_NOT_SUPPORTED);
3163 }
3164
3165 if (!(current->vm->el0_partition)) {
3166 return ffa_error(FFA_DENIED);
3167 }
3168
3169 vm_locked = vm_lock(current->vm);
3170
3171 /*
3172 * mm_get_mode is used to check if the given base_addr page is already
3173 * mapped. If the page is unmapped, return error. If the page is mapped
3174 * appropriate attributes are returned to the caller. Note that
3175 * mm_get_mode returns true if the address is in the valid VA range as
3176 * supported by the architecture and MMU configurations, as opposed to
3177 * whether a page is mapped or not. For a page to be known as mapped,
3178 * the API must return true AND the returned mode must not have
3179 * MM_MODE_INVALID set.
3180 */
3181 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
3182 va_add(base_addr, PAGE_SIZE), &mode);
3183 if (!mode_ret || (mode & MM_MODE_INVALID)) {
3184 ret = ffa_error(FFA_INVALID_PARAMETERS);
3185 goto out;
3186 }
3187
3188 /* No memory should be marked RWX */
3189 CHECK((mode & (MM_MODE_R | MM_MODE_W | MM_MODE_X)) !=
3190 (MM_MODE_R | MM_MODE_W | MM_MODE_X));
3191
3192 /*
3193 * S-EL0 partitions are expected to have all their pages marked as
3194 * non-global.
3195 */
3196 CHECK((mode & (MM_MODE_NG | MM_MODE_USER)) ==
3197 (MM_MODE_NG | MM_MODE_USER));
3198
3199 if (mode & MM_MODE_W) {
3200 /* No memory should be writeable but not readable. */
3201 CHECK(mode & MM_MODE_R);
3202 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
3203 .arg2 = (uint32_t)(FFA_MEM_PERM_RW)};
3204 } else if (mode & MM_MODE_R) {
3205 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
3206 .arg2 = (uint32_t)(FFA_MEM_PERM_RX)};
3207 if (!(mode & MM_MODE_X)) {
3208 ret.arg2 = (uint32_t)(FFA_MEM_PERM_RO);
3209 }
3210 }
3211out:
3212 vm_unlock(&vm_locked);
3213 return ret;
3214}
3215
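/**
 * Handles an FFA_MEM_PERM_SET call: updates the permissions of a range of
 * pages in the calling EL0 partition's page table to RO, RW or RX, restoring
 * the original mapping if the update cannot be completed.
 */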
3216struct ffa_value api_ffa_mem_perm_set(vaddr_t base_addr, uint32_t page_count,
3217 uint32_t mem_perm, struct vcpu *current)
3218{
3219 struct vm_locked vm_locked;
3220 struct ffa_value ret;
3221 bool mode_ret = false;
3222 uint32_t original_mode;
3223 uint32_t new_mode;
3224 struct mpool local_page_pool;
3225
3226 if (!plat_ffa_is_mem_perm_set_valid(current)) {
3227 return ffa_error(FFA_NOT_SUPPORTED);
3228 }
3229
3230 if (!(current->vm->el0_partition)) {
3231 return ffa_error(FFA_DENIED);
3232 }
3233
3234 if (!is_aligned(va_addr(base_addr), PAGE_SIZE)) {
3235 return ffa_error(FFA_INVALID_PARAMETERS);
3236 }
3237
3238 if ((mem_perm != FFA_MEM_PERM_RW) && (mem_perm != FFA_MEM_PERM_RO) &&
3239 (mem_perm != FFA_MEM_PERM_RX)) {
3240 return ffa_error(FFA_INVALID_PARAMETERS);
3241 }
3242
3243 /*
3244 * Create a local pool so any freed memory can't be used by another
3245 * thread. This is to ensure the original mapping can be restored if any
3246 * stage of the process fails.
3247 */
3248 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
3249
3250 vm_locked = vm_lock(current->vm);
3251
3252 /*
3253 * All regions accessible by the partition are mapped during boot. If we
3254 * cannot get a successful translation for the page range, the request
3255 * to change permissions is rejected.
3256 * mm_get_mode is used to check if the given address range is already
3257 * mapped. If the range is unmapped, return error. If the range is
3258 * mapped appropriate attributes are returned to the caller. Note that
3259 * mm_get_mode returns true if the address is in the valid VA range as
3260 * supported by the architecture and MMU configurations, as opposed to
3261 * whether a page is mapped or not. For a page to be known as mapped,
3262 * the API must return true AND the returned mode must not have
3263 * MM_MODE_INVALID set.
3264 */
3265
3266 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
3267 va_add(base_addr, page_count * PAGE_SIZE),
3268 &original_mode);
3269 if (!mode_ret || (original_mode & MM_MODE_INVALID)) {
3270 ret = ffa_error(FFA_INVALID_PARAMETERS);
3271 goto out;
3272 }
3273
3274 /* Device memory cannot be marked as executable */
3275 if ((original_mode & MM_MODE_D) && (mem_perm == FFA_MEM_PERM_RX)) {
3276 ret = ffa_error(FFA_INVALID_PARAMETERS);
3277 goto out;
3278 }
3279
3280 new_mode = MM_MODE_USER | MM_MODE_NG;
3281
3282 if (mem_perm == FFA_MEM_PERM_RW) {
3283 new_mode |= MM_MODE_R | MM_MODE_W;
3284 } else if (mem_perm == FFA_MEM_PERM_RX) {
3285 new_mode |= MM_MODE_R | MM_MODE_X;
3286 } else if (mem_perm == FFA_MEM_PERM_RO) {
3287 new_mode |= MM_MODE_R;
3288 }
3289
3290 /*
3291 * Safe to re-map memory, since we know the requested permissions are
3292 * valid, and the memory requested to be re-mapped is also valid.
3293 */
3294 if (!mm_identity_prepare(
3295 &vm_locked.vm->ptable, pa_from_va(base_addr),
3296 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3297 new_mode, &local_page_pool)) {
3298 /*
3299 * Defrag the table into the local page pool.
3300 * mm_identity_prepare could have allocated or freed pages to
3301 * split blocks or tables etc.
3302 */
3303 mm_stage1_defrag(&vm_locked.vm->ptable, &local_page_pool);
3304
3305 /*
3306 * Guaranteed to succeed mapping with old mode since the mapping
3307 * with old mode already existed and we have a local page pool
3308 * that should have sufficient memory to go back to the original
3309 * state.
3310 */
3311 CHECK(mm_identity_prepare(
3312 &vm_locked.vm->ptable, pa_from_va(base_addr),
3313 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3314 original_mode, &local_page_pool));
3315 mm_identity_commit(
3316 &vm_locked.vm->ptable, pa_from_va(base_addr),
3317 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3318 original_mode, &local_page_pool);
3319
3320 mm_stage1_defrag(&vm_locked.vm->ptable, &api_page_pool);
3321 ret = ffa_error(FFA_NO_MEMORY);
3322 goto out;
3323 }
3324
3325 mm_identity_commit(
3326 &vm_locked.vm->ptable, pa_from_va(base_addr),
3327 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)), new_mode,
3328 &local_page_pool);
3329
3330 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
3331
3332out:
3333 mpool_fini(&local_page_pool);
3334 vm_unlock(&vm_locked);
3335
3336 return ret;
3337}