/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/ffa.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

static_assert(sizeof(struct ffa_partition_info_v1_0) == 8,
	      "Partition information descriptor size doesn't match the one in "
	      "the FF-A 1.0 EAC specification, Table 82.");
static_assert(sizeof(struct ffa_partition_info) == 24,
	      "Partition information descriptor size doesn't match the one in "
	      "the FF-A 1.1 BETA0 EAC specification, Table 13.34.");

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */

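/*
 * For example, code that must hold both a VM lock and one of its vCPU locks
 * acquires the VM lock first, as api_vcpu_prepare_run() below does when it
 * needs both:
 *
 *	vm_locked = vm_lock(vcpu->vm);
 *	vcpu_locked = vcpu_lock(vcpu);
 *	...
 *	vcpu_unlock(&vcpu_locked);
 *	vm_unlock(&vm_locked);
 */
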
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}

/**
 * Gets the target VM's vCPU:
 * If the VM is UP then return its single vCPU.
 * If the VM is MP then return the vCPU whose index matches the current CPU
 * index.
 */
struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
{
	ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
	struct vcpu *vcpu = NULL;

	CHECK((vm != NULL) && (current != NULL));

	if (vm->vcpu_count == 1) {
		vcpu = vm_get_vcpu(vm, 0);
	} else if (current_cpu_index < vm->vcpu_count) {
		vcpu = vm_get_vcpu(vm, current_cpu_index);
	}

	return vcpu;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
 * is given as an argument to the function.
 *
 * Called to change the context between SPs for direct messaging (when Hafnium
 * is the SPMC), and in the context of the remaining 'api_switch_to_*'
 * functions.
 *
 * This function works for partitions that are:
 * - UP migratable.
 * - MP with pinned Execution Contexts.
 */
static struct vcpu *api_switch_to_vm(struct vcpu *current,
				     struct ffa_value to_ret,
				     enum vcpu_state vcpu_state,
				     ffa_vm_id_t to_id)
{
	struct vm *to_vm = vm_find(to_id);
	struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current);

	CHECK(next != NULL);

	/* Set the return value for the target VM. */
	arch_regs_set_retval(&next->regs, to_ret);

	/* Set the current vCPU state. */
	sl_lock(&current->lock);
	current->state = vcpu_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause FFA_RUN to return and the primary VM to regain control of the
 * CPU.
 */
struct vcpu *api_switch_to_primary(struct vcpu *current,
				   struct ffa_value primary_ret,
				   enum vcpu_state secondary_state)
{
	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret = (struct ffa_value){
					.func = FFA_INTERRUPT_32};

			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	return api_switch_to_vm(current, primary_ret, secondary_state,
				HF_PRIMARY_VM_ID);
}

/**
 * Chooses the next vCPU to run to be the counterpart vCPU in the other
 * world (i.e. the normal world if currently running in the secure
 * world). Sets the current vCPU state to the given vcpu_state parameter.
 * Sets the FF-A return values on the target vCPU in the other world.
 *
 * Called in the context of a direct message response from a secure
 * partition to a VM.
 */
struct vcpu *api_switch_to_other_world(struct vcpu *current,
				       struct ffa_value other_world_ret,
				       enum vcpu_state vcpu_state)
{
	return api_switch_to_vm(current, other_world_ret, vcpu_state,
				HF_OTHER_WORLD_ID);
}

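/*
 * Note on the mailbox states used below: a receiver's RX buffer can only
 * accept a new message in MAILBOX_STATE_EMPTY. Once a message is delivered
 * the state becomes MAILBOX_STATE_RECEIVED, then MAILBOX_STATE_READ while the
 * receiver owns the buffer, and the buffer must be released (e.g. via
 * FFA_RX_RELEASE) before another message can be accepted.
 */
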
/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}

/**
 * Returns true if the given vCPU is executing in the context of an
 * FFA_MSG_SEND_DIRECT_REQ invocation.
 */
static bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
	return locked.vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}

/**
 * Returns true if the VM owning the given vCPU supports managed exit and the
 * vCPU is currently processing a managed exit.
 */
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
	return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
		vcpu_locked.vcpu->processing_managed_exit);
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_PREEMPTED);
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * The current vCPU is blocked on some resource and needs to relinquish
 * control back to the execution context of the endpoint that originally
 * allocated cycles to it.
 */
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return ret;
	}

	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);
	vcpu_unlock(&current_locked);

	if (is_direct_request_ongoing) {
		return ffa_error(FFA_DENIED);
	}

	*next = api_switch_to_primary(
		current,
		(struct ffa_value){.func = FFA_YIELD_32,
				   .arg1 = ffa_vm_vcpu(current->vm->id,
						       vcpu_index(current))},
		VCPU_STATE_BLOCKED);

	return ret;
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if
 * it is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);

	dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

/*
 * Formats the partition info descriptors according to the version supported
 * by the endpoint, and returns the count and size of the descriptors written.
 */
static struct ffa_value send_versioned_partition_info_descriptors(
	struct vm_locked vm_locked, struct ffa_partition_info *partitions,
	uint32_t vm_count)
{
	struct vm *vm = vm_locked.vm;
	uint32_t version = vm->ffa_version;
	uint32_t partition_info_size;
	uint32_t buffer_size;

	if (msg_receiver_busy(vm_locked, NULL, false)) {
		/*
		 * Can't retrieve the partition information if the mailbox is
		 * not available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		return ffa_error(FFA_BUSY);
	}

	if (version == MAKE_FFA_VERSION(1, 0)) {
		struct ffa_partition_info_v1_0 *recv_mailbox = vm->mailbox.recv;

		partition_info_size = sizeof(struct ffa_partition_info_v1_0);
		buffer_size = partition_info_size * vm_count;
		if (buffer_size > HF_MAILBOX_SIZE) {
			dlog_error(
				"Partition information does not fit in the "
				"VM's RX buffer.\n");
			return ffa_error(FFA_NO_MEMORY);
		}

		for (uint32_t i = 0; i < vm_count; i++) {
			/*
			 * Populate the VM's RX buffer with the partition
			 * information.
			 */
			recv_mailbox[i].vm_id = partitions[i].vm_id;
			recv_mailbox[i].vcpu_count = partitions[i].vcpu_count;
			recv_mailbox[i].properties = partitions[i].properties;
		}

	} else {
		partition_info_size = sizeof(struct ffa_partition_info);
		buffer_size = partition_info_size * vm_count;
		if (buffer_size > HF_MAILBOX_SIZE) {
			dlog_error(
				"Partition information does not fit in the "
				"VM's RX buffer.\n");
			return ffa_error(FFA_NO_MEMORY);
		}

		/*
		 * Populate the VM's RX buffer with the partition information.
		 */
		memcpy_s(vm->mailbox.recv, HF_MAILBOX_SIZE, partitions,
			 buffer_size);
	}

	vm->mailbox.recv_size = buffer_size;

	/* Sender is Hypervisor in the normal world (TEE in secure world). */
	vm->mailbox.recv_sender = HF_VM_ID_BASE;
	vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
	vm->mailbox.state = MAILBOX_STATE_READ;

	/*
	 * Return the count of partition information descriptors in w2
	 * and the size of the descriptors in w3.
	 */
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = vm_count,
				  .arg3 = partition_info_size};
}

434
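/**
 * Implements FFA_PARTITION_INFO_GET: fills the caller's RX buffer with
 * information about the partitions matching `uuid` (all partitions if the
 * UUID is Null), or returns only their count if the count flag is set.
 *
 * Illustrative caller sequence (the FF-A ABI convention, not a
 * Hafnium-internal API): invoke FFA_PARTITION_INFO_GET, read `w2` descriptors
 * of `w3` bytes each from the RX buffer, then call FFA_RX_RELEASE so the
 * buffer can be reused by a later query.
 */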
struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
					    const struct ffa_uuid *uuid,
					    const uint32_t flags)
{
	struct vm *current_vm = current->vm;
	ffa_vm_count_t vm_count = 0;
	bool count_flag = (flags & FFA_PARTITION_COUNT_FLAG_MASK) ==
			  FFA_PARTITION_COUNT_FLAG;
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	struct ffa_partition_info partitions[2 * MAX_VMS];
	struct vm_locked vm_locked;
	struct ffa_value ret;

	/* Bits [31:1] must be zero. */
	if ((flags & ~FFA_PARTITION_COUNT_FLAG) != 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * No need to count if we are returning the number of partitions, as we
	 * already know this.
	 */
	if (uuid_is_null && count_flag) {
		vm_count = vm_get_count();
	} else {
		/*
		 * Iterate through the VMs to find the ones with a matching
		 * UUID. A Null UUID retrieves information for all VMs.
		 */
		for (uint16_t index = 0; index < vm_get_count(); ++index) {
			struct vm *vm = vm_find_index(index);

			if (uuid_is_null || ffa_uuid_equal(uuid, &vm->uuid)) {
				uint16_t array_index = vm_count;

				++vm_count;
				if (count_flag) {
					continue;
				}

				partitions[array_index].vm_id = vm->id;
				partitions[array_index].vcpu_count =
					vm->vcpu_count;
				partitions[array_index].properties =
					plat_ffa_partition_properties(
						current_vm->id, vm);
				partitions[array_index].properties |=
					vm_are_notifications_enabled(vm)
						? FFA_PARTITION_NOTIFICATION
						: 0;
				partitions[array_index].uuid = vm->uuid;
			}
		}
	}

	/* If UUID is Null vm_count must not be zero at this stage. */
	CHECK(!uuid_is_null || vm_count != 0);

	/*
	 * When running the Hypervisor:
	 * - If UUID is Null the Hypervisor forwards the query to the SPMC for
	 *   it to fill with secure partitions information.
	 * - If UUID is non-Null vm_count may be zero because the UUID matches
	 *   a secure partition and the query is forwarded to the SPMC.
	 * When running the SPMC:
	 * - If UUID is non-Null and vm_count is zero it means there is no such
	 *   partition identified in the system.
	 */
	plat_ffa_partition_info_get_forward(uuid, flags, partitions, &vm_count);

	/*
	 * Unrecognized UUID: does not match any of the VMs (or SPs)
	 * and is not Null.
	 */
	if (vm_count == 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * If the count flag is set we don't need to return the partition info
	 * descriptors.
	 */
	if (count_flag) {
		return (struct ffa_value){.func = FFA_SUCCESS_32,
					  .arg2 = vm_count};
	}

	vm_locked = vm_lock(current_vm);
	ret = send_versioned_partition_info_descriptors(vm_locked, partitions,
							vm_count);
	vm_unlock(&vm_locked);
	return ret;
}

/**
 * Returns the ID of the VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * Returns the SPMC FF-A ID at NS virtual/physical and secure virtual
 * FF-A instances.
 * DEN0077A FF-A v1.1 Beta0 section 13.9 FFA_SPM_ID_GET.
 */
struct ffa_value api_ffa_spm_id_get(void)
{
#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
	/*
	 * Return the SPMC ID that was fetched during FF-A
	 * initialization.
	 */
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = arch_ffa_spmc_id_get()};
#else
	return ffa_error(FFA_NOT_SUPPORTED);
#endif
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the
 * vCPU is next run, which is up to the scheduler.
 *
 * Returns:
 *  - 0 on success if no further action is needed.
 *  - 1 if it was called by the primary VM and the primary VM now needs to wake
 *    up or kick the target vCPU.
 */
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
				    uint32_t intid, struct vcpu *current,
				    struct vcpu **next)
{
	struct vcpu *target_vcpu = target_locked.vcpu;
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << intid_shift;
	int64_t ret = 0;
611
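	/*
	 * Example of the decomposition above: with 32-bit interrupt bitmap
	 * words (INTERRUPT_REGISTER_BITS == 32), intid 33 yields intid_index 1
	 * and intid_mask 1U << 1, i.e. bit 1 of the second word.
	 */
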
	/*
	 * We only need to change state and (maybe) trigger a virtual interrupt
	 * if it is enabled and was not previously pending. Otherwise we can
	 * skip everything except setting the pending bit.
	 */
	if (!(target_vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	if ((target_vcpu->interrupts.interrupt_type[intid_index] &
	     intid_mask) == (INTERRUPT_TYPE_IRQ << intid_shift)) {
		vcpu_irq_count_increment(target_locked);
	} else {
		vcpu_fiq_count_increment(target_locked);
	}

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (vcpu_interrupt_count_get(target_locked) != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_vcpu && next != NULL) {
		*next = api_wake_up(current, target_vcpu);
	}

out:
	/* Either way, make it pending. */
	target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

	return ret;
}

/* Wrapper around api_interrupt_inject_locked that locks the target vCPU. */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	int64_t ret;
	struct vcpu_locked target_locked;

	target_locked = vcpu_lock(target_vcpu);
	ret = api_interrupt_inject_locked(target_locked, intid, current, next);
	vcpu_unlock(&target_locked);

	return ret;
}

/**
 * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
 * or FFA_MSG_WAIT call.
 */
static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
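		/*
		 * The sender and receiver IDs are packed into arg1: the
		 * sender in bits [31:16] and the receiver in bits [15:0].
		 */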
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}

struct ffa_value api_ffa_msg_wait(struct vcpu *current, struct vcpu **next,
				  struct ffa_value *args)
{
	struct ffa_value ret;

	if (args->arg1 != 0U || args->arg2 != 0U || args->arg3 != 0U ||
	    args->arg4 != 0U || args->arg5 != 0U || args->arg6 != 0U ||
	    args->arg7 != 0U) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	if (plat_ffa_msg_wait_prepare(current, next, &ret)) {
		return ret;
	}

	return api_ffa_msg_recv(true, current, next);
}

/**
 * Prepares the vCPU to run by updating its state and determining whether a
 * return value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(struct vcpu *current, struct vcpu *vcpu,
				 struct ffa_value *run_ret)
{
	struct vcpu_locked vcpu_locked;
	struct vm_locked vm_locked;
	bool ret;
	uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
	bool need_vm_lock;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	vcpu_locked = vcpu_lock(vcpu);

#if SECURE_WORLD == 1
	bool is_vcpu_reset_and_start = vcpu_secondary_reset_and_start(
		vcpu_locked, vcpu->vm->secondary_ep, 0);
	if (is_vcpu_reset_and_start) {
		dlog_verbose("%s secondary cold boot vmid %#x cpu id %#x\n",
			     __func__, vcpu->vm->id, current->cpu->id);
	}

#endif
	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_WAITING ||
		       (!vcpu->vm->el0_partition &&
			(vcpu->state == VCPU_STATE_BLOCKED_INTERRUPT ||
			 vcpu->state == VCPU_STATE_BLOCKED ||
			 vcpu->state == VCPU_STATE_PREEMPTED));

	if (need_vm_lock) {
		vcpu_unlock(&vcpu_locked);
		vm_locked = vm_lock(vcpu->vm);
		vcpu_locked = vcpu_lock(vcpu);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running the state will be
	 * `VCPU_STATE_RUNNING` and `regs_available` will be false. Once it
	 * stops running, but while Hafnium is still switching back to the
	 * primary, there will be a brief period during which the state has
	 * been updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for
	 * the purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = ffa_error(FFA_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %#x vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_WAITING:
		/*
		 * An initial FFA_RUN is necessary for a secondary VM or SP to
		 * reach the message wait loop.
		 */
		if (!vcpu->is_bootstrapped) {
			vcpu->is_bootstrapped = true;
			break;
		}

		assert(need_vm_lock == true);
		if (!vm_locked.vm->el0_partition &&
		    plat_ffa_inject_notification_pending_interrupt(
			    vcpu_locked, current, vm_locked)) {
			break;
		}

		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     ffa_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}

		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);
			if (timer_remaining_ns == 0) {
				break;
			}
		} else {
			dlog_verbose("Timer disabled\n");
		}
		run_ret->func = FFA_MSG_WAIT_32;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;
		ret = false;
		goto out;
	case VCPU_STATE_BLOCKED_INTERRUPT:
		if (need_vm_lock &&
		    plat_ffa_inject_notification_pending_interrupt(
			    vcpu_locked, current, vm_locked)) {
			assert(vcpu_interrupt_count_get(vcpu_locked) > 0);
			break;
		}

		/* Allow virtual interrupts to be delivered. */
		if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
			break;
		}

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}
		}

		/*
		 * The vCPU is not ready to run, return the appropriate code to
		 * the primary which called vcpu_run.
		 */
		run_ret->func = HF_FFA_RUN_WAIT_FOR_INTERRUPT;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;

		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED:
		/* A blocked vCPU is run unconditionally. Fall through. */
	case VCPU_STATE_PREEMPTED:
		/*
		 * Check whether a notification pending interrupt (NPI) is to
		 * be injected here.
		 */
		if (need_vm_lock) {
			plat_ffa_inject_notification_pending_interrupt(
				vcpu_locked, current, vm_locked);
		}
		break;
	default:
		/*
		 * Execution not expected to reach here. Deny the request
		 * gracefully.
		 */
		*run_ret = ffa_error(FFA_DENIED);
		ret = false;
		goto out;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

#if SECURE_WORLD == 1
	/* Set the designated GP register with the physical core index. */
	if (is_vcpu_reset_and_start) {
		vcpu_set_phys_core_idx(vcpu_locked.vcpu);
	}
#endif

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	vcpu_unlock(&vcpu_locked);
	if (need_vm_lock) {
		vm_unlock(&vm_locked);
	}
	return ret;
}

925
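/**
 * Implements FFA_RUN: runs the given vCPU of the given VM on the current
 * physical CPU. On failure, the returned `ffa_value` carries the reason the
 * vCPU could not be run, e.g. FFA_BUSY when it is already running on another
 * physical CPU.
 */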
struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			     struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);

	if (!plat_ffa_run_checks(current, vm_id, vcpu_idx, &ret, next)) {
		return ret;
	}

	if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
		return ret;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = FFA_INTERRUPT_32;
	ret.arg1 = 0;
	ret.arg2 = 0;

out:
	return ret;
}

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_ffa_rxtx_map and
 * api_ffa_rx_release after they've succeeded. If a secondary VM is running and
 * there are waiters, it also switches back to the primary VM for it to wake
 * waiters up.
 */
static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
					  struct vcpu *current,
					  struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
		VCPU_STATE_WAITING);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages.
 */
static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
				    struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    uint32_t extra_attributes,
				    struct mpool *local_page_pool)
{
	bool ret;

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	return ret;
}

/**
 * Sanity checks and configures the send and receive pages in the VM stage-2
 * and hypervisor stage-1 page tables.
 *
 * Returns:
 *  - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *    aligned, are the same or have invalid attributes.
 *  - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *    due to insufficient page table memory.
 *  - FFA_ERROR FFA_DENIED if the pages are already mapped.
 *  - FFA_SUCCESS on success if no further action is needed.
 */

struct ffa_value api_vm_configure_pages(
	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
	ipaddr_t send, ipaddr_t recv, uint32_t page_count,
	struct mpool *local_page_pool)
{
	struct ffa_value ret;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode = 0;
	uint32_t orig_recv_mode = 0;
	uint32_t extra_attributes;

	/* We only allow these to be set up once. */
	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Set stage 2 translation tables only for virtual FF-A instances. */
	if (vm_id_is_current_world(vm_locked.vm->id)) {
		/*
		 * Ensure the pages are valid, owned and exclusive to the VM
		 * and that the VM has the required access to the memory.
		 */
		if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
				     &orig_send_mode) ||
		    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
		    (orig_send_mode & MM_MODE_R) == 0 ||
		    (orig_send_mode & MM_MODE_W) == 0) {
			dlog_error(
				"VM doesn't have required access rights to map "
				"TX buffer in stage 2.\n");
			ret = ffa_error(FFA_INVALID_PARAMETERS);
			goto out;
		}

		if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
				     &orig_recv_mode) ||
		    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
		    (orig_recv_mode & MM_MODE_R) == 0) {
			dlog_error(
				"VM doesn't have required access rights to map "
				"RX buffer in stage 2.\n");
			ret = ffa_error(FFA_INVALID_PARAMETERS);
			goto out;
		}

		/* Take memory ownership away from the VM and mark as shared. */
		uint32_t mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R |
				MM_MODE_W;
		if (vm_locked.vm->el0_partition) {
			mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
				     mode, local_page_pool, NULL)) {
			dlog_error(
				"Cannot allocate a new entry in stage 2 "
				"translation table.\n");
			ret = ffa_error(FFA_NO_MEMORY);
			goto out;
		}

		mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
		if (vm_locked.vm->el0_partition) {
			mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
				     mode, local_page_pool, NULL)) {
			/* TODO: partial defrag of failed range. */
			/* Recover any memory consumed in failed mapping. */
			vm_ptable_defrag(vm_locked, local_page_pool);
			goto fail_undo_send;
		}
	}

	/* Get extra send/recv pages mapping attributes for the given VM ID. */
	extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);

	/*
	 * For EL0 partitions, since both the partition and the hypervisor code
	 * use the EL2&0 translation regime, it is critical to mark the
	 * mappings of the send and recv buffers as non-global in the TLB. For
	 * one, if we don't mark them as non-global, it would cause TLB
	 * conflicts since there would be an identity mapping with the
	 * non-global attribute in the partition's page tables, but another
	 * identity mapping in the hypervisor page tables with the global
	 * attribute. The other issue is one of security: we don't want other
	 * partitions to be able to access other partitions' buffers through
	 * cached translations.
	 */
1223 if (vm_locked.vm->el0_partition) {
1224 extra_attributes |= MM_MODE_NG;
1225 }
1226
Manish Pandeyd34f8892020-06-19 17:41:07 +01001227 if (!api_vm_configure_stage1(mm_stage1_locked, vm_locked, pa_send_begin,
1228 pa_send_end, pa_recv_begin, pa_recv_end,
Olivier Deprez96a2a262020-06-11 17:21:38 +02001229 extra_attributes, local_page_pool)) {
Andrew Sculle1322792019-07-01 17:46:10 +01001230 goto fail_undo_send_and_recv;
1231 }
1232
Manish Pandeyd34f8892020-06-19 17:41:07 +01001233 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Sculle1322792019-07-01 17:46:10 +01001234 goto out;
1235
Andrew Sculle1322792019-07-01 17:46:10 +01001236fail_undo_send_and_recv:
Andrew Scull3c257452019-11-26 13:32:50 +00001237 CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001238 orig_recv_mode, local_page_pool, NULL));
Andrew Sculle1322792019-07-01 17:46:10 +01001239
1240fail_undo_send:
Andrew Scull3c257452019-11-26 13:32:50 +00001241 CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
Manish Pandeyd34f8892020-06-19 17:41:07 +01001242 orig_send_mode, local_page_pool, NULL));
1243 ret = ffa_error(FFA_NO_MEMORY);
Andrew Sculle1322792019-07-01 17:46:10 +01001244
1245out:
Andrew Sculle1322792019-07-01 17:46:10 +01001246 return ret;
1247}
1248
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001249static void api_get_rxtx_description(struct vm_locked vm_locked, ipaddr_t *send,
1250 ipaddr_t *recv, uint32_t *page_count,
1251 ffa_vm_id_t *owner_vm_id)
1252{
1253 /*
1254	 * If the message has been forwarded, the effective addresses are in
1255	 * the hypervisor's TX buffer.
1256 */
1257 bool forwarded = (vm_locked.vm->id == HF_OTHER_WORLD_ID) &&
1258 (ipa_addr(*send) == 0) && (ipa_addr(*recv) == 0) &&
1259 (*page_count == 0);
1260
1261 if (forwarded) {
1262 struct ffa_endpoint_rx_tx_descriptor *endpoint_desc =
1263 (struct ffa_endpoint_rx_tx_descriptor *)
1264 vm_locked.vm->mailbox.send;
1265 struct ffa_composite_memory_region *rx_region =
1266 ffa_enpoint_get_rx_memory_region(endpoint_desc);
1267 struct ffa_composite_memory_region *tx_region =
1268 ffa_enpoint_get_tx_memory_region(endpoint_desc);
1269
1270 *owner_vm_id = endpoint_desc->endpoint_id;
1271 *recv = ipa_init(rx_region->constituents[0].address);
1272 *send = ipa_init(tx_region->constituents[0].address);
1273 *page_count = rx_region->constituents[0].page_count;
1274 } else {
1275 *owner_vm_id = vm_locked.vm->id;
1276 }
1277}
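
/*
 * Illustrative sketch of the forwarding convention detected above: when the
 * hypervisor relays a VM's FFA_RXTX_MAP to the SPMC, it zeroes the register
 * arguments and places an endpoint RX/TX descriptor in its own TX buffer.
 * The variable below is for illustration only; the actual relay call site
 * lives in the platform forwarding code.
 *
 *	struct ffa_value relay = {
 *		.func = FFA_RXTX_MAP_64,
 *		.arg1 = 0,	// send == 0
 *		.arg2 = 0,	// recv == 0
 *		.arg3 = 0,	// page_count == 0: descriptor is in TX buffer
 *	};
 */
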
Andrew Sculle1322792019-07-01 17:46:10 +01001278/**
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001279 * Configures the VM to send/receive data through the specified pages. The pages
Manish Pandeyd34f8892020-06-19 17:41:07 +01001280 * must not be shared. Locking of the page tables combined with a local memory
1281 * pool ensures there will always be enough memory to recover from any errors
1282 * that arise. The stage-1 page tables must be locked so memory cannot be taken
1283 * by another core, which could result in this transaction being unable to roll
1284 * back in the case of an error.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001285 *
1286 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001287 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001288 * aligned, are the same, or have invalid attributes.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001289 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001290 * due to insufficient page table memory.
Daniel Boulby6f8941e2021-06-14 18:27:18 +01001291 * - FFA_ERROR FFA_DENIED if the pages are already mapped.
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001292 * - FFA_SUCCESS on success if no further action is needed.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001293 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001294struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
Federico Recanati9f1b6532022-04-14 13:15:28 +02001295 uint32_t page_count, struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001296{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001297 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001298 struct ffa_value ret;
Manish Pandeyd34f8892020-06-19 17:41:07 +01001299 struct vm_locked vm_locked;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001300 struct vm_locked owner_vm_locked;
Manish Pandeyd34f8892020-06-19 17:41:07 +01001301 struct mm_stage1_locked mm_stage1_locked;
1302 struct mpool local_page_pool;
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001303 ffa_vm_id_t owner_vm_id;
1304
1305 vm_locked = vm_lock(vm);
1306 /*
1307	 * Get the original buffer addresses and VM ID in case of a forwarded
1308	 * message.
1309 */
1310 api_get_rxtx_description(vm_locked, &send, &recv, &page_count,
1311 &owner_vm_id);
1312 vm_unlock(&vm_locked);
1313
1314 owner_vm_locked = plat_ffa_vm_find_locked_create(owner_vm_id);
1315 if (owner_vm_locked.vm == NULL) {
1316 dlog_error("Cannot map RX/TX for VM ID %#x, not found.\n",
1317 owner_vm_id);
1318 return ffa_error(FFA_DENIED);
1319 }
Andrew Scull220e6212018-12-21 18:09:00 +00001320
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001321 /*
Manish Pandeyd34f8892020-06-19 17:41:07 +01001322 * Create a local pool so any freed memory can't be used by another
1323 * thread. This is to ensure the original mapping can be restored if any
1324 * stage of the process fails.
Andrew Scull3c0a90a2019-07-01 11:55:53 +01001325 */
Manish Pandeyd34f8892020-06-19 17:41:07 +01001326 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
1327
Manish Pandeyd34f8892020-06-19 17:41:07 +01001328 mm_stage1_locked = mm_lock_stage1();
Andrew Scull220e6212018-12-21 18:09:00 +00001329
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001330 ret = api_vm_configure_pages(mm_stage1_locked, owner_vm_locked, send,
1331 recv, page_count, &local_page_pool);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001332 if (ret.func != FFA_SUCCESS_32) {
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001333 goto exit;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001334 }
1335
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001336 /* Forward buffer mapping to SPMC if coming from a VM. */
1337 plat_ffa_rxtx_map_forward(owner_vm_locked);
1338
Federico Recanati9f1b6532022-04-14 13:15:28 +02001339 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Andrew Scull220e6212018-12-21 18:09:00 +00001340
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001341exit:
Manish Pandeyd34f8892020-06-19 17:41:07 +01001342 mpool_fini(&local_page_pool);
Manish Pandeyd34f8892020-06-19 17:41:07 +01001343 mm_unlock_stage1(&mm_stage1_locked);
Federico Recanati8d8b1cf2022-04-14 13:16:00 +02001344 vm_unlock(&owner_vm_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001345
1346 return ret;
1347}
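
/*
 * Usage sketch (illustrative, not part of this file): mapping a VM's RX/TX
 * pair, assuming the vmapi wrapper ffa_rxtx_map(send, recv) from
 * "vmapi/hf/call.h" and page-aligned buffers.
 *
 *	alignas(PAGE_SIZE) static uint8_t tx[PAGE_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t rx[PAGE_SIZE];
 *
 *	struct ffa_value ret = ffa_rxtx_map((hf_ipaddr_t)tx, (hf_ipaddr_t)rx);
 *	if (ret.func != FFA_SUCCESS_32) {
 *		// e.g. FFA_DENIED if a pair is already mapped, or
 *		// FFA_NO_MEMORY if page table allocation failed (see above).
 *	}
 */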
1348
1349/**
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001350 * Unmaps the RX/TX buffer pair of a partition or partition manager from the
1351 * translation regime of the caller. Unmaps the region for the hypervisor
1352 * and sets the memory region back to owned and exclusive for the component.
1353 * Since the memory region was mapped in the page table when the buffers
1354 * were originally created, it can safely be remapped.
1355 *
1356 * Returns:
1357 * - FFA_ERROR FFA_INVALID_PARAMETERS if there is no buffer pair registered on
1358 * behalf of the caller.
1359 * - FFA_SUCCESS on success if no further action is needed.
1360 */
1361struct ffa_value api_ffa_rxtx_unmap(ffa_vm_id_t allocator_id,
1362 struct vcpu *current)
1363{
1364 struct vm *vm = current->vm;
1365 struct vm_locked vm_locked;
Federico Recanati8da9e332022-02-10 11:00:17 +01001366 ffa_vm_id_t owner_vm_id;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001367 struct mm_stage1_locked mm_stage1_locked;
1368 paddr_t send_pa_begin;
1369 paddr_t send_pa_end;
1370 paddr_t recv_pa_begin;
1371 paddr_t recv_pa_end;
Federico Recanati8da9e332022-02-10 11:00:17 +01001372 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001373
Federico Recanati8da9e332022-02-10 11:00:17 +01001374 /* Ensure `allocator_id` is set only at Non-Secure Physical instance. */
1375 if (vm_id_is_current_world(vm->id) && (allocator_id != 0)) {
1376 dlog_error("`allocator_id` must be 0 at virtual instances.\n");
1377 return ffa_error(FFA_INVALID_PARAMETERS);
1378 }
1379
1380 /* VM ID of which buffers have to be unmapped. */
1381 owner_vm_id = (allocator_id != 0) ? allocator_id : vm->id;
1382
1383 vm_locked = plat_ffa_vm_find_locked(owner_vm_id);
1384 vm = vm_locked.vm;
1385 if (vm == NULL) {
1386 dlog_error("Cannot unmap RX/TX for VM ID %#x, not found.\n",
1387 owner_vm_id);
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001388 return ffa_error(FFA_INVALID_PARAMETERS);
1389 }
1390
1391 /* Get send and receive buffers. */
1392 if (vm->mailbox.send == NULL || vm->mailbox.recv == NULL) {
Olivier Deprez86d87ae2021-08-19 14:27:46 +02001393 dlog_verbose(
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001394 "No buffer pair registered on behalf of the caller.\n");
Federico Recanati8da9e332022-02-10 11:00:17 +01001395 ret = ffa_error(FFA_INVALID_PARAMETERS);
1396 goto out;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001397 }
1398
1399 /* Currently a mailbox size of 1 page is assumed. */
1400 send_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.send));
1401 send_pa_end = pa_add(send_pa_begin, HF_MAILBOX_SIZE);
1402 recv_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.recv));
1403 recv_pa_end = pa_add(recv_pa_begin, HF_MAILBOX_SIZE);
1404
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001405 mm_stage1_locked = mm_lock_stage1();
1406
Federico Recanati8da9e332022-02-10 11:00:17 +01001407 /* Reset stage 2 mapping only for virtual FF-A instances. */
1408 if (vm_id_is_current_world(owner_vm_id)) {
1409 /*
1410 * Set the memory region of the buffers back to the default mode
1411 * for the VM. Since this memory region was already mapped for
1412 * the RXTX buffers we can safely remap them.
1413 */
1414 CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
1415 MM_MODE_R | MM_MODE_W | MM_MODE_X,
1416 &api_page_pool, NULL));
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001417
Federico Recanati8da9e332022-02-10 11:00:17 +01001418 CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
1419 MM_MODE_R | MM_MODE_W | MM_MODE_X,
1420 &api_page_pool, NULL));
1421 }
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001422
1423 /* Unmap the buffers in the partition manager. */
1424 CHECK(mm_unmap(mm_stage1_locked, send_pa_begin, send_pa_end,
1425 &api_page_pool));
1426 CHECK(mm_unmap(mm_stage1_locked, recv_pa_begin, recv_pa_end,
1427 &api_page_pool));
1428
1429 vm->mailbox.send = NULL;
1430 vm->mailbox.recv = NULL;
Federico Recanati10bd06c2022-02-23 17:32:59 +01001431 plat_ffa_vm_destroy(vm_locked);
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001432
Federico Recanati8da9e332022-02-10 11:00:17 +01001433 /* Forward buffer unmapping to SPMC if coming from a VM. */
1434 plat_ffa_rxtx_unmap_forward(owner_vm_id);
1435
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001436 mm_unlock_stage1(&mm_stage1_locked);
Federico Recanati8da9e332022-02-10 11:00:17 +01001437
1438out:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001439 vm_unlock(&vm_locked);
1440
Federico Recanati8da9e332022-02-10 11:00:17 +01001441 return ret;
Daniel Boulby9e420ca2021-07-07 15:03:49 +01001442}
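
/*
 * Usage sketch (illustrative): the unmap counterpart, assuming a wrapper
 * ffa_rxtx_unmap() that issues FFA_RXTX_UNMAP with w1 = 0, as required at
 * virtual FF-A instances; only the hypervisor may pass a non-zero
 * allocator ID, to unmap a pair it forwarded on behalf of a VM.
 *
 *	struct ffa_value ret = ffa_rxtx_unmap();
 *	if (ret.func == FFA_ERROR_32) {
 *		// FFA_INVALID_PARAMETERS: no pair registered for the caller.
 *	}
 */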
1443
1444/**
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001445 * Notifies the `to` VM about the message currently in its mailbox, possibly
1446 * with the help of the primary VM.
1447 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001448static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
1449 struct vcpu *current, struct vcpu **next)
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001450{
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001451 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
1452 struct ffa_value primary_ret = {
1453 .func = FFA_MSG_SEND_32,
Andrew Walbranf76f5752019-12-03 18:33:08 +00001454 .arg1 = ((uint32_t)from_id << 16) | to.vm->id,
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001455 };
1456
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001457 /* Messages for the primary VM are delivered directly. */
1458 if (to.vm->id == HF_PRIMARY_VM_ID) {
1459 /*
Andrew Walbrane7ad3c02019-12-24 17:03:04 +00001460 * Only tell the primary VM the size and other details if the
1461 * message is for it, to avoid leaking data about messages for
1462 * other VMs.
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001463 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001464 primary_ret = ffa_msg_recv_return(to.vm);
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001465
1466 to.vm->mailbox.state = MAILBOX_STATE_READ;
1467 *next = api_switch_to_primary(current, primary_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001468 VCPU_STATE_BLOCKED);
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001469 return ret;
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001470 }
1471
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001472 to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
1473
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001474 /* Messages for the TEE are sent on via the dispatcher. */
1475 if (to.vm->id == HF_TEE_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001476 struct ffa_value call = ffa_msg_recv_return(to.vm);
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001477
Olivier Deprez112d2b52020-09-30 07:39:23 +02001478 ret = arch_other_world_call(call);
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001479 /*
1480 * After the call to the TEE completes it must have finished
1481 * reading its RX buffer, so it is ready for another message.
1482 */
1483 to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001484 /*
1485 * Don't return to the primary VM in this case, as the TEE is
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001486 * not (yet) scheduled via FF-A.
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001487 */
Andrew Walbran11cff3a2020-02-28 11:33:17 +00001488 return ret;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001489 }
1490
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001491 /* Return to the primary VM directly or with a switch. */
Andrew Walbranf76f5752019-12-03 18:33:08 +00001492 if (from_id != HF_PRIMARY_VM_ID) {
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001493 *next = api_switch_to_primary(current, primary_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001494 VCPU_STATE_BLOCKED);
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001495 }
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001496
1497 return ret;
Andrew Walbrane0f575f2019-10-16 16:00:12 +01001498}
1499
1500/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001501 * Copies data from the sender's send buffer to the recipient's receive buffer
1502 * and notifies the recipient.
Wedson Almeida Filho17c997f2019-01-09 18:50:09 +00001503 *
1504 * If the recipient's receive buffer is busy, it can optionally register the
1505 * caller to be notified when the recipient's receive buffer becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001506 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001507struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
1508 ffa_vm_id_t receiver_vm_id, uint32_t size,
1509 uint32_t attributes, struct vcpu *current,
1510 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001511{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001512 struct vm *from = current->vm;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001513 struct vm *to;
Andrew Walbran82d6d152019-12-24 15:02:06 +00001514 struct vm_locked to_locked;
Andrew Walbran70bc8622019-10-07 14:15:58 +01001515 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001516 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001517 struct vcpu_locked current_locked;
1518 bool is_direct_request_ongoing;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001519 bool notify =
1520 (attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;
Andrew Scull19503262018-09-20 14:48:39 +01001521
Andrew Walbran70bc8622019-10-07 14:15:58 +01001522 /* Ensure sender VM ID corresponds to the current VM. */
1523 if (sender_vm_id != from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001524 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001525 }
1526
1527 /* Disallow reflexive requests as this suggests an error in the VM. */
1528 if (receiver_vm_id == from->id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001529 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001530 }
1531
1532 /* Limit the size of transfer. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001533 if (size > FFA_MSG_PAYLOAD_MAX) {
1534 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran70bc8622019-10-07 14:15:58 +01001535 }
1536
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001537 /*
1538 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1539 * invocation.
1540 */
1541 current_locked = vcpu_lock(current);
1542 is_direct_request_ongoing =
1543 is_ffa_direct_msg_request_ongoing(current_locked);
1544 vcpu_unlock(&current_locked);
1545
1546 if (is_direct_request_ongoing) {
1547 return ffa_error(FFA_DENIED);
1548 }
1549
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001550 /* Ensure the receiver VM exists. */
1551 to = vm_find(receiver_vm_id);
1552 if (to == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001553 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran0b60c4f2019-12-10 17:05:29 +00001554 }
1555
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001556 /*
Andrew Walbran70bc8622019-10-07 14:15:58 +01001557 * Check that the sender has configured its send buffer. If the tx
1558 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
1559 * be safely accessed after releasing the lock since the tx mailbox
1560 * address can only be configured once.
Jose Marinhoa1dfeda2019-02-27 16:46:03 +00001561 */
1562 sl_lock(&from->lock);
1563 from_msg = from->mailbox.send;
1564 sl_unlock(&from->lock);
1565
1566 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001567 return ffa_error(FFA_INVALID_PARAMETERS);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001568 }
1569
Andrew Walbran82d6d152019-12-24 15:02:06 +00001570 to_locked = vm_lock(to);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001571
Andrew Walbran82d6d152019-12-24 15:02:06 +00001572 if (msg_receiver_busy(to_locked, from, notify)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001573 ret = ffa_error(FFA_BUSY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001574 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001575 }
1576
Andrew Walbran82d6d152019-12-24 15:02:06 +00001577 /* Copy data. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001578 memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
Andrew Walbran82d6d152019-12-24 15:02:06 +00001579 to->mailbox.recv_size = size;
1580 to->mailbox.recv_sender = sender_vm_id;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001581 to->mailbox.recv_func = FFA_MSG_SEND_32;
Andrew Walbran2619e0a2020-01-10 16:37:50 +00001582 ret = deliver_msg(to_locked, sender_vm_id, current, next);
Andrew Scullaa039b32018-10-04 15:02:26 +01001583
1584out:
Andrew Walbran82d6d152019-12-24 15:02:06 +00001585 vm_unlock(&to_locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001586
Wedson Almeida Filho80eb4a32018-11-30 17:11:15 +00001587 return ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001588}
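
/*
 * Usage sketch (illustrative): the w1 packing consumed above is
 * (sender << 16) | receiver; the wrapper name is an assumption.
 *
 *	memcpy_s(tx, FFA_MSG_PAYLOAD_MAX, payload, size);
 *	struct ffa_value ret =
 *		ffa_msg_send(own_id, HF_PRIMARY_VM_ID, size, 0);
 *	if (ret.func == FFA_ERROR_32 && ret.arg2 == FFA_BUSY) {
 *		// Receiver mailbox busy; retry, or set
 *		// FFA_MSG_SEND_NOTIFY to be told when it frees up.
 *	}
 */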
1589
1590/**
Andrew Scullec52ddf2019-08-20 10:41:01 +01001591 * Checks whether the vCPU's attempt to block for a message has already been
1592 * interrupted or whether it is allowed to block.
1593 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001594bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
Andrew Scullec52ddf2019-08-20 10:41:01 +01001595{
Manish Pandey35e452f2021-02-18 21:36:34 +00001596 struct vcpu_locked current_locked;
Andrew Scullec52ddf2019-08-20 10:41:01 +01001597 bool interrupted;
1598
Manish Pandey35e452f2021-02-18 21:36:34 +00001599 current_locked = vcpu_lock(current);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001600
1601 /*
1602 * Don't block if there are enabled and pending interrupts, to match
1603 * behaviour of wait_for_interrupt.
1604 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001605 interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001606
Manish Pandey35e452f2021-02-18 21:36:34 +00001607 vcpu_unlock(&current_locked);
Andrew Scullec52ddf2019-08-20 10:41:01 +01001608
1609 return interrupted;
1610}
1611
1612/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001613 * Receives a message from the mailbox. If one isn't available, this function
1614 * can optionally block the caller until one becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001615 *
Andrew Scullaa039b32018-10-04 15:02:26 +01001616 * No new messages can be received until the mailbox has been cleared.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001617 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001618struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
1619 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001620{
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001621 bool is_direct_request_ongoing;
1622 struct vcpu_locked current_locked;
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001623 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001624 struct ffa_value return_code;
J-Alvesb37fd082020-10-22 12:29:21 +01001625 bool is_from_secure_world =
1626 (current->vm->id & HF_VM_ID_WORLD_MASK) != 0;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001627
Andrew Scullaa039b32018-10-04 15:02:26 +01001628 /*
1629 * The primary VM will receive messages as a status code from running
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001630 * vCPUs and must not call this function.
Andrew Scullaa039b32018-10-04 15:02:26 +01001631 */
J-Alvesb37fd082020-10-22 12:29:21 +01001632 if (!is_from_secure_world && vm->id == HF_PRIMARY_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001633 return ffa_error(FFA_NOT_SUPPORTED);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001634 }
1635
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001636 /*
1637 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1638 * invocation.
1639 */
1640 current_locked = vcpu_lock(current);
1641 is_direct_request_ongoing =
1642 is_ffa_direct_msg_request_ongoing(current_locked);
1643 vcpu_unlock(&current_locked);
1644
1645 if (is_direct_request_ongoing) {
1646 return ffa_error(FFA_DENIED);
1647 }
1648
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001649 sl_lock(&vm->lock);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001650
Andrew Scullaa039b32018-10-04 15:02:26 +01001651 /* Return pending messages without blocking. */
Andrew Sculld6ee1102019-04-05 22:12:42 +01001652 if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
1653 vm->mailbox.state = MAILBOX_STATE_READ;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001654 return_code = ffa_msg_recv_return(vm);
Jose Marinho3e2442f2019-03-12 13:30:37 +00001655 goto out;
1656 }
1657
1658 /* No pending message so fail if not allowed to block. */
1659 if (!block) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001660 return_code = ffa_error(FFA_RETRY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001661 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001662 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001663
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001664 /*
Jose Marinho3e2442f2019-03-12 13:30:37 +00001665 * From this point onward this call can only be interrupted or a message
1666 * received. If a message is received the return value will be set at
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001667 * that time to FFA_SUCCESS.
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001668 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001669 return_code = ffa_error(FFA_INTERRUPTED);
1670 if (api_ffa_msg_recv_block_interrupted(current)) {
Andrew Scullaa039b32018-10-04 15:02:26 +01001671 goto out;
1672 }
1673
J-Alvesb37fd082020-10-22 12:29:21 +01001674 if (is_from_secure_world) {
1675 /* Return to other world if caller is a SP. */
1676 *next = api_switch_to_other_world(
1677 current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001678 VCPU_STATE_WAITING);
J-Alvesb37fd082020-10-22 12:29:21 +01001679 } else {
1680 /* Switch back to primary VM to block. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001681 struct ffa_value run_return = {
1682 .func = FFA_MSG_WAIT_32,
1683 .arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
Andrew Walbranb4816552018-12-05 17:35:42 +00001684 };
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001685
Andrew Walbranb4816552018-12-05 17:35:42 +00001686 *next = api_switch_to_primary(current, run_return,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05001687 VCPU_STATE_WAITING);
Andrew Walbranb4816552018-12-05 17:35:42 +00001688 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001689out:
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001690 sl_unlock(&vm->lock);
1691
Jose Marinho3e2442f2019-03-12 13:30:37 +00001692 return return_code;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001693}
1694
1695/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001696 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
1697 * by this function, the caller must have called api_mailbox_send before with
1698 * the notify argument set to true, and this call must have failed because the
1699 * mailbox was not available.
1700 *
1701 * It should be called repeatedly to retrieve a list of VMs.
1702 *
1703 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
1704 * became writable.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001705 */
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001706int64_t api_mailbox_writable_get(const struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001707{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001708 struct vm *vm = current->vm;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001709 struct wait_entry *entry;
Andrew Scullc0e569a2018-10-02 18:05:21 +01001710 int64_t ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001711
1712 sl_lock(&vm->lock);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001713 if (list_empty(&vm->mailbox.ready_list)) {
1714 ret = -1;
1715 goto exit;
1716 }
1717
1718 entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
1719 ready_links);
1720 list_remove(&entry->ready_links);
Andrew Walbranaad8f982019-12-04 10:56:39 +00001721 ret = vm_id_for_wait_entry(vm, entry);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001722
1723exit:
1724 sl_unlock(&vm->lock);
1725 return ret;
1726}
1727
1728/**
1729 * Retrieves the next VM waiting to be notified that the mailbox of the
1730 * specified VM became writable. Only primary VMs are allowed to call this.
1731 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +00001732 * Returns -1 on failure or if there are no waiters; the VM id of the next
1733 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001734 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001735int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001736{
1737 struct vm *vm;
1738 struct vm_locked locked;
1739 struct wait_entry *entry;
1740 struct vm *waiting_vm;
1741
1742 /* Only primary VMs are allowed to call this function. */
1743 if (current->vm->id != HF_PRIMARY_VM_ID) {
1744 return -1;
1745 }
1746
Andrew Walbran42347a92019-05-09 13:59:03 +01001747 vm = vm_find(vm_id);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001748 if (vm == NULL) {
1749 return -1;
1750 }
1751
Fuad Tabbaed294af2019-12-20 10:43:01 +00001752 /* Check if there are outstanding notifications from given VM. */
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001753 locked = vm_lock(vm);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001754 entry = api_fetch_waiter(locked);
1755 vm_unlock(&locked);
1756
1757 if (entry == NULL) {
1758 return -1;
1759 }
1760
1761 /* Enqueue notification to waiting VM. */
1762 waiting_vm = entry->waiting_vm;
1763
1764 sl_lock(&waiting_vm->lock);
1765 if (list_empty(&entry->ready_links)) {
1766 list_append(&waiting_vm->mailbox.ready_list,
1767 &entry->ready_links);
1768 }
1769 sl_unlock(&waiting_vm->lock);
1770
1771 return waiting_vm->id;
1772}
1773
1774/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001775 * Releases the caller's mailbox so that a new message can be received. The
1776 * caller must have copied out all data they wish to preserve as new messages
1777 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001778 *
1779 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001780 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
1781 * - FFA_SUCCESS on success if no further action is needed.
1782 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001783 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001784 * hf_mailbox_waiter_get.
1785 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001786struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001787{
1788 struct vm *vm = current->vm;
1789 struct vm_locked locked;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001790 struct ffa_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001791
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001792 locked = vm_lock(vm);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001793 switch (vm->mailbox.state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001794 case MAILBOX_STATE_EMPTY:
Andrew Sculld6ee1102019-04-05 22:12:42 +01001795 case MAILBOX_STATE_RECEIVED:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001796 ret = ffa_error(FFA_DENIED);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001797 break;
1798
Andrew Sculld6ee1102019-04-05 22:12:42 +01001799 case MAILBOX_STATE_READ:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001800 ret = api_waiter_result(locked, current, next);
Andrew Sculld6ee1102019-04-05 22:12:42 +01001801 vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001802 break;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001803 }
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001804 vm_unlock(&locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001805
1806 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001807}
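
/*
 * Mailbox lifecycle implied by the switch above, as a sketch:
 *
 *	EMPTY --(message copied in)--> RECEIVED --(FFA_MSG_RECV)--> READ
 *	READ --(FFA_RX_RELEASE)--> EMPTY
 *
 * Releasing from EMPTY (nothing delivered) or RECEIVED (message not yet
 * read) is refused with FFA_DENIED.
 */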
Andrew Walbran318f5732018-11-20 16:23:42 +00001808
1809/**
1810 * Enables or disables a given interrupt ID for the calling vCPU.
1811 *
1812 * Returns 0 on success, or -1 if the intid is invalid.
1813 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001814int64_t api_interrupt_enable(uint32_t intid, bool enable,
1815 enum interrupt_type type, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001816{
Manish Pandey35e452f2021-02-18 21:36:34 +00001817 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001818 uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
Manish Pandey35e452f2021-02-18 21:36:34 +00001819 uint32_t intid_shift = intid % INTERRUPT_REGISTER_BITS;
1820 uint32_t intid_mask = 1U << intid_shift;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001821
Andrew Walbran318f5732018-11-20 16:23:42 +00001822 if (intid >= HF_NUM_INTIDS) {
1823 return -1;
1824 }
1825
Manish Pandey35e452f2021-02-18 21:36:34 +00001826 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001827 if (enable) {
Maksims Svecovs953dee52022-04-01 12:15:36 +01001828 if (type == INTERRUPT_TYPE_IRQ) {
1829 current->interrupts.interrupt_type[intid_index] &=
1830 ~intid_mask;
1831 } else if (type == INTERRUPT_TYPE_FIQ) {
1832 current->interrupts.interrupt_type[intid_index] |=
1833 intid_mask;
1834 }
1835
Andrew Walbran3d84a262018-12-13 14:41:19 +00001836 /*
1837 * If it is pending and was not enabled before, increment the
1838 * count.
1839 */
1840 if (current->interrupts.interrupt_pending[intid_index] &
1841 ~current->interrupts.interrupt_enabled[intid_index] &
1842 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001843 if ((current->interrupts.interrupt_type[intid_index] &
1844 intid_mask) ==
1845 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1846 vcpu_irq_count_increment(current_locked);
1847 } else {
1848 vcpu_fiq_count_increment(current_locked);
1849 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001850 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001851 current->interrupts.interrupt_enabled[intid_index] |=
1852 intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001853 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001854 /*
1855 * If it is pending and was enabled before, decrement the count.
1856 */
1857 if (current->interrupts.interrupt_pending[intid_index] &
1858 current->interrupts.interrupt_enabled[intid_index] &
1859 intid_mask) {
Manish Pandey35e452f2021-02-18 21:36:34 +00001860 if ((current->interrupts.interrupt_type[intid_index] &
1861 intid_mask) ==
1862 (INTERRUPT_TYPE_IRQ << intid_shift)) {
1863 vcpu_irq_count_decrement(current_locked);
1864 } else {
1865 vcpu_fiq_count_decrement(current_locked);
1866 }
Andrew Walbran3d84a262018-12-13 14:41:19 +00001867 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001868 current->interrupts.interrupt_enabled[intid_index] &=
1869 ~intid_mask;
Manish Pandey35e452f2021-02-18 21:36:34 +00001870 current->interrupts.interrupt_type[intid_index] &= ~intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001871 }
1872
Manish Pandey35e452f2021-02-18 21:36:34 +00001873 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001874 return 0;
1875}
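
/*
 * Worked example of the index/mask arithmetic above, with
 * INTERRUPT_REGISTER_BITS == 32. For intid 33:
 *
 *	intid_index = 33 / 32 = 1;	// second 32-bit word
 *	intid_shift = 33 % 32 = 1;
 *	intid_mask  = 1U << 1 = 0x2;	// bit 1 of word 1
 *
 * Enabling IRQ 33 thus clears bit 1 of interrupt_type[1] (marking it an
 * IRQ) and sets bit 1 of interrupt_enabled[1].
 */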
1876
1877/**
1878 * Returns the ID of the next pending interrupt for the calling vCPU, and
1879 * acknowledges it (i.e. marks it as no longer pending). Returns
1880 * HF_INVALID_INTID if there are no pending interrupts.
1881 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001882uint32_t api_interrupt_get(struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001883{
1884 uint8_t i;
1885 uint32_t first_interrupt = HF_INVALID_INTID;
Manish Pandey35e452f2021-02-18 21:36:34 +00001886 struct vcpu_locked current_locked;
Andrew Walbran318f5732018-11-20 16:23:42 +00001887
1888 /*
1889 * Find the first enabled and pending interrupt ID, return it, and
1890 * deactivate it.
1891 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001892 current_locked = vcpu_lock(current);
Andrew Walbran318f5732018-11-20 16:23:42 +00001893 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
1894 uint32_t enabled_and_pending =
1895 current->interrupts.interrupt_enabled[i] &
1896 current->interrupts.interrupt_pending[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001897
Andrew Walbran318f5732018-11-20 16:23:42 +00001898 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001899 uint8_t bit_index = ctz(enabled_and_pending);
Manish Pandey35e452f2021-02-18 21:36:34 +00001900 uint32_t intid_mask = 1U << bit_index;
1901
Andrew Walbran3d84a262018-12-13 14:41:19 +00001902 /*
1903 * Mark it as no longer pending and decrement the count.
1904 */
Manish Pandey35e452f2021-02-18 21:36:34 +00001905 current->interrupts.interrupt_pending[i] &= ~intid_mask;
1906
1907 if ((current->interrupts.interrupt_type[i] &
1908 intid_mask) == (INTERRUPT_TYPE_IRQ << bit_index)) {
1909 vcpu_irq_count_decrement(current_locked);
1910 } else {
1911 vcpu_fiq_count_decrement(current_locked);
1912 }
1913
Andrew Walbran3d84a262018-12-13 14:41:19 +00001914 first_interrupt =
1915 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001916 break;
1917 }
1918 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001919
Manish Pandey35e452f2021-02-18 21:36:34 +00001920 vcpu_unlock(&current_locked);
Andrew Walbran318f5732018-11-20 16:23:42 +00001921 return first_interrupt;
1922}
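
/*
 * Usage sketch (illustrative): a guest handler draining pending interrupts,
 * assuming the vmapi wrapper hf_interrupt_get() backed by this call.
 *
 *	uint32_t intid;
 *	while ((intid = hf_interrupt_get()) != HF_INVALID_INTID) {
 *		handle_intid(intid);	// hypothetical per-ID handler
 *	}
 */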
1923
1924/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001925 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001926 * given VM and vCPU.
1927 */
1928static inline bool is_injection_allowed(uint32_t target_vm_id,
1929 struct vcpu *current)
1930{
1931 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001932
Andrew Walbran318f5732018-11-20 16:23:42 +00001933 /*
1934 * The primary VM is allowed to inject interrupts into any VM. Secondary
1935 * VMs are only allowed to inject interrupts into their own vCPUs.
1936 */
1937 return current_vm_id == HF_PRIMARY_VM_ID ||
1938 current_vm_id == target_vm_id;
1939}
1940
1941/**
1942 * Injects a virtual interrupt of the given ID into the given target vCPU.
1943 * This doesn't cause the vCPU to actually be run immediately; it will be taken
1944 * when the vCPU is next run, which is up to the scheduler.
1945 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001946 * Returns:
1947 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1948 * ID is invalid, or the current VM is not allowed to inject interrupts to
1949 * the target VM.
1950 * - 0 on success if no further action is needed.
1951 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1952 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001953 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001954int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
1955 ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001956 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001957{
Andrew Walbran318f5732018-11-20 16:23:42 +00001958 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001959 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001960
1961 if (intid >= HF_NUM_INTIDS) {
1962 return -1;
1963 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001964
Andrew Walbran318f5732018-11-20 16:23:42 +00001965 if (target_vm == NULL) {
1966 return -1;
1967 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001968
Andrew Walbran318f5732018-11-20 16:23:42 +00001969 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001970 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00001971 return -1;
1972 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001973
Andrew Walbran318f5732018-11-20 16:23:42 +00001974 if (!is_injection_allowed(target_vm_id, current)) {
1975 return -1;
1976 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001977
Andrew Walbrane1310df2019-04-29 17:28:28 +01001978 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001979
Manish Pandey35e452f2021-02-18 21:36:34 +00001980 dlog_verbose(
1981 "Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
1982 "%u\n",
1983 intid, target_vm_id, target_vcpu_idx, current->vm->id,
1984 vcpu_index(current));
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001985 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001986}
Andrew Scull6386f252018-12-06 13:29:10 +00001987
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001988/** Returns the version of the implemented FF-A specification. */
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00001989struct ffa_value api_ffa_version(struct vcpu *current,
1990 uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001991{
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00001992 struct vm_locked current_vm_locked;
1993
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001994 /*
1995 * Ensure that both major and minor revision representation occupies at
1996 * most 15 bits.
1997 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001998 static_assert(0x8000 > FFA_VERSION_MAJOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001999 "Major revision representation takes more than 15 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002000 static_assert(0x10000 > FFA_VERSION_MINOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01002001 "Minor revision representation takes more than 16 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002002 if (requested_version & FFA_VERSION_RESERVED_BIT) {
Andrew Walbran9fd29072020-04-22 12:12:14 +01002003 /* Invalid encoding, return an error. */
J-Alves13318e32021-02-22 17:21:00 +00002004 return (struct ffa_value){.func = (uint32_t)FFA_NOT_SUPPORTED};
Andrew Walbran9fd29072020-04-22 12:12:14 +01002005 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01002006
Daniel Boulbybaeaf2e2021-12-09 11:42:36 +00002007 current_vm_locked = vm_lock(current->vm);
2008 current_vm_locked.vm->ffa_version = requested_version;
2009 vm_unlock(&current_vm_locked);
2010
Daniel Boulby6e32c612021-02-17 15:09:41 +00002011 return ((struct ffa_value){.func = FFA_VERSION_COMPILED});
Jose Marinhofc0b2b62019-06-06 11:18:45 +01002012}
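
/*
 * Worked example of the version encoding checked above: a version word is
 * (major << 16) | minor with bit 31 reserved, so FF-A v1.1 is encoded as
 *
 *	(1 << 16) | 1 == 0x10001
 *
 * and any request with FFA_VERSION_RESERVED_BIT (bit 31) set is rejected
 * with FFA_NOT_SUPPORTED.
 */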
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01002013
2014int64_t api_debug_log(char c, struct vcpu *current)
2015{
Andrew Sculld54e1be2019-08-20 11:09:42 +01002016 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01002017 struct vm *vm = current->vm;
2018 struct vm_locked vm_locked = vm_lock(vm);
2019
Andrew Sculld54e1be2019-08-20 11:09:42 +01002020 if (c == '\n' || c == '\0') {
2021 flush = true;
2022 } else {
2023 vm->log_buffer[vm->log_buffer_length++] = c;
2024 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
2025 }
2026
2027 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01002028 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
2029 vm->log_buffer_length);
2030 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01002031 }
2032
2033 vm_unlock(&vm_locked);
2034
2035 return 0;
2036}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002037
2038/**
J-Alves6f72ca82021-11-01 12:34:58 +00002039 * Helper for success return of FFA_FEATURES, for when it is used to query
2040 * an interrupt ID.
2041 */
2042struct ffa_value api_ffa_feature_success(uint32_t arg2)
2043{
2044 return (struct ffa_value){
2045 .func = FFA_SUCCESS_32, .arg1 = 0U, .arg2 = arg2};
2046}
2047
2048/**
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002049 * Discovery function returning information about the implementation of optional
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002050 * FF-A interfaces.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002051 */
J-Alves6f72ca82021-11-01 12:34:58 +00002052struct ffa_value api_ffa_features(uint32_t feature_function_id)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002053{
J-Alves6f72ca82021-11-01 12:34:58 +00002054 /*
2055 * According to table 13.8 of FF-A v1.1 Beta 0 spec, bits [30:8] MBZ
2056 * if using a feature ID.
2057 */
2058 if ((feature_function_id & FFA_FEATURES_FUNC_ID_MASK) == 0U &&
2059 (feature_function_id & ~FFA_FEATURES_FEATURE_ID_MASK) != 0) {
2060 return ffa_error(FFA_NOT_SUPPORTED);
2061 }
2062
2063 switch (feature_function_id) {
2064 /* Check support of the given Function ID. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002065 case FFA_ERROR_32:
2066 case FFA_SUCCESS_32:
2067 case FFA_INTERRUPT_32:
2068 case FFA_VERSION_32:
2069 case FFA_FEATURES_32:
2070 case FFA_RX_RELEASE_32:
2071 case FFA_RXTX_MAP_64:
Daniel Boulby9e420ca2021-07-07 15:03:49 +01002072 case FFA_RXTX_UNMAP_32:
Fuad Tabbae4efcc32020-07-16 15:37:27 +01002073 case FFA_PARTITION_INFO_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002074 case FFA_ID_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002075 case FFA_MSG_WAIT_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002076 case FFA_RUN_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002077 case FFA_MEM_DONATE_32:
2078 case FFA_MEM_LEND_32:
2079 case FFA_MEM_SHARE_32:
2080 case FFA_MEM_RETRIEVE_REQ_32:
2081 case FFA_MEM_RETRIEVE_RESP_32:
2082 case FFA_MEM_RELINQUISH_32:
2083 case FFA_MEM_RECLAIM_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00002084 case FFA_MSG_SEND_DIRECT_RESP_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002085 case FFA_MSG_SEND_DIRECT_RESP_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00002086 case FFA_MSG_SEND_DIRECT_REQ_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002087 case FFA_MSG_SEND_DIRECT_REQ_32:
J-Alves3829fc02021-03-18 12:49:18 +00002088#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
Daniel Boulbyb2fb80e2021-02-03 15:09:23 +00002089 /* FF-A v1.1 features. */
2090 case FFA_SPM_ID_GET_32:
J-Alves6f72ca82021-11-01 12:34:58 +00002091 case FFA_NOTIFICATION_BITMAP_CREATE_32:
2092 case FFA_NOTIFICATION_BITMAP_DESTROY_32:
2093 case FFA_NOTIFICATION_BIND_32:
2094 case FFA_NOTIFICATION_UNBIND_32:
2095 case FFA_NOTIFICATION_SET_32:
2096 case FFA_NOTIFICATION_GET_32:
2097 case FFA_NOTIFICATION_INFO_GET_64:
Raghu Krishnamurthy6764b072021-10-18 12:54:24 -07002098 case FFA_MEM_PERM_GET_32:
2099 case FFA_MEM_PERM_SET_32:
2100 case FFA_MEM_PERM_GET_64:
2101 case FFA_MEM_PERM_SET_64:
J-Alves3829fc02021-03-18 12:49:18 +00002102#endif
2103 return (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alves6f72ca82021-11-01 12:34:58 +00002104
2105#if (MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED)
2106 /* Check support of a feature provided respective feature ID. */
2107 case FFA_FEATURE_NPI:
2108 return api_ffa_feature_success(HF_NOTIFICATION_PENDING_INTID);
2109 case FFA_FEATURE_SRI:
2110 return api_ffa_feature_success(HF_SCHEDULE_RECEIVER_INTID);
2111#endif
2112 /* Platform specific feature support. */
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002113 default:
J-Alves6f72ca82021-11-01 12:34:58 +00002114 return arch_ffa_features(feature_function_id);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01002115 }
2116}
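
/*
 * Usage sketch (illustrative): probing support from a caller, assuming a
 * wrapper ffa_features(id) that issues FFA_FEATURES.
 *
 *	// Probe an interface by function ID.
 *	if (ffa_features(FFA_NOTIFICATION_BIND_32).func == FFA_SUCCESS_32) {
 *		// Notifications are supported.
 *	}
 *
 *	// Probe by feature ID: on success, w2 carries the interrupt ID.
 *	struct ffa_value ret = ffa_features(FFA_FEATURE_NPI);
 *	uint32_t npi = (ret.func == FFA_SUCCESS_32) ? (uint32_t)ret.arg2 : 0;
 */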
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002117
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002118/**
J-Alves645eabe2021-02-22 16:08:27 +00002119 * The FF-A specification states that x2/w2 Must Be Zero for direct messaging
2120 * interfaces.
2121 */
2122static inline bool api_ffa_dir_msg_is_arg2_zero(struct ffa_value args)
2123{
2124 return args.arg2 == 0U;
2125}
2126
2127/**
J-Alves76d99af2021-03-10 17:42:11 +00002128 * Limits the size of the arguments in the ffa_value structure to 32 bits.
2129 */
2130static struct ffa_value api_ffa_value_copy32(struct ffa_value args)
2131{
2132 return (struct ffa_value){
2133 .func = (uint32_t)args.func,
2134 .arg1 = (uint32_t)args.arg1,
2135 .arg2 = (uint32_t)0,
2136 .arg3 = (uint32_t)args.arg3,
2137 .arg4 = (uint32_t)args.arg4,
2138 .arg5 = (uint32_t)args.arg5,
2139 .arg6 = (uint32_t)args.arg6,
2140 .arg7 = (uint32_t)args.arg7,
2141 };
2142}
2143
2144/**
2145 * Helper to copy direct message payload, depending on SMC used and expected
2146 * registers size.
2147 */
2148static struct ffa_value api_ffa_dir_msg_value(struct ffa_value args)
2149{
2150 if (args.func == FFA_MSG_SEND_DIRECT_REQ_32 ||
2151 args.func == FFA_MSG_SEND_DIRECT_RESP_32) {
2152 return api_ffa_value_copy32(args);
2153 }
2154
2155 return (struct ffa_value){
2156 .func = args.func,
2157 .arg1 = args.arg1,
2158 .arg2 = 0,
2159 .arg3 = args.arg3,
2160 .arg4 = args.arg4,
2161 .arg5 = args.arg5,
2162 .arg6 = args.arg6,
2163 .arg7 = args.arg7,
2164 };
2165}
2166
2167/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002168 * Send an FF-A direct message request.
2169 */
2170struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
2171 ffa_vm_id_t receiver_vm_id,
2172 struct ffa_value args,
2173 struct vcpu *current,
2174 struct vcpu **next)
2175{
J-Alves17228f72021-04-20 17:13:19 +01002176 struct ffa_value ret;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002177 struct vm *receiver_vm;
J-Alves6e2abc62021-12-02 14:58:56 +00002178 struct vm_locked receiver_locked;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002179 struct vcpu *receiver_vcpu;
2180 struct two_vcpu_locked vcpus_locked;
2181
J-Alves645eabe2021-02-22 16:08:27 +00002182 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
2183 return ffa_error(FFA_INVALID_PARAMETERS);
2184 }
2185
Olivier Deprez55a189e2021-06-09 15:45:27 +02002186 if (!plat_ffa_is_direct_request_valid(current, sender_vm_id,
2187 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00002188 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002189 }
2190
Olivier Deprez55a189e2021-06-09 15:45:27 +02002191 if (plat_ffa_direct_request_forward(receiver_vm_id, args, &ret)) {
J-Alves17228f72021-04-20 17:13:19 +01002192 return ret;
2193 }
2194
2195 ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
2196
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002197 receiver_vm = vm_find(receiver_vm_id);
2198 if (receiver_vm == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002199 dlog_verbose("Invalid Receiver!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002200 return ffa_error(FFA_INVALID_PARAMETERS);
2201 }
2202
2203 /*
J-Alves439ac972021-11-18 17:32:03 +00002204	 * Check if the sender supports sending direct message requests, and
2205	 * if the receiver supports receipt of them.
2206 */
2207 if (!plat_ffa_is_direct_request_supported(current->vm, receiver_vm)) {
2208 return ffa_error(FFA_DENIED);
2209 }
2210
2211 /*
Olivier Deprezc13a8692022-04-08 17:47:14 +02002212 * Per FF-A EAC spec section 4.4.1 the firmware framework supports
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002213 * UP (migratable) or MP partitions with a number of vCPUs matching the
2214 * number of PEs in the system. It further states that MP partitions
2215 * accepting direct request messages cannot migrate.
2216 */
J-Alvesad6a0432021-04-09 16:06:21 +01002217 receiver_vcpu = api_ffa_get_vm_vcpu(receiver_vm, current);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002218 if (receiver_vcpu == NULL) {
J-Alves88a13542021-12-14 15:39:52 +00002219 dlog_verbose("Invalid vCPU!\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002220 return ffa_error(FFA_INVALID_PARAMETERS);
2221 }
2222
J-Alves6e2abc62021-12-02 14:58:56 +00002223 receiver_locked = vm_lock(receiver_vm);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002224 vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
2225
2226 /*
2227	 * If the destination vCPU is executing or has already received an
2228	 * FFA_MSG_SEND_DIRECT_REQ, return to the caller hinting that the
2229	 * recipient is busy. There is a brief period where the vCPU state has
2230	 * changed but regs_available is still false, so treat this case as the
2231	 * vCPU not yet being ready to receive a direct message request.
2232 */
2233 if (is_ffa_direct_msg_request_ongoing(vcpus_locked.vcpu1) ||
2234 receiver_vcpu->state == VCPU_STATE_RUNNING ||
2235 !receiver_vcpu->regs_available) {
J-Alves88a13542021-12-14 15:39:52 +00002236 dlog_verbose("Receiver is busy with another request.\n");
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002237 ret = ffa_error(FFA_BUSY);
2238 goto out;
2239 }
2240
2241 if (atomic_load_explicit(&receiver_vcpu->vm->aborting,
2242 memory_order_relaxed)) {
2243 if (receiver_vcpu->state != VCPU_STATE_ABORTED) {
Olivier Deprezf92e5d42020-11-13 16:00:54 +01002244 dlog_notice("Aborting VM %#x vCPU %u\n",
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002245 receiver_vcpu->vm->id,
2246 vcpu_index(receiver_vcpu));
2247 receiver_vcpu->state = VCPU_STATE_ABORTED;
2248 }
2249
2250 ret = ffa_error(FFA_ABORTED);
2251 goto out;
2252 }
2253
2254 switch (receiver_vcpu->state) {
2255 case VCPU_STATE_OFF:
2256 case VCPU_STATE_RUNNING:
2257 case VCPU_STATE_ABORTED:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002258 case VCPU_STATE_BLOCKED_INTERRUPT:
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002259 case VCPU_STATE_BLOCKED:
2260 case VCPU_STATE_PREEMPTED:
J-Alves88a13542021-12-14 15:39:52 +00002261 dlog_verbose("Receiver's vCPU can't receive request (%u)!\n",
2262 vcpu_index(receiver_vcpu));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002263 ret = ffa_error(FFA_BUSY);
2264 goto out;
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002265 case VCPU_STATE_WAITING:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002266 /*
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002267 * We expect target vCPU to be in WAITING state after either
2268 * having called ffa_msg_wait or sent a direct message response.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002269 */
2270 break;
2271 }
2272
2273	 /* Inject the timer interrupt if one is pending. */
2274 if (arch_timer_pending(&receiver_vcpu->regs)) {
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002275 api_interrupt_inject_locked(vcpus_locked.vcpu1,
2276 HF_VIRTUAL_TIMER_INTID, current,
2277 NULL);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002278
2279 arch_timer_mask(&receiver_vcpu->regs);
2280 }
2281
2282 /* The receiver vCPU runs upon direct message invocation */
2283 receiver_vcpu->cpu = current->cpu;
2284 receiver_vcpu->state = VCPU_STATE_RUNNING;
2285 receiver_vcpu->regs_available = false;
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002286 receiver_vcpu->direct_request_origin_vm_id = sender_vm_id;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002287
J-Alves76d99af2021-03-10 17:42:11 +00002288 arch_regs_set_retval(&receiver_vcpu->regs, api_ffa_dir_msg_value(args));
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002289
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002290 current->state = VCPU_STATE_BLOCKED;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002291
2292 /* Switch to receiver vCPU targeted to by direct msg request */
2293 *next = receiver_vcpu;
2294
J-Alves6e2abc62021-12-02 14:58:56 +00002295 if (!receiver_locked.vm->el0_partition) {
2296 /*
2297 * If the scheduler in the system is giving CPU cycles to the
2298 * receiver, due to pending notifications, inject the NPI
2299 * interrupt. Following call assumes that '*next' has been set
2300 * to receiver_vcpu.
2301 */
2302 plat_ffa_inject_notification_pending_interrupt(
2303 vcpus_locked.vcpu1.vcpu == receiver_vcpu
2304 ? vcpus_locked.vcpu1
2305 : vcpus_locked.vcpu2,
2306 current, receiver_locked);
2307 }
2308
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002309 /*
2310 * Since this flow will lead to a VM switch, the return value will not
2311	 * be applied to the current vCPU.
2312 */
2313
2314out:
2315 sl_unlock(&receiver_vcpu->lock);
2316 sl_unlock(&current->lock);
J-Alves6e2abc62021-12-02 14:58:56 +00002317 vm_unlock(&receiver_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002318
2319 return ret;
2320}
2321
2322/**
2323 * Send an FF-A direct message response.
2324 */
2325struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
2326 ffa_vm_id_t receiver_vm_id,
2327 struct ffa_value args,
2328 struct vcpu *current,
2329 struct vcpu **next)
2330{
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002331 struct vcpu_locked current_locked;
J-Alves645eabe2021-02-22 16:08:27 +00002332
2333 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
2334 return ffa_error(FFA_INVALID_PARAMETERS);
2335 }
2336
J-Alves76d99af2021-03-10 17:42:11 +00002337 struct ffa_value to_ret = api_ffa_dir_msg_value(args);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002338
Olivier Deprez55a189e2021-06-09 15:45:27 +02002339 if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
2340 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00002341 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002342 }
2343
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002344 current_locked = vcpu_lock(current);
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002345 if (api_ffa_is_managed_exit_ongoing(current_locked)) {
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002346 /*
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002347 * No need for REQ/RESP state management as managed exit does
2348 * not have corresponding REQ pair.
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002349 */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002350 if (receiver_vm_id != HF_PRIMARY_VM_ID) {
2351 vcpu_unlock(&current_locked);
2352 return ffa_error(FFA_DENIED);
2353 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002354
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05002355 /*
2356	 * Per FF-A v1.1 Beta section 8.4.1.2 bullet 6, the SPMC can
2357	 * signal a secure interrupt to an SP that is performing managed
2358	 * exit. We have taken an implementation-defined choice to not
2359	 * allow managed exit while an SP is processing a secure interrupt.
2360 */
2361 CHECK(!current->processing_secure_interrupt);
2362
Madhukar Pappireddydd6fdfb2021-12-14 12:30:36 -06002363 plat_interrupts_set_priority_mask(current->priority_mask);
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002364 current->processing_managed_exit = false;
2365 } else {
2366 /*
2367 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
2368 * defined originator.
2369 */
2370 if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
2371 /*
2372 * Sending direct response but direct request origin
2373 * vCPU is not set.
2374 */
2375 vcpu_unlock(&current_locked);
2376 return ffa_error(FFA_DENIED);
2377 }
2378
Madhukar Pappireddyed4ab942021-08-03 14:22:53 -05002379		/* Refer to FF-A v1.1 Beta0 section 7.3 bullet 3. */
Manish Pandeya5f39fb2020-09-11 09:47:11 +01002380 if (current->direct_request_origin_vm_id != receiver_vm_id) {
2381 vcpu_unlock(&current_locked);
2382 return ffa_error(FFA_DENIED);
2383 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002384 }
2385
2386 /* Clear direct request origin for the caller. */
2387 current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
2388
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002389 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002390
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002391 if (!vm_id_is_current_world(receiver_vm_id)) {
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002392 *next = api_switch_to_other_world(
2393 current, to_ret,
2394 /*
2395			 * The current vCPU sent a direct response. It
2396			 * moves to the waiting state.
2397 */
2398 VCPU_STATE_WAITING);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002399 } else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002400 *next = api_switch_to_primary(
2401 current, to_ret,
2402 /*
2403			 * The current vCPU sent a direct response. It
2404			 * moves to the waiting state.
2405 */
2406 VCPU_STATE_WAITING);
J-Alvesfe7f7372020-11-09 11:32:12 +00002407 } else if (vm_id_is_current_world(receiver_vm_id)) {
2408 /*
2409		 * The receiver_vm_id is expected to belong to an SP; otherwise
J-Alvesaa79c012021-07-09 14:29:45 +01002410		 * 'plat_ffa_is_direct_response_valid' should have
J-Alvesfe7f7372020-11-09 11:32:12 +00002411		 * returned an error before reaching this point.
2412 */
2413 *next = api_switch_to_vm(current, to_ret,
Madhukar Pappireddyb11e0d12021-08-02 19:44:35 -05002414 /*
2415				 * The current vCPU sent a direct response.
2416				 * It moves to the waiting state.
2417 */
2418 VCPU_STATE_WAITING, receiver_vm_id);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02002419 } else {
2420 panic("Invalid direct message response invocation");
2421 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00002422
2423 return (struct ffa_value){.func = FFA_INTERRUPT_32};
2424}
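
/*
 * Illustrative caller-side sketch (not part of the build): how a callee
 * partition might answer direct requests in a loop. The helpers
 * 'ffa_msg_wait', 'ffa_sender' and 'ffa_msg_send_direct_resp' mirror
 * Hafnium's test vmapi and are assumptions of this sketch, as is the
 * choice of payload in w3-w5; per the validation above, w2 must be zero.
 *
 *	struct ffa_value res = ffa_msg_wait();
 *
 *	for (;;) {
 *		if (res.func == FFA_MSG_SEND_DIRECT_REQ_32) {
 *			res = ffa_msg_send_direct_resp(
 *				hf_vm_get_id(), ffa_sender(res),
 *				res.arg3, res.arg4, res.arg5);
 *		} else {
 *			res = ffa_msg_wait();
 *		}
 *	}
 */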
2425
J-Alves84658fc2021-06-17 14:37:32 +01002426static bool api_memory_region_check_flags(
2427 struct ffa_memory_region *memory_region, uint32_t share_func)
2428{
2429 switch (share_func) {
2430 case FFA_MEM_SHARE_32:
2431 if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) !=
2432 0U) {
2433 return false;
2434 }
2435 /* Intentional fall-through */
2436 case FFA_MEM_LEND_32:
2437 case FFA_MEM_DONATE_32: {
2438 /* Bits 31:2 Must Be Zero. */
2439 ffa_memory_receiver_flags_t to_mask =
2440 ~(FFA_MEMORY_REGION_FLAG_CLEAR |
2441 FFA_MEMORY_REGION_FLAG_TIME_SLICE);
2442
2443 if ((memory_region->flags & to_mask) != 0U) {
2444 return false;
2445 }
2446 break;
2447 }
2448 default:
2449 panic("Check for mem send calls only.\n");
2450 }
2451
2452	/* All checks passed: the reserved flag values are 0. */
2453 return true;
2454}
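
/*
 * Worked example of the checks above (a sketch of the rule, not new
 * policy): FFA_MEM_SHARE must not set the CLEAR flag, while FFA_MEM_LEND
 * and FFA_MEM_DONATE may set only the CLEAR and TIME_SLICE bits.
 *
 *	memory_region->flags = 0;
 *	// accepted for share, lend and donate
 *	memory_region->flags = FFA_MEMORY_REGION_FLAG_CLEAR;
 *	// accepted for lend and donate, rejected for share
 *	memory_region->flags = UINT32_C(1) << 31;
 *	// rejected for all three: bits 31:2 must be zero
 */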
2455
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002456struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
2457 uint32_t fragment_length, ipaddr_t address,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002458 uint32_t page_count, struct vcpu *current)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002459{
2460 struct vm *from = current->vm;
2461 struct vm *to;
2462 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002463 struct ffa_memory_region *memory_region;
2464 struct ffa_value ret;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002465
2466 if (ipa_addr(address) != 0 || page_count != 0) {
2467 /*
2468 * Hafnium only supports passing the descriptor in the TX
2469 * mailbox.
2470 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002471 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002472 }
2473
Andrew Walbranca808b12020-05-15 17:22:28 +01002474 if (fragment_length > length) {
2475 dlog_verbose(
2476 "Fragment length %d greater than total length %d.\n",
2477 fragment_length, length);
2478 return ffa_error(FFA_INVALID_PARAMETERS);
2479 }
2480 if (fragment_length < sizeof(struct ffa_memory_region) +
2481 sizeof(struct ffa_memory_access)) {
2482 dlog_verbose(
2483 "Initial fragment length %d smaller than header size "
2484 "%d.\n",
2485 fragment_length,
2486 sizeof(struct ffa_memory_region) +
2487 sizeof(struct ffa_memory_access));
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002488 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002489 }
2490
2491 /*
2492 * Check that the sender has configured its send buffer. If the TX
2493 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2494 * be safely accessed after releasing the lock since the TX mailbox
2495 * address can only be configured once.
2496 */
2497 sl_lock(&from->lock);
2498 from_msg = from->mailbox.send;
2499 sl_unlock(&from->lock);
2500
2501 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002502 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002503 }
2504
2505 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002506 * Copy the memory region descriptor to a fresh page from the memory
2507 * pool. This prevents the sender from changing it underneath us, and
2508 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002509 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002510 if (fragment_length > HF_MAILBOX_SIZE ||
2511 fragment_length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002512 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002513 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002514 memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002515 if (memory_region == NULL) {
2516 dlog_verbose("Failed to allocate memory region copy.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002517 return ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002518 }
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002519 memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002520
2521 /* The sender must match the caller. */
2522 if (memory_region->sender != from->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002523 dlog_verbose("Memory region sender doesn't match caller.\n");
J-Alves99948662021-07-28 18:07:04 +01002524 ret = ffa_error(FFA_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002525 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002526 }
2527
J-Alves84658fc2021-06-17 14:37:32 +01002528 if (!api_memory_region_check_flags(memory_region, share_func)) {
2529 dlog_verbose(
2530 "Memory region reserved arguments must be zero.\n");
2531 ret = ffa_error(FFA_INVALID_PARAMETERS);
2532 goto out;
2533 }
2534
Andrew Walbrana65a1322020-04-06 19:32:32 +01002535 if (memory_region->receiver_count != 1) {
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002536 /* Hafnium doesn't support multi-way memory sharing for now. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002537 dlog_verbose(
2538 "Multi-way memory sharing not supported (got %d "
Andrew Walbrana65a1322020-04-06 19:32:32 +01002539 "endpoint memory access descriptors, expected 1).\n",
2540 memory_region->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002541 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002542 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002543 }
2544
2545 /*
2546 * Ensure that the receiver VM exists and isn't the same as the sender.
2547 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01002548 to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002549 if (to == NULL || to == from) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002550 dlog_verbose("Invalid receiver.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002551 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002552 goto out;
2553 }
2554
Maksims Svecovsa3d570c2021-12-08 11:16:32 +00002555 if (!plat_ffa_is_memory_send_valid(to->id, share_func)) {
2556 ret = ffa_error(FFA_DENIED);
2557 goto out;
2558 }
2559
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002560 if (to->id == HF_TEE_VM_ID) {
2561 /*
2562 * The 'to' VM lock is only needed in the case that it is the
2563 * TEE VM.
2564 */
2565 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2566
2567 if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002568 ret = ffa_error(FFA_BUSY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002569 goto out_unlock;
2570 }
2571
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002572 ret = ffa_memory_tee_send(
2573 vm_to_from_lock.vm2, vm_to_from_lock.vm1, memory_region,
2574 length, fragment_length, share_func, &api_page_pool);
2575 /*
2576		 * ffa_memory_tee_send takes ownership of the memory_region, so
2577 * make sure we don't free it.
2578 */
2579 memory_region = NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002580
2581 out_unlock:
2582 vm_unlock(&vm_to_from_lock.vm1);
2583 vm_unlock(&vm_to_from_lock.vm2);
2584 } else {
2585 struct vm_locked from_locked = vm_lock(from);
2586
Andrew Walbran1a86aa92020-05-15 17:22:28 +01002587 ret = ffa_memory_send(from_locked, memory_region, length,
2588 fragment_length, share_func,
2589 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002590 /*
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002591 * ffa_memory_send takes ownership of the memory_region, so
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002592 * make sure we don't free it.
2593 */
2594 memory_region = NULL;
2595
2596 vm_unlock(&from_locked);
2597 }
2598
2599out:
2600 if (memory_region != NULL) {
2601 mpool_free(&api_page_pool, memory_region);
2602 }
2603
2604 return ret;
2605}
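
/*
 * Sender-side sketch (hedged): sharing memory in a single fragment. It
 * assumes a mapped TX mailbox at 'tx_buf', the caller's ID in 'own_id'
 * and an 'ffa_mem_share(length, fragment_length)' wrapper like the one
 * in Hafnium's test vmapi; none of these are defined here.
 *
 *	struct ffa_memory_region *mr = (struct ffa_memory_region *)tx_buf;
 *	mr->sender = own_id;    // must match the caller, checked above
 *	mr->flags = 0;          // reserved flag bits must be zero
 *	mr->receiver_count = 1; // multi-way sharing is unsupported
 *	// receivers[0] and the composite constituents follow, per the spec
 *	struct ffa_value ret = ffa_mem_share(length, length);
 *	if (ret.func == FFA_SUCCESS_32) {
 *		ffa_memory_handle_t handle = ffa_mem_success_handle(ret);
 *	}
 */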
2606
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002607struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
2608 uint32_t fragment_length,
2609 ipaddr_t address, uint32_t page_count,
2610 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002611{
2612 struct vm *to = current->vm;
2613 struct vm_locked to_locked;
2614 const void *to_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002615 struct ffa_memory_region *retrieve_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002616 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002617 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002618
2619 if (ipa_addr(address) != 0 || page_count != 0) {
2620 /*
2621 * Hafnium only supports passing the descriptor in the TX
2622 * mailbox.
2623 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002624 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002625 }
2626
Andrew Walbrana65a1322020-04-06 19:32:32 +01002627 if (fragment_length != length) {
2628 dlog_verbose("Fragmentation not yet supported.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002629 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002630 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002631
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002632 retrieve_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002633 (struct ffa_memory_region *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002634 message_buffer_size = cpu_get_buffer_size(current->cpu);
2635 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2636 dlog_verbose("Retrieve request too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002637 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002638 }
2639
2640 to_locked = vm_lock(to);
2641 to_msg = to->mailbox.send;
2642
2643 if (to_msg == NULL) {
2644 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002645 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002646 goto out;
2647 }
2648
2649 /*
2650 * Copy the retrieve request descriptor to an internal buffer, so that
2651 * the caller can't change it underneath us.
2652 */
2653 memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
2654
2655 if (msg_receiver_busy(to_locked, NULL, false)) {
2656 /*
2657 * Can't retrieve memory information if the mailbox is not
2658 * available.
2659 */
2660 dlog_verbose("RX buffer not ready.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002661 ret = ffa_error(FFA_BUSY);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002662 goto out;
2663 }
2664
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002665 ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
2666 &api_page_pool);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002667
2668out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002669 vm_unlock(&to_locked);
2670 return ret;
2671}
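
/*
 * Receiver-side sketch (hedged): retrieving a shared region. It assumes
 * mapped 'tx_buf'/'rx_buf' mailboxes and an
 * 'ffa_mem_retrieve_req(length, fragment_length)' wrapper from the test
 * vmapi. On success the response descriptor is read from the RX buffer,
 * which must then be released.
 *
 *	// a struct ffa_memory_region retrieve request of 'len' bytes is
 *	// written to tx_buf first
 *	struct ffa_value ret = ffa_mem_retrieve_req(len, len);
 *	if (ret.func == FFA_MEM_RETRIEVE_RESP_32) {
 *		// ret.arg1 holds the total length of the response
 *		ffa_rx_release();
 *	}
 */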
2672
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002673struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002674{
2675 struct vm *from = current->vm;
2676 struct vm_locked from_locked;
2677 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002678 struct ffa_mem_relinquish *relinquish_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002679 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002680 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002681 uint32_t length;
2682
2683 from_locked = vm_lock(from);
2684 from_msg = from->mailbox.send;
2685
2686 if (from_msg == NULL) {
2687 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002688 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002689 goto out;
2690 }
2691
2692 /*
2693 * Calculate length from relinquish descriptor before copying. We will
2694 * check again later to make sure it hasn't changed.
2695 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002696 length = sizeof(struct ffa_mem_relinquish) +
2697 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
2698 sizeof(ffa_vm_id_t);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002699 /*
2700 * Copy the relinquish descriptor to an internal buffer, so that the
2701 * caller can't change it underneath us.
2702 */
2703 relinquish_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002704 (struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002705 message_buffer_size = cpu_get_buffer_size(current->cpu);
2706 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2707 dlog_verbose("Relinquish message too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002708 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002709 goto out;
2710 }
2711 memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
2712
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002713 if (sizeof(struct ffa_mem_relinquish) +
2714 relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002715 length) {
2716 dlog_verbose(
2717 "Endpoint count changed while copying to internal "
2718 "buffer.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002719 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002720 goto out;
2721 }
2722
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002723 ret = ffa_memory_relinquish(from_locked, relinquish_request,
2724 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002725
2726out:
2727 vm_unlock(&from_locked);
2728 return ret;
2729}
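
/*
 * Borrower-side sketch (hedged): relinquishing a retrieved region. The
 * 'ffa_mem_relinquish()' wrapper, 'tx_buf', 'handle' and 'own_id' are
 * assumptions. The implied length, sizeof(struct ffa_mem_relinquish) +
 * endpoint_count * sizeof(ffa_vm_id_t), is re-validated after the copy
 * above.
 *
 *	struct ffa_mem_relinquish *rel =
 *		(struct ffa_mem_relinquish *)tx_buf;
 *	rel->handle = handle;
 *	rel->flags = 0;
 *	rel->endpoint_count = 1;
 *	rel->endpoints[0] = own_id;
 *	struct ffa_value ret = ffa_mem_relinquish();
 */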
2730
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002731struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
2732 ffa_memory_region_flags_t flags,
2733 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002734{
2735 struct vm *to = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002736 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002737
Olivier Deprez55a189e2021-06-09 15:45:27 +02002738 if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00002739 struct vm_locked to_locked = vm_lock(to);
2740
Andrew Walbranca808b12020-05-15 17:22:28 +01002741 ret = ffa_memory_reclaim(to_locked, handle, flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002742 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002743
Andrew Walbran290b0c92020-02-03 16:37:14 +00002744 vm_unlock(&to_locked);
2745 } else {
2746 struct vm *from = vm_find(HF_TEE_VM_ID);
2747 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2748
Andrew Walbranca808b12020-05-15 17:22:28 +01002749 ret = ffa_memory_tee_reclaim(vm_to_from_lock.vm1,
2750 vm_to_from_lock.vm2, handle, flags,
2751 &api_page_pool);
2752
2753 vm_unlock(&vm_to_from_lock.vm1);
2754 vm_unlock(&vm_to_from_lock.vm2);
2755 }
2756
2757 return ret;
2758}
2759
2760struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
2761 uint32_t fragment_offset,
2762 ffa_vm_id_t sender_vm_id,
2763 struct vcpu *current)
2764{
2765 struct vm *to = current->vm;
2766 struct vm_locked to_locked;
2767 struct ffa_value ret;
2768
2769 /* Sender ID MBZ at virtual instance. */
2770 if (sender_vm_id != 0) {
2771 return ffa_error(FFA_INVALID_PARAMETERS);
2772 }
2773
2774 to_locked = vm_lock(to);
2775
2776 if (msg_receiver_busy(to_locked, NULL, false)) {
2777 /*
2778 * Can't retrieve memory information if the mailbox is not
2779 * available.
2780 */
2781 dlog_verbose("RX buffer not ready.\n");
2782 ret = ffa_error(FFA_BUSY);
2783 goto out;
2784 }
2785
2786 ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
2787 &api_page_pool);
2788
2789out:
2790 vm_unlock(&to_locked);
2791 return ret;
2792}
2793
2794struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
2795 uint32_t fragment_length,
2796 ffa_vm_id_t sender_vm_id,
2797 struct vcpu *current)
2798{
2799 struct vm *from = current->vm;
2800 const void *from_msg;
2801 void *fragment_copy;
2802 struct ffa_value ret;
2803
2804 /* Sender ID MBZ at virtual instance. */
2805 if (sender_vm_id != 0) {
2806 return ffa_error(FFA_INVALID_PARAMETERS);
2807 }
2808
2809 /*
2810 * Check that the sender has configured its send buffer. If the TX
2811 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2812 * be safely accessed after releasing the lock since the TX mailbox
2813 * address can only be configured once.
2814 */
2815 sl_lock(&from->lock);
2816 from_msg = from->mailbox.send;
2817 sl_unlock(&from->lock);
2818
2819 if (from_msg == NULL) {
2820 return ffa_error(FFA_INVALID_PARAMETERS);
2821 }
2822
2823 /*
2824 * Copy the fragment to a fresh page from the memory pool. This prevents
2825 * the sender from changing it underneath us, and also lets us keep it
2826 * around in the share state table if needed.
2827 */
2828 if (fragment_length > HF_MAILBOX_SIZE ||
2829 fragment_length > MM_PPOOL_ENTRY_SIZE) {
2830 dlog_verbose(
2831 "Fragment length %d larger than mailbox size %d.\n",
2832 fragment_length, HF_MAILBOX_SIZE);
2833 return ffa_error(FFA_INVALID_PARAMETERS);
2834 }
2835 if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
2836 fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2837 0) {
2838 dlog_verbose("Invalid fragment length %d.\n", fragment_length);
2839 return ffa_error(FFA_INVALID_PARAMETERS);
2840 }
2841 fragment_copy = mpool_alloc(&api_page_pool);
2842 if (fragment_copy == NULL) {
2843 dlog_verbose("Failed to allocate fragment copy.\n");
2844 return ffa_error(FFA_NO_MEMORY);
2845 }
2846 memcpy_s(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
2847
2848 /*
2849 * Hafnium doesn't support fragmentation of memory retrieve requests
2850 * (because it doesn't support caller-specified mappings, so a request
2851 * will never be larger than a single page), so this must be part of a
2852 * memory send (i.e. donate, lend or share) request.
2853 *
2854 * We can tell from the handle whether the memory transaction is for the
2855 * TEE or not.
2856 */
2857 if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
2858 FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
2859 struct vm_locked from_locked = vm_lock(from);
2860
2861 ret = ffa_memory_send_continue(from_locked, fragment_copy,
2862 fragment_length, handle,
2863 &api_page_pool);
2864 /*
2865 * `ffa_memory_send_continue` takes ownership of the
2866 * fragment_copy, so we don't need to free it here.
2867 */
2868 vm_unlock(&from_locked);
2869 } else {
2870 struct vm *to = vm_find(HF_TEE_VM_ID);
2871 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2872
2873 /*
2874 * The TEE RX buffer state is checked in
2875 * `ffa_memory_tee_send_continue` rather than here, as we need
2876 * to return `FFA_MEM_FRAG_RX` with the current offset rather
2877 * than FFA_ERROR FFA_BUSY in case it is busy.
2878 */
2879
2880 ret = ffa_memory_tee_send_continue(
2881 vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy,
2882 fragment_length, handle, &api_page_pool);
2883 /*
2884 * `ffa_memory_tee_send_continue` takes ownership of the
2885 * fragment_copy, so we don't need to free it here.
2886 */
Andrew Walbran290b0c92020-02-03 16:37:14 +00002887
2888 vm_unlock(&vm_to_from_lock.vm1);
2889 vm_unlock(&vm_to_from_lock.vm2);
2890 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002891
2892 return ret;
2893}
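
/*
 * Sender-side fragmentation sketch (hedged): when the initial send call
 * answers with FFA_MEM_FRAG_RX, the remaining constituents are pushed
 * with FFA_MEM_FRAG_TX until the full length has been transmitted.
 * 'ffa_mem_frag_tx' and 'ffa_frag_handle' mirror Hafnium's test vmapi
 * helpers and are assumptions here.
 *
 *	struct ffa_value ret = ffa_mem_share(total_length, frag_len);
 *	while (ret.func == FFA_MEM_FRAG_RX_32) {
 *		// the next constituent fragment is copied to tx_buf here
 *		ret = ffa_mem_frag_tx(ffa_frag_handle(ret), frag_len);
 *	}
 */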
Max Shvetsov40108e72020-08-27 12:39:50 +01002894
Olivier Deprezd614d322021-06-18 15:21:00 +02002895/**
2896 * Register an entry point for a vCPU in warm boot cases.
2897 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1 FFA_SECONDARY_EP_REGISTER.
2898 */
Max Shvetsov40108e72020-08-27 12:39:50 +01002899struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
2900 struct vcpu *current)
2901{
2902 struct vm_locked vm_locked;
Olivier Deprezd614d322021-06-18 15:21:00 +02002903 struct ffa_value ret = ffa_error(FFA_DENIED);
Max Shvetsov40108e72020-08-27 12:39:50 +01002904
Olivier Deprezd614d322021-06-18 15:21:00 +02002905 /*
2906	 * Reject if the interface is not supported at this FF-A instance
2907 * (DEN0077A FF-A v1.1 Beta0 Table 18.29) or the VM is UP.
2908 */
2909 if (!plat_ffa_is_secondary_ep_register_supported() ||
2910 current->vm->vcpu_count == 1) {
2911 return ffa_error(FFA_NOT_SUPPORTED);
2912 }
2913
2914 /*
2915 * No further check is made on the address validity
2916 * (FF-A v1.1 Beta0 Table 18.29) as the VM boundaries are not known
2917 * from the VM or vCPU structure.
2918 * DEN0077A FF-A v1.1 Beta0 section 18.3.2.1.1:
2919 * For each SP [...] the Framework assumes that the same entry point
2920 * address is used for initializing any execution context during a
2921 * secondary cold boot.
2922 * If this function is invoked multiple times, then the entry point
2923 * address specified in the last valid invocation must be used by the
2924 * callee.
2925 */
Max Shvetsov40108e72020-08-27 12:39:50 +01002926 vm_locked = vm_lock(current->vm);
Olivier Deprezd614d322021-06-18 15:21:00 +02002927 if (vm_locked.vm->initialized) {
2928 goto out;
2929 }
2930
Max Shvetsov40108e72020-08-27 12:39:50 +01002931 vm_locked.vm->secondary_ep = entry_point;
Olivier Deprezd614d322021-06-18 15:21:00 +02002932
2933 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
2934
2935out:
Max Shvetsov40108e72020-08-27 12:39:50 +01002936 vm_unlock(&vm_locked);
2937
Olivier Deprezd614d322021-06-18 15:21:00 +02002938 return ret;
Max Shvetsov40108e72020-08-27 12:39:50 +01002939}
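
/*
 * SP-side sketch (hedged): a multi-vCPU SP registers its secondary entry
 * point once during initialization; the 'initialized' check above makes
 * later invocations fail. 'ffa_secondary_ep_register' mirrors the test
 * vmapi wrapper and 'secondary_ep' is an assumed entry symbol.
 *
 *	extern void secondary_ep(void);
 *
 *	struct ffa_value ret =
 *		ffa_secondary_ep_register((uintptr_t)secondary_ep);
 *	CHECK(ret.func == FFA_SUCCESS_32);
 */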
J-Alvesa0f317d2021-06-09 13:31:59 +01002940
2941struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
2942 ffa_vcpu_count_t vcpu_count,
2943 struct vcpu *current)
2944{
2945 if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
2946 dlog_verbose("Bitmap create for NWd VM IDs only (%x).\n",
2947 vm_id);
2948 return ffa_error(FFA_NOT_SUPPORTED);
2949 }
2950
2951 return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
2952}
2953
2954struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
2955 struct vcpu *current)
2956{
2957 /*
2958 * Validity of use of this interface is the same as for bitmap create.
2959 */
2960 if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
2961 dlog_verbose("Bitmap destroy for NWd VM IDs only (%x).\n",
2962 vm_id);
2963 return ffa_error(FFA_NOT_SUPPORTED);
2964 }
2965
2966 return plat_ffa_notifications_bitmap_destroy(vm_id);
2967}
J-Alvesc003a7a2021-03-18 13:06:53 +00002968
2969struct ffa_value api_ffa_notification_update_bindings(
2970 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
2971 ffa_notifications_bitmap_t notifications, bool is_bind,
2972 struct vcpu *current)
2973{
2974 struct ffa_value ret = {.func = FFA_SUCCESS_32};
2975 struct vm_locked receiver_locked;
2976 const bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
2977 const ffa_vm_id_t id_to_update =
2978 is_bind ? sender_vm_id : HF_INVALID_VM_ID;
2979 const ffa_vm_id_t id_to_validate =
2980 is_bind ? HF_INVALID_VM_ID : sender_vm_id;
2981
2982 if (!plat_ffa_is_notifications_bind_valid(current, sender_vm_id,
2983 receiver_vm_id)) {
2984 dlog_verbose("Invalid use of notifications bind interface.\n");
2985 return ffa_error(FFA_INVALID_PARAMETERS);
2986 }
2987
J-Alvesb15e9402021-09-08 11:44:42 +01002988 if (plat_ffa_notifications_update_bindings_forward(
2989 receiver_vm_id, sender_vm_id, flags, notifications, is_bind,
2990 &ret)) {
J-Alvesb15e9402021-09-08 11:44:42 +01002991 return ret;
2992 }
2993
J-Alvesc003a7a2021-03-18 13:06:53 +00002994 if (notifications == 0U) {
2995 dlog_verbose("No notifications have been specified.\n");
2996 return ffa_error(FFA_INVALID_PARAMETERS);
2997 }
2998
2999	/*
3000	 * This check assumes the receiver is the current VM, as enforced
3001	 * by 'plat_ffa_is_notifications_bind_valid'.
3002 */
3003 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
3004
3005 if (receiver_locked.vm == NULL) {
3006 dlog_verbose("Receiver doesn't exist!\n");
3007 return ffa_error(FFA_DENIED);
3008 }
3009
J-Alves09ff9d82021-11-02 11:55:20 +00003010 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesc003a7a2021-03-18 13:06:53 +00003011 dlog_verbose("Notifications are not enabled.\n");
3012 ret = ffa_error(FFA_NOT_SUPPORTED);
3013 goto out;
3014 }
3015
3016 if (is_bind && vm_id_is_current_world(sender_vm_id) &&
3017 vm_find(sender_vm_id) == NULL) {
3018 dlog_verbose("Sender VM does not exist!\n");
3019 ret = ffa_error(FFA_INVALID_PARAMETERS);
3020 goto out;
3021 }
3022
3023 /*
3024 * Can't bind/unbind notifications if at least one is bound to a
3025 * different sender.
3026 */
3027 if (!vm_notifications_validate_bound_sender(
3028 receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
3029 id_to_validate, notifications)) {
3030 dlog_verbose("Notifications are bound to other sender.\n");
3031 ret = ffa_error(FFA_DENIED);
3032 goto out;
3033 }
3034
3035	/*
3036 * Check if there is a pending notification within those specified in
3037 * the bitmap.
3038 */
3039 if (vm_are_notifications_pending(receiver_locked,
3040 plat_ffa_is_vm_id(sender_vm_id),
3041 notifications)) {
3042 dlog_verbose("Notifications within '%x' pending.\n",
3043 notifications);
3044 ret = ffa_error(FFA_DENIED);
3045 goto out;
3046 }
3047
3048 vm_notifications_update_bindings(
3049 receiver_locked, plat_ffa_is_vm_id(sender_vm_id), id_to_update,
3050 notifications, is_per_vcpu && is_bind);
3051
3052out:
3053 vm_unlock(&receiver_locked);
3054 return ret;
3055}
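
/*
 * Receiver-side sketch (hedged): binding a per-vCPU notification to a
 * sender before that sender may set it. 'ffa_notification_bind' mirrors
 * the test vmapi wrapper; 'sender_id' and 'own_id' are assumptions.
 *
 *	ffa_notifications_bitmap_t bitmap = FFA_NOTIFICATION_MASK(3);
 *	struct ffa_value ret = ffa_notification_bind(
 *		sender_id, own_id, FFA_NOTIFICATION_FLAG_PER_VCPU, bitmap);
 *	CHECK(ret.func == FFA_SUCCESS_32);
 */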
J-Alvesaa79c012021-07-09 14:29:45 +01003056
3057struct ffa_value api_ffa_notification_set(
3058 ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
3059 ffa_notifications_bitmap_t notifications, struct vcpu *current)
3060{
3061 struct ffa_value ret;
3062 struct vm_locked receiver_locked;
3063
3064 /*
3065	 * Check whether the notification is per-vCPU or global, and extract
3066	 * the vCPU ID according to table 17.19 of the FF-A v1.1 Beta 0 spec.
3067 */
3068 bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
3069 ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
3070
J-Alvesaa79c012021-07-09 14:29:45 +01003071 if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
3072 receiver_vm_id)) {
3073 dlog_verbose("Invalid use of notifications set interface.\n");
3074 return ffa_error(FFA_INVALID_PARAMETERS);
3075 }
3076
3077 if (notifications == 0U) {
3078 dlog_verbose("No notifications have been specified.\n");
3079 return ffa_error(FFA_INVALID_PARAMETERS);
3080 }
3081
J-Alvesde7bd2f2021-09-09 19:54:35 +01003082 if (plat_ffa_notification_set_forward(sender_vm_id, receiver_vm_id,
3083 flags, notifications, &ret)) {
3084 return ret;
3085 }
3086
J-Alvesaa79c012021-07-09 14:29:45 +01003087 /*
3088 * This check assumes receiver is the current VM, and has been enforced
3089 * by 'plat_ffa_is_notification_set_valid'.
3090 */
3091 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
3092
3093 if (receiver_locked.vm == NULL) {
3094 dlog_verbose("Receiver ID is not valid.\n");
3095 return ffa_error(FFA_INVALID_PARAMETERS);
3096 }
3097
J-Alves09ff9d82021-11-02 11:55:20 +00003098 if (!vm_locked_are_notifications_enabled(receiver_locked)) {
J-Alvesaa79c012021-07-09 14:29:45 +01003099 dlog_verbose("Receiver's notifications not enabled.\n");
3100 ret = ffa_error(FFA_DENIED);
3101 goto out;
3102 }
3103
3104 /*
3105	 * If the notifications are not bound to the sender, they are not
3106	 * enabled for the receiver either.
3107 */
3108 if (!vm_notifications_validate_binding(
3109 receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
3110 sender_vm_id, notifications, is_per_vcpu)) {
3111 dlog_verbose("Notifications bindings not valid.\n");
3112 ret = ffa_error(FFA_DENIED);
3113 goto out;
3114 }
3115
3116 if (is_per_vcpu && vcpu_id >= receiver_locked.vm->vcpu_count) {
3117 dlog_verbose("Invalid VCPU ID!\n");
3118 ret = ffa_error(FFA_INVALID_PARAMETERS);
3119 goto out;
3120 }
3121
J-Alves7461ef22021-10-18 17:21:33 +01003122 /* Set notifications pending. */
J-Alves5a16c962022-03-25 12:32:51 +00003123 vm_notifications_partition_set_pending(
3124 receiver_locked, plat_ffa_is_vm_id(sender_vm_id), notifications,
3125 vcpu_id, is_per_vcpu);
3126
J-Alvesaa79c012021-07-09 14:29:45 +01003127 dlog_verbose("Set the notifications: %x.\n", notifications);
3128
J-Alves13394022021-06-30 13:48:49 +01003129 if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
3130 dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
3131 vcpu_index(current));
3132 plat_ffa_sri_trigger_not_delayed(current->cpu);
3133 } else {
3134 plat_ffa_sri_state_set(DELAYED);
3135 }
J-Alvesaa79c012021-07-09 14:29:45 +01003136
J-Alves13394022021-06-30 13:48:49 +01003137 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
J-Alvesaa79c012021-07-09 14:29:45 +01003138out:
3139 vm_unlock(&receiver_locked);
3140
3141 return ret;
3142}
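
/*
 * Sender-side sketch (hedged): setting a previously bound global
 * notification while delaying the schedule receiver interrupt, the case
 * recorded above via plat_ffa_sri_state_set(DELAYED).
 * 'ffa_notification_set' mirrors the test vmapi wrapper.
 *
 *	struct ffa_value ret = ffa_notification_set(
 *		own_id, receiver_id, FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
 *		FFA_NOTIFICATION_MASK(3));
 *	CHECK(ret.func == FFA_SUCCESS_32);
 */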
3143
3144static struct ffa_value api_ffa_notification_get_success_return(
3145 ffa_notifications_bitmap_t from_sp, ffa_notifications_bitmap_t from_vm,
3146 ffa_notifications_bitmap_t from_framework)
3147{
3148 return (struct ffa_value){
3149 .func = FFA_SUCCESS_32,
3150 .arg1 = 0U,
3151 .arg2 = (uint32_t)from_sp,
3152 .arg3 = (uint32_t)(from_sp >> 32),
3153 .arg4 = (uint32_t)from_vm,
3154 .arg5 = (uint32_t)(from_vm >> 32),
3155 .arg6 = (uint32_t)from_framework,
3156 .arg7 = (uint32_t)(from_framework >> 32),
3157 };
3158}
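
/*
 * On the caller's side each 64-bit bitmap is reassembled from the two
 * 32-bit halves packed above (a sketch; 'ret' is the returned value):
 *
 *	ffa_notifications_bitmap_t from_sp =
 *		(uint64_t)ret.arg2 | ((uint64_t)ret.arg3 << 32);
 *	ffa_notifications_bitmap_t from_vm =
 *		(uint64_t)ret.arg4 | ((uint64_t)ret.arg5 << 32);
 */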
3159
3160struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
3161 ffa_vcpu_index_t vcpu_id,
3162 uint32_t flags, struct vcpu *current)
3163{
J-Alves663682a2022-03-25 13:56:51 +00003164 ffa_notifications_bitmap_t framework_notifications = 0;
J-Alvesaa79c012021-07-09 14:29:45 +01003165 ffa_notifications_bitmap_t sp_notifications = 0;
3166 ffa_notifications_bitmap_t vm_notifications = 0;
3167 struct vm_locked receiver_locked;
3168 struct ffa_value ret;
J-Alvesfc95a302022-04-22 14:18:23 +01003169 const uint32_t flags_mbz = ~(FFA_NOTIFICATION_FLAG_BITMAP_HYP |
3170 FFA_NOTIFICATION_FLAG_BITMAP_SPM |
3171 FFA_NOTIFICATION_FLAG_BITMAP_SP |
3172 FFA_NOTIFICATION_FLAG_BITMAP_VM);
3173
3174 /* The FF-A v1.1 EAC0 specification states bits [31:4] Must Be Zero. */
3175 if ((flags & flags_mbz) != 0U) {
3176 dlog_verbose(
3177 "Invalid flags bit(s) set in notifications get. [31:4] "
3178 "MBZ(%x)\n",
3179 flags);
3180 return ffa_error(FFA_INVALID_PARAMETERS);
3181 }
J-Alvesaa79c012021-07-09 14:29:45 +01003182
3183 /*
J-Alvesfc95a302022-04-22 14:18:23 +01003184	 * The following check should capture wrong uses of the interface,
3185	 * depending on whether Hafnium is the SPMC or the hypervisor. In the
3186	 * rest of the function it is assumed that this condition is met.
J-Alvesaa79c012021-07-09 14:29:45 +01003187 */
J-Alvesfc95a302022-04-22 14:18:23 +01003188 if (!plat_ffa_is_notification_get_valid(current, receiver_vm_id,
3189 flags)) {
J-Alvesaa79c012021-07-09 14:29:45 +01003190 dlog_verbose("Invalid use of notifications get interface.\n");
3191 return ffa_error(FFA_INVALID_PARAMETERS);
3192 }
3193
3194 /*
3195	 * This check assumes the receiver is the current VM, as enforced
3196	 * by `plat_ffa_is_notification_get_valid`.
3197 */
3198 receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
3199
3200 /*
3201	 * `plat_ffa_is_notification_get_valid` ensures the following is
3202	 * never true.
3203 */
3204 CHECK(receiver_locked.vm != NULL);
3205
3206 if (receiver_locked.vm->vcpu_count <= vcpu_id ||
3207 (receiver_locked.vm->vcpu_count != 1 &&
3208 cpu_index(current->cpu) != vcpu_id)) {
J-Alves1abb3342022-01-05 11:59:10 +00003209 dlog_verbose(
3210 "Invalid VCPU ID %u. vcpu count %u current core: %u!\n",
3211 vcpu_id, receiver_locked.vm->vcpu_count,
3212 cpu_index(current->cpu));
J-Alvesaa79c012021-07-09 14:29:45 +01003213 ret = ffa_error(FFA_INVALID_PARAMETERS);
3214 goto out;
3215 }
3216
3217 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_SP) != 0U) {
J-Alves98ff9562021-09-09 14:39:41 +01003218 if (!plat_ffa_notifications_get_from_sp(
3219 receiver_locked, vcpu_id, &sp_notifications,
3220 &ret)) {
3221			dlog_verbose("Failed to get notifications from SPs.\n");
3222 goto out;
3223 }
J-Alvesaa79c012021-07-09 14:29:45 +01003224 }
3225
3226 if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U) {
J-Alves5136dda2022-03-25 12:26:38 +00003227 vm_notifications = vm_notifications_partition_get_pending(
J-Alvesaa79c012021-07-09 14:29:45 +01003228 receiver_locked, true, vcpu_id);
3229 }
3230
J-Alves663682a2022-03-25 13:56:51 +00003231 ret = api_ffa_notification_get_success_return(
3232 sp_notifications, vm_notifications, framework_notifications);
J-Alvesaa79c012021-07-09 14:29:45 +01003233
J-Alvesfe23ebe2021-10-13 16:07:07 +01003234 /*
3235 * If there are no more pending notifications, change `sri_state` to
3236 * handled.
3237 */
3238 if (vm_is_notifications_pending_count_zero()) {
3239 plat_ffa_sri_state_set(HANDLED);
3240 }
3241
J-Alves6e2abc62021-12-02 14:58:56 +00003242 if (!receiver_locked.vm->el0_partition &&
3243 !vm_are_global_notifications_pending(receiver_locked)) {
3244 vm_notifications_set_npi_injected(receiver_locked, false);
3245 }
3246
J-Alvesaa79c012021-07-09 14:29:45 +01003247out:
3248 vm_unlock(&receiver_locked);
3249
3250 return ret;
3251}
J-Alvesc8e8a222021-06-08 17:33:52 +01003252
3253/**
3254 * Prepares the successful return for FFA_NOTIFICATION_INFO_GET, as
3255 * described in section 17.7.1 of the FF-A v1.1 Beta0 specification.
3256 */
3257static struct ffa_value api_ffa_notification_info_get_success_return(
3258 const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
J-Alvesfe23ebe2021-10-13 16:07:07 +01003259 uint32_t lists_count)
J-Alvesc8e8a222021-06-08 17:33:52 +01003260{
3261 struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
3262
3263 /*
3264	 * Copy the contents of 'ids' into the ret structure, using 5
3265	 * registers (x3-x7) to hold the list of ids.
3266 */
3267 memcpy_s(&ret.arg3,
3268 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET, ids,
3269 sizeof(ids[0]) * ids_count);
3270
3271 /*
3272	 * According to the spec, x2 should hold:
3273	 * - A bit flagging whether there are more notifications pending;
3274	 * - The total number of elements (i.e. total list size);
3275	 * - The number of vCPU IDs within each VM-specific list.
3276 */
J-Alvesfe23ebe2021-10-13 16:07:07 +01003277 ret.arg2 = vm_notifications_pending_not_retrieved_by_scheduler()
3278 ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING
3279 : 0;
J-Alvesc8e8a222021-06-08 17:33:52 +01003280
3281 ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
3282 << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
3283
3284 for (unsigned int i = 0; i < lists_count; i++) {
3285 ret.arg2 |= (lists_sizes[i] & FFA_NOTIFICATIONS_LIST_SIZE_MASK)
3286 << FFA_NOTIFICATIONS_LIST_SHIFT(i + 1);
3287 }
3288
3289 return ret;
3290}
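
/*
 * Scheduler-side sketch: decoding the x2 word packed above, using the
 * same masks and shifts ('ret' is the value returned by this function).
 *
 *	bool more_pending =
 *		(ret.arg2 & FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING) != 0;
 *	uint32_t lists_count =
 *		(ret.arg2 >> FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT) &
 *		FFA_NOTIFICATIONS_LISTS_COUNT_MASK;
 *	for (uint32_t i = 0; i < lists_count; i++) {
 *		uint32_t list_size =
 *			(ret.arg2 >> FFA_NOTIFICATIONS_LIST_SHIFT(i + 1)) &
 *			FFA_NOTIFICATIONS_LIST_SIZE_MASK;
 *	}
 */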
3291
3292struct ffa_value api_ffa_notification_info_get(struct vcpu *current)
3293{
3294 /*
3295	 * The following set of variables should be populated with the return
3296	 * info. On successful handling of this interface, they are used to
3297	 * populate the 'ret' structure in accordance with table 17.29 of the
3298	 * FF-A v1.1 Beta0 specification.
3299 */
3300 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
3301 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
3302 uint32_t lists_count = 0;
3303 uint32_t ids_count = 0;
3304 bool list_is_full = false;
J-Alves13394022021-06-30 13:48:49 +01003305 struct ffa_value result;
J-Alvesc8e8a222021-06-08 17:33:52 +01003306
3307 /*
3308	 * This interface can only be called at the NS virtual/physical FF-A
3309	 * instance by the endpoint implementing the primary scheduler and the
3310	 * Hypervisor/OS kernel.
3311	 * In the SPM, the following check passes if the call has been
3312	 * forwarded from the hypervisor.
3313 */
3314 if (current->vm->id != HF_PRIMARY_VM_ID) {
3315 dlog_verbose(
3316 "Only the receiver's scheduler can use this "
3317 "interface\n");
3318 return ffa_error(FFA_NOT_SUPPORTED);
3319 }
3320
J-Alvesca058c22021-09-10 14:02:07 +01003321 /*
3322	 * Forward the call to the other world, and fill the arrays used to
3323	 * assemble the return.
3324 */
3325 plat_ffa_notification_info_get_forward(
3326 ids, &ids_count, lists_sizes, &lists_count,
3327 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3328
3329 list_is_full = ids_count == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
3330
J-Alvesc8e8a222021-06-08 17:33:52 +01003331 /* Get notifications' info from this world */
3332 for (ffa_vm_count_t index = 0; index < vm_get_count() && !list_is_full;
3333 ++index) {
3334 struct vm_locked vm_locked = vm_lock(vm_find_index(index));
3335
3336 list_is_full = vm_notifications_info_get(
3337 vm_locked, ids, &ids_count, lists_sizes, &lists_count,
3338 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3339
3340 vm_unlock(&vm_locked);
3341 }
3342
3343 if (!list_is_full) {
3344		/* Grab notifications info from the other world. */
J-Alvesfe23ebe2021-10-13 16:07:07 +01003345 plat_ffa_vm_notifications_info_get(
J-Alvesc8e8a222021-06-08 17:33:52 +01003346 ids, &ids_count, lists_sizes, &lists_count,
3347 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
3348 }
3349
3350 if (ids_count == 0) {
J-Alvesca058c22021-09-10 14:02:07 +01003351 dlog_verbose(
3352 "Notification info get has no data to retrieve.\n");
J-Alves13394022021-06-30 13:48:49 +01003353 result = ffa_error(FFA_NO_DATA);
3354 } else {
3355 result = api_ffa_notification_info_get_success_return(
J-Alvesfe23ebe2021-10-13 16:07:07 +01003356 ids, ids_count, lists_sizes, lists_count);
J-Alvesc8e8a222021-06-08 17:33:52 +01003357 }
3358
J-Alvesfe23ebe2021-10-13 16:07:07 +01003359 plat_ffa_sri_state_set(HANDLED);
3360
J-Alves13394022021-06-30 13:48:49 +01003361 return result;
J-Alvesc8e8a222021-06-08 17:33:52 +01003362}
Raghu Krishnamurthyea6d25f2021-09-14 15:27:06 -07003363
3364struct ffa_value api_ffa_mem_perm_get(vaddr_t base_addr, struct vcpu *current)
3365{
3366 struct vm_locked vm_locked;
3367 struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
3368 bool mode_ret = false;
3369 uint32_t mode = 0;
3370
3371 if (!plat_ffa_is_mem_perm_get_valid(current)) {
3372 return ffa_error(FFA_NOT_SUPPORTED);
3373 }
3374
3375 if (!(current->vm->el0_partition)) {
3376 return ffa_error(FFA_DENIED);
3377 }
3378
3379 vm_locked = vm_lock(current->vm);
3380
3381 /*
3382 * mm_get_mode is used to check if the given base_addr page is already
3383	 * mapped. If the page is unmapped, return an error. If the page is
3384	 * mapped, appropriate attributes are returned to the caller. Note that
3385 * mm_get_mode returns true if the address is in the valid VA range as
3386 * supported by the architecture and MMU configurations, as opposed to
3387 * whether a page is mapped or not. For a page to be known as mapped,
3388 * the API must return true AND the returned mode must not have
3389 * MM_MODE_INVALID set.
3390 */
3391 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
3392 va_add(base_addr, PAGE_SIZE), &mode);
3393 if (!mode_ret || (mode & MM_MODE_INVALID)) {
3394 ret = ffa_error(FFA_INVALID_PARAMETERS);
3395 goto out;
3396 }
3397
3398 /* No memory should be marked RWX */
3399 CHECK((mode & (MM_MODE_R | MM_MODE_W | MM_MODE_X)) !=
3400 (MM_MODE_R | MM_MODE_W | MM_MODE_X));
3401
3402 /*
3403 * S-EL0 partitions are expected to have all their pages marked as
3404 * non-global.
3405 */
3406 CHECK((mode & (MM_MODE_NG | MM_MODE_USER)) ==
3407 (MM_MODE_NG | MM_MODE_USER));
3408
3409 if (mode & MM_MODE_W) {
3410 /* No memory should be writeable but not readable. */
3411 CHECK(mode & MM_MODE_R);
3412 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
3413 .arg2 = (uint32_t)(FFA_MEM_PERM_RW)};
3414 } else if (mode & MM_MODE_R) {
3415 ret = (struct ffa_value){.func = FFA_SUCCESS_32,
3416 .arg2 = (uint32_t)(FFA_MEM_PERM_RX)};
3417 if (!(mode & MM_MODE_X)) {
3418 ret.arg2 = (uint32_t)(FFA_MEM_PERM_RO);
3419 }
3420 }
3421out:
3422 vm_unlock(&vm_locked);
3423 return ret;
3424}
3425
3426struct ffa_value api_ffa_mem_perm_set(vaddr_t base_addr, uint32_t page_count,
3427 uint32_t mem_perm, struct vcpu *current)
3428{
3429 struct vm_locked vm_locked;
3430 struct ffa_value ret;
3431 bool mode_ret = false;
3432 uint32_t original_mode;
3433 uint32_t new_mode;
3434 struct mpool local_page_pool;
3435
3436 if (!plat_ffa_is_mem_perm_set_valid(current)) {
3437 return ffa_error(FFA_NOT_SUPPORTED);
3438 }
3439
3440 if (!(current->vm->el0_partition)) {
3441 return ffa_error(FFA_DENIED);
3442 }
3443
3444 if (!is_aligned(va_addr(base_addr), PAGE_SIZE)) {
3445 return ffa_error(FFA_INVALID_PARAMETERS);
3446 }
3447
3448 if ((mem_perm != FFA_MEM_PERM_RW) && (mem_perm != FFA_MEM_PERM_RO) &&
3449 (mem_perm != FFA_MEM_PERM_RX)) {
3450 return ffa_error(FFA_INVALID_PARAMETERS);
3451 }
3452
3453 /*
3454 * Create a local pool so any freed memory can't be used by another
3455 * thread. This is to ensure the original mapping can be restored if any
3456 * stage of the process fails.
3457 */
3458 mpool_init_with_fallback(&local_page_pool, &api_page_pool);
3459
3460 vm_locked = vm_lock(current->vm);
3461
3462 /*
3463 * All regions accessible by the partition are mapped during boot. If we
3464 * cannot get a successful translation for the page range, the request
3465 * to change permissions is rejected.
3466 * mm_get_mode is used to check if the given address range is already
3467	 * mapped. If the range is unmapped, return an error. If the range is
3468	 * mapped, appropriate attributes are returned to the caller. Note that
3469 * mm_get_mode returns true if the address is in the valid VA range as
3470 * supported by the architecture and MMU configurations, as opposed to
3471 * whether a page is mapped or not. For a page to be known as mapped,
3472 * the API must return true AND the returned mode must not have
3473 * MM_MODE_INVALID set.
3474 */
3475
3476 mode_ret = mm_get_mode(&vm_locked.vm->ptable, base_addr,
3477 va_add(base_addr, page_count * PAGE_SIZE),
3478 &original_mode);
3479 if (!mode_ret || (original_mode & MM_MODE_INVALID)) {
3480 ret = ffa_error(FFA_INVALID_PARAMETERS);
3481 goto out;
3482 }
3483
3484 /* Device memory cannot be marked as executable */
3485 if ((original_mode & MM_MODE_D) && (mem_perm == FFA_MEM_PERM_RX)) {
3486 ret = ffa_error(FFA_INVALID_PARAMETERS);
3487 goto out;
3488 }
3489
3490 new_mode = MM_MODE_USER | MM_MODE_NG;
3491
3492 if (mem_perm == FFA_MEM_PERM_RW) {
3493 new_mode |= MM_MODE_R | MM_MODE_W;
3494 } else if (mem_perm == FFA_MEM_PERM_RX) {
3495 new_mode |= MM_MODE_R | MM_MODE_X;
3496 } else if (mem_perm == FFA_MEM_PERM_RO) {
3497 new_mode |= MM_MODE_R;
3498 }
3499
3500 /*
3501 * Safe to re-map memory, since we know the requested permissions are
3502 * valid, and the memory requested to be re-mapped is also valid.
3503 */
3504 if (!mm_identity_prepare(
3505 &vm_locked.vm->ptable, pa_from_va(base_addr),
3506 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3507 new_mode, &local_page_pool)) {
3508 /*
3509 * Defrag the table into the local page pool.
3510 * mm_identity_prepare could have allocated or freed pages to
3511 * split blocks or tables etc.
3512 */
3513 mm_stage1_defrag(&vm_locked.vm->ptable, &local_page_pool);
3514
3515 /*
3516		 * Remapping with the old mode is guaranteed to succeed since
3517		 * that mapping already existed and we have a local page pool
3518		 * that should have sufficient memory to go back to the
3519		 * original state.
3520 */
3521 CHECK(mm_identity_prepare(
3522 &vm_locked.vm->ptable, pa_from_va(base_addr),
3523 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3524 original_mode, &local_page_pool));
3525 mm_identity_commit(
3526 &vm_locked.vm->ptable, pa_from_va(base_addr),
3527 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)),
3528 original_mode, &local_page_pool);
3529
3530 mm_stage1_defrag(&vm_locked.vm->ptable, &api_page_pool);
3531 ret = ffa_error(FFA_NO_MEMORY);
3532 goto out;
3533 }
3534
3535 mm_identity_commit(
3536 &vm_locked.vm->ptable, pa_from_va(base_addr),
3537 pa_from_va(va_add(base_addr, page_count * PAGE_SIZE)), new_mode,
3538 &local_page_pool);
3539
3540 ret = (struct ffa_value){.func = FFA_SUCCESS_32};
3541
3542out:
3543 mpool_fini(&local_page_pool);
3544 vm_unlock(&vm_locked);
3545
3546 return ret;
3547}
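
/*
 * EL0 partition sketch (hedged): making a writable data page read-only
 * and checking the result. 'ffa_mem_perm_set' and 'ffa_mem_perm_get'
 * mirror the test vmapi wrappers; 'page' is an assumed page-aligned
 * address owned by the partition.
 *
 *	struct ffa_value ret =
 *		ffa_mem_perm_set((uintptr_t)page, 1, FFA_MEM_PERM_RO);
 *	CHECK(ret.func == FFA_SUCCESS_32);
 *	ret = ffa_mem_perm_get((uintptr_t)page);
 *	CHECK(ret.func == FFA_SUCCESS_32 && ret.arg2 == FFA_MEM_PERM_RO);
 */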