/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/api.h"

#include "hf/arch/cpu.h"
#include "hf/arch/ffa_memory_handle.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

static_assert(sizeof(struct ffa_partition_info) == 8,
	      "Partition information descriptor size doesn't match the one in "
	      "the FF-A 1.0 EAC specification, Table 82.");

/*
 * To eliminate the risk of deadlocks, we define a partial order for the
 * acquisition of locks held concurrently by the same physical CPU. Our current
 * ordering requirements are as follows:
 *
 * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
 *
 * Locks of the same kind require the lock of lowest address to be locked first,
 * see `sl_lock_both()`.
 */
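
/*
 * Illustrative sketch (not compiled here): a path that needs both a VM lock
 * and a vCPU lock must take them in the order above, e.g.
 *
 *	vm_locked = vm_lock(vm);	// vm::lock first
 *	vcpu_locked = vcpu_lock(vcpu);	// then vcpu::lock
 *	...
 *	vcpu_unlock(&vcpu_locked);
 *	vm_unlock(&vm_locked);
 *
 * api_vcpu_prepare_run() below drops and retakes the vCPU lock so that the VM
 * lock can be acquired without violating this order.
 */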

static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
	      "Currently, a page is mapped for the send and receive buffers so "
	      "the maximum request is the size of a page.");

static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
	      "The page pool entry size must be at least as big as the mailbox "
	      "size, so that memory region descriptors can be copied from the "
	      "mailbox for memory sharing.");

static struct mpool api_page_pool;

/**
 * Initialises the API page pool by taking ownership of the contents of the
 * given page pool.
 */
void api_init(struct mpool *ppool)
{
	mpool_init_from(&api_page_pool, ppool);
}
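
/*
 * Illustrative boot-time usage (a sketch; the real call site lives in the
 * one-time initialisation code and its exact pool set-up may differ):
 *
 *	struct mpool ppool;
 *
 *	mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
 *	// ... add the boot-time free pages to ppool ...
 *	api_init(&ppool);
 */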

/**
 * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
 * is given as an argument.
 *
 * Called to change the context between SPs for direct messaging (when Hafnium
 * is the SPMC), and in the context of the remaining 'api_switch_to_*'
 * functions.
 *
 * This function works for partitions that are:
 * - UP non-migratable.
 * - MP with pinned Execution Contexts.
 */
static struct vcpu *api_switch_to_vm(struct vcpu *current,
				     struct ffa_value to_ret,
				     enum vcpu_state vcpu_state,
				     ffa_vm_id_t to_id)
{
	struct vm *to_vm = vm_find(to_id);
	struct vcpu *next = vm_get_vcpu(to_vm, cpu_index(current->cpu));

	CHECK(next != NULL);

	/* Set the return value for the target VM. */
	arch_regs_set_retval(&next->regs, to_ret);

	/* Set the current vCPU state. */
	sl_lock(&current->lock);
	current->state = vcpu_state;
	sl_unlock(&current->lock);

	return next;
}

/**
 * Switches the physical CPU back to the corresponding vCPU of the primary VM.
 *
 * This triggers the scheduling logic to run. Run in the context of a secondary
 * VM to cause FFA_RUN to return and the primary VM to regain control of the
 * CPU.
 */
static struct vcpu *api_switch_to_primary(struct vcpu *current,
					  struct ffa_value primary_ret,
					  enum vcpu_state secondary_state)
{
	/*
	 * If the secondary is blocked but has a timer running, sleep until the
	 * timer fires rather than indefinitely.
	 */
	switch (primary_ret.func) {
	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
	case FFA_MSG_WAIT_32: {
		if (arch_timer_enabled_current()) {
			uint64_t remaining_ns =
				arch_timer_remaining_ns_current();

			if (remaining_ns == 0) {
				/*
				 * Timer is pending, so the current vCPU should
				 * be run again right away.
				 */
				primary_ret.func = FFA_INTERRUPT_32;
				/*
				 * primary_ret.arg1 should already be set to the
				 * current VM ID and vCPU ID.
				 */
				primary_ret.arg2 = 0;
			} else {
				primary_ret.arg2 = remaining_ns;
			}
		} else {
			primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
		}
		break;
	}

	default:
		/* Do nothing. */
		break;
	}

	return api_switch_to_vm(current, primary_ret, secondary_state,
				HF_PRIMARY_VM_ID);
}

/**
 * Chooses the next vCPU to run to be the counterpart vCPU in the other
 * world (run the normal world if currently running in the secure
 * world). Sets the current vCPU state to the given vcpu_state parameter.
 * Sets the FF-A return values to the target vCPU in the other world.
 *
 * Called in the context of a direct message response from a secure
 * partition to a VM.
 */
static struct vcpu *api_switch_to_other_world(struct vcpu *current,
					      struct ffa_value other_world_ret,
					      enum vcpu_state vcpu_state)
{
	return api_switch_to_vm(current, other_world_ret, vcpu_state,
				HF_OTHER_WORLD_ID);
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy, and optionally
 * registers the `from` VM to be notified when it becomes available.
 */
static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
{
	if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    to.vm->mailbox.recv == NULL) {
		/*
		 * Fail if the receiver isn't currently ready to receive data,
		 * setting up for notification if requested.
		 */
		if (notify) {
			struct wait_entry *entry =
				vm_get_wait_entry(from, to.vm->id);

			/* Append waiter only if it's not there yet. */
			if (list_empty(&entry->wait_links)) {
				list_append(&to.vm->mailbox.waiter_list,
					    &entry->wait_links);
			}
		}

		return true;
	}

	return false;
}
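
/*
 * Typical caller pattern (a sketch; see api_ffa_msg_send() below for the real
 * use): the receiver must stay locked across the busy check and the copy into
 * its RX buffer.
 *
 *	to_locked = vm_lock(to);
 *	if (msg_receiver_busy(to_locked, from, notify)) {
 *		ret = ffa_error(FFA_BUSY);
 *	} else {
 *		// ... copy into to->mailbox.recv and deliver ...
 *	}
 *	vm_unlock(&to_locked);
 */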

/**
 * Returns true if the given vCPU is executing in the context of an
 * FFA_MSG_SEND_DIRECT_REQ invocation.
 */
static bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
	return locked.vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}

/**
 * Returns to the primary VM and signals that the vCPU still has work to do.
 */
struct vcpu *api_preempt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = FFA_INTERRUPT_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Puts the current vCPU in wait for interrupt mode, and returns to the primary
 * VM.
 */
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	return api_switch_to_primary(current, ret,
				     VCPU_STATE_BLOCKED_INTERRUPT);
}

/**
 * Puts the current vCPU in off mode, and returns to the primary VM.
 */
struct vcpu *api_vcpu_off(struct vcpu *current)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
	};

	/*
	 * Disable the timer, so the scheduler doesn't get told to call back
	 * based on it.
	 */
	arch_timer_disable_current();

	return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
}

/**
 * Returns to the primary VM to allow this CPU to be used for other tasks as the
 * vCPU does not have work to do at this moment. The current vCPU is marked as
 * ready to be scheduled again.
 */
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* NOOP on the primary as it makes the scheduling decisions. */
		return ret;
	}

	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);
	vcpu_unlock(&current_locked);

	if (is_direct_request_ongoing) {
		return ffa_error(FFA_DENIED);
	}

	*next = api_switch_to_primary(
		current,
		(struct ffa_value){.func = FFA_YIELD_32,
				   .arg1 = ffa_vm_vcpu(current->vm->id,
						       vcpu_index(current))},
		VCPU_STATE_READY);

	return ret;
}

/**
 * Switches to the primary so that it can switch to the target, or kick it if it
 * is already running on a different physical CPU.
 */
struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
{
	struct ffa_value ret = {
		.func = HF_FFA_RUN_WAKE_UP,
		.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
				    vcpu_index(target_vcpu)),
	};
	return api_switch_to_primary(current, ret, VCPU_STATE_READY);
}

/**
 * Aborts the vCPU and triggers its VM to abort fully.
 */
struct vcpu *api_abort(struct vcpu *current)
{
	struct ffa_value ret = ffa_error(FFA_ABORTED);

	dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
		    vcpu_index(current));

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/* TODO: what to do when the primary aborts? */
		for (;;) {
			/* Do nothing. */
		}
	}

	atomic_store_explicit(&current->vm->aborting, true,
			      memory_order_relaxed);

	/* TODO: free resources once all vCPUs abort. */

	return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}

struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
					    const struct ffa_uuid *uuid)
{
	struct vm *current_vm = current->vm;
	struct vm_locked current_vm_locked;
	ffa_vm_count_t vm_count = 0;
	bool uuid_is_null = ffa_uuid_is_null(uuid);
	struct ffa_value ret;
	uint32_t size;
	struct ffa_partition_info partitions[MAX_VMS];

	/*
	 * Iterate through the VMs to find the ones with a matching UUID.
	 * A Null UUID retrieves information for all VMs.
	 */
	for (uint16_t index = 0; index < vm_get_count(); ++index) {
		const struct vm *vm = vm_find_index(index);

		if (uuid_is_null || ffa_uuid_equal(uuid, &vm->uuid)) {
			partitions[vm_count].vm_id = vm->id;
			partitions[vm_count].vcpu_count = vm->vcpu_count;
			partitions[vm_count].properties =
				arch_vm_partition_properties(vm->id);

			++vm_count;
		}
	}

	/* Unrecognized UUID: does not match any of the VMs and is not Null. */
	if (vm_count == 0) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	size = vm_count * sizeof(partitions[0]);
	if (size > FFA_MSG_PAYLOAD_MAX) {
		dlog_error(
			"Partition information does not fit in the VM's RX "
			"buffer.\n");
		return ffa_error(FFA_NO_MEMORY);
	}

	/*
	 * Partition information is returned in the VM's RX buffer, which is why
	 * the lock is needed.
	 */
	current_vm_locked = vm_lock(current_vm);

	if (msg_receiver_busy(current_vm_locked, NULL, false)) {
		/*
		 * Can't retrieve memory information if the mailbox is not
		 * available.
		 */
		dlog_verbose("RX buffer not ready.\n");
		ret = ffa_error(FFA_BUSY);
		goto out_unlock;
	}

	/* Populate the VM's RX buffer with the partition information. */
	memcpy_s(current_vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, partitions,
		 size);
	current_vm->mailbox.recv_size = size;
	current_vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	current_vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
	current_vm->mailbox.state = MAILBOX_STATE_READ;

	/* Return the count of partition information descriptors in w2. */
	ret = (struct ffa_value){.func = FFA_SUCCESS_32, .arg2 = vm_count};

out_unlock:
	vm_unlock(&current_vm_locked);

	return ret;
}
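
/*
 * Illustrative caller-side sketch (guest code, not part of the hypervisor; the
 * wrapper names are assumptions about the vmapi): on success the descriptors
 * are in the caller's RX buffer, the count is in arg2, and the buffer should
 * be released afterwards.
 *
 *	struct ffa_value ret = ffa_partition_info_get(&uuid);
 *
 *	if (ret.func == FFA_SUCCESS_32) {
 *		const struct ffa_partition_info *info = recv_buffer;
 *
 *		for (ffa_vm_count_t i = 0; i < ret.arg2; i++) {
 *			// ... info[i].vm_id, info[i].vcpu_count ...
 *		}
 *		ffa_rx_release();
 *	}
 */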

/**
 * Returns the ID of the calling VM.
 */
struct ffa_value api_ffa_id_get(const struct vcpu *current)
{
	return (struct ffa_value){.func = FFA_SUCCESS_32,
				  .arg2 = current->vm->id};
}

/**
 * This function is called by the architecture-specific context switching
 * function to indicate that register state for the given vCPU has been saved
 * and can therefore be used by other pCPUs.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	sl_lock(&vcpu->lock);
	vcpu->regs_available = true;
	sl_unlock(&vcpu->lock);
}

/**
 * Retrieves the next waiter and removes it from the wait list if the VM's
 * mailbox is in a writable state.
 */
static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
{
	struct wait_entry *entry;
	struct vm *vm = locked_vm.vm;

	if (vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	    vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
		/* The mailbox is not writable or there are no waiters. */
		return NULL;
	}

	/* Remove waiter from the wait list. */
	entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
			     wait_links);
	list_remove(&entry->wait_links);
	return entry;
}

/**
 * Assuming that the arguments have already been checked by the caller, injects
 * a virtual interrupt of the given ID into the given target vCPU. This doesn't
 * cause the vCPU to actually be run immediately; it will be taken when the vCPU
 * is next run, which is up to the scheduler.
 *
 * Returns:
 * - 0 on success if no further action is needed.
 * - 1 if it was called by the primary VM and the primary VM now needs to wake
 *   up or kick the target vCPU.
 */
static int64_t internal_interrupt_inject_locked(
	struct vcpu_locked target_locked, uint32_t intid, struct vcpu *current,
	struct vcpu **next)
{
	uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
	uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
	int64_t ret = 0;

	/*
	 * We only need to change state and (maybe) trigger a virtual IRQ if it
	 * is enabled and was not previously pending. Otherwise we can skip
	 * everything except setting the pending bit.
	 *
	 * If you change this logic make sure to update the need_vm_lock logic
	 * above to match.
	 */
	if (!(target_locked.vcpu->interrupts.interrupt_enabled[intid_index] &
	      ~target_locked.vcpu->interrupts.interrupt_pending[intid_index] &
	      intid_mask)) {
		goto out;
	}

	/* Increment the count. */
	target_locked.vcpu->interrupts.enabled_and_pending_count++;

	/*
	 * Only need to update state if there was not already an
	 * interrupt enabled and pending.
	 */
	if (target_locked.vcpu->interrupts.enabled_and_pending_count != 1) {
		goto out;
	}

	if (current->vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * If the call came from the primary VM, let it know that it
		 * should run or kick the target vCPU.
		 */
		ret = 1;
	} else if (current != target_locked.vcpu && next != NULL) {
		*next = api_wake_up(current, target_locked.vcpu);
	}

out:
	/* Either way, make it pending. */
	target_locked.vcpu->interrupts.interrupt_pending[intid_index] |=
		intid_mask;

	return ret;
}

/*
 * Wrapper for internal_interrupt_inject_locked() that locks the target vCPU
 * around the injection.
 */
static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
					 uint32_t intid, struct vcpu *current,
					 struct vcpu **next)
{
	int64_t ret;
	struct vcpu_locked target_locked;

	target_locked = vcpu_lock(target_vcpu);
	ret = internal_interrupt_inject_locked(target_locked, intid, current,
					       next);
	vcpu_unlock(&target_locked);

	return ret;
}
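
/*
 * Worked example (assuming INTERRUPT_REGISTER_BITS is 32): injecting intid 35
 * gives intid_index = 35 / 32 = 1 and intid_mask = 1U << (35 % 32) = 0x8,
 * i.e. bit 3 of the second word of interrupt_pending/interrupt_enabled.
 */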

/**
 * Constructs an FFA_MSG_SEND value to return from a successful FFA_MSG_POLL
 * or FFA_MSG_WAIT call.
 */
static struct ffa_value ffa_msg_recv_return(const struct vm *receiver)
{
	switch (receiver->mailbox.recv_func) {
	case FFA_MSG_SEND_32:
		return (struct ffa_value){
			.func = FFA_MSG_SEND_32,
			.arg1 = (receiver->mailbox.recv_sender << 16) |
				receiver->id,
			.arg3 = receiver->mailbox.recv_size};
	default:
		/* This should never be reached, but return an error in case. */
		dlog_error("Tried to return an invalid message function %#x\n",
			   receiver->mailbox.recv_func);
		return ffa_error(FFA_DENIED);
	}
}
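
/*
 * Worked example: for a message sent from VM 0x1 to VM 0x3, the value built
 * above carries arg1 = (0x1 << 16) | 0x3 = 0x10003, i.e. the sender ID in the
 * upper 16 bits and the receiver ID in the lower 16 bits, matching the packing
 * used by deliver_msg() and api_ffa_msg_send() below.
 */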

/**
 * Prepares the vCPU to run by updating its state and determining whether a
 * return value needs to be forced onto the vCPU.
 */
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
				 struct ffa_value *run_ret)
{
	struct vcpu_locked vcpu_locked;
	struct vm_locked vm_locked;
	bool need_vm_lock;
	bool ret;

	/*
	 * Check that the registers are available so that the vCPU can be run.
	 *
	 * The VM lock is not needed in the common case so it must only be taken
	 * when it is going to be needed. This ensures there are no inter-vCPU
	 * dependencies in the common run case meaning the sensitive context
	 * switch performance is consistent.
	 */
	vcpu_locked = vcpu_lock(vcpu);

#if SECURE_WORLD == 1

	if (vcpu_secondary_reset_and_start(vcpu_locked, vcpu->vm->secondary_ep,
					   0)) {
		dlog_verbose("%s secondary cold boot vmid %#x vcpu id %#x\n",
			     __func__, vcpu->vm->id, current->cpu->id);
	}

#endif

	/* The VM needs to be locked to deliver mailbox messages. */
	need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
	if (need_vm_lock) {
		vcpu_unlock(&vcpu_locked);
		vm_locked = vm_lock(vcpu->vm);
		vcpu_locked = vcpu_lock(vcpu);
	}

	/*
	 * If the vCPU is already running somewhere then we can't run it here
	 * simultaneously. While it is actually running then the state should be
	 * `VCPU_STATE_RUNNING` and `regs_available` should be false. Once it
	 * stops running but while Hafnium is in the process of switching back
	 * to the primary there will be a brief period while the state has been
	 * updated but `regs_available` is still false (until
	 * `api_regs_state_saved` is called). We can't start running it again
	 * until this has finished, so count this state as still running for the
	 * purposes of this check.
	 */
	if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
		/*
		 * vCPU is running on another pCPU.
		 *
		 * It's okay not to return the sleep duration here because the
		 * other physical CPU that is currently running this vCPU will
		 * return the sleep duration if needed.
		 */
		*run_ret = ffa_error(FFA_BUSY);
		ret = false;
		goto out;
	}

	if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
		if (vcpu->state != VCPU_STATE_ABORTED) {
			dlog_notice("Aborting VM %#x vCPU %u\n", vcpu->vm->id,
				    vcpu_index(vcpu));
			vcpu->state = VCPU_STATE_ABORTED;
		}
		ret = false;
		goto out;
	}

	switch (vcpu->state) {
	case VCPU_STATE_RUNNING:
	case VCPU_STATE_OFF:
	case VCPU_STATE_ABORTED:
		ret = false;
		goto out;

	case VCPU_STATE_BLOCKED_MAILBOX:
		/*
		 * A pending message allows the vCPU to run so the message can
		 * be delivered directly.
		 */
		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
			arch_regs_set_retval(&vcpu->regs,
					     ffa_msg_recv_return(vcpu->vm));
			vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
			break;
		}
		/* Fall through. */
	case VCPU_STATE_BLOCKED_INTERRUPT:
		/* Allow virtual interrupts to be delivered. */
		if (vcpu->interrupts.enabled_and_pending_count > 0) {
			break;
		}

		uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;

		if (arch_timer_enabled(&vcpu->regs)) {
			timer_remaining_ns =
				arch_timer_remaining_ns(&vcpu->regs);

			/*
			 * The timer expired so allow the interrupt to be
			 * delivered.
			 */
			if (timer_remaining_ns == 0) {
				break;
			}
		}

		/*
		 * The vCPU is not ready to run, return the appropriate code to
		 * the primary which called vcpu_run.
		 */
		run_ret->func = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
					? FFA_MSG_WAIT_32
					: HF_FFA_RUN_WAIT_FOR_INTERRUPT;
		run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
		run_ret->arg2 = timer_remaining_ns;

		ret = false;
		goto out;

	case VCPU_STATE_READY:
		break;
	}

	/* It has been decided that the vCPU should be run. */
	vcpu->cpu = current->cpu;
	vcpu->state = VCPU_STATE_RUNNING;

	/*
	 * Mark the registers as unavailable now that we're about to reflect
	 * them onto the real registers. This will also prevent another physical
	 * CPU from trying to read these registers.
	 */
	vcpu->regs_available = false;

	ret = true;

out:
	vcpu_unlock(&vcpu_locked);
	if (need_vm_lock) {
		vm_unlock(&vm_locked);
	}

	return ret;
}

struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			     const struct vcpu *current, struct vcpu **next)
{
	struct vm *vm;
	struct vcpu *vcpu;
	struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);

	/* Only the primary VM can switch vCPUs. */
	if (current->vm->id != HF_PRIMARY_VM_ID) {
		ret.arg2 = FFA_DENIED;
		goto out;
	}

	/* Only secondary VM vCPUs can be run. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		goto out;
	}

	/* The requested VM must exist. */
	vm = vm_find(vm_id);
	if (vm == NULL) {
		goto out;
	}

	/* The requested vCPU must exist. */
	if (vcpu_idx >= vm->vcpu_count) {
		goto out;
	}

	/* Update state if allowed. */
	vcpu = vm_get_vcpu(vm, vcpu_idx);
	if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
		goto out;
	}

	/*
	 * Inject timer interrupt if timer has expired. It's safe to access
	 * vcpu->regs here because api_vcpu_prepare_run already made sure that
	 * regs_available was true (and then set it to false) before returning
	 * true.
	 */
	if (arch_timer_pending(&vcpu->regs)) {
		/* Make virtual timer interrupt pending. */
		internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
					  NULL);

		/*
		 * Set the mask bit so the hardware interrupt doesn't fire
		 * again. Ideally we wouldn't do this because it affects what
		 * the secondary vCPU sees, but if we don't then we end up with
		 * a loop of the interrupt firing each time we try to return to
		 * the secondary vCPU.
		 */
		arch_timer_mask(&vcpu->regs);
	}

	/* Switch to the vCPU. */
	*next = vcpu;

	/*
	 * Set a placeholder return code to the scheduler. This will be
	 * overwritten when the switch back to the primary occurs.
	 */
	ret.func = FFA_INTERRUPT_32;
	ret.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx);
	ret.arg2 = 0;

out:
	return ret;
}
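
/*
 * Illustrative primary-scheduler loop (caller-side sketch; schedule_sleep()
 * and mark_runnable() are hypothetical scheduler hooks, not Hafnium APIs):
 *
 *	struct ffa_value ret = ffa_run(vm_id, vcpu_idx);
 *
 *	switch (ret.func) {
 *	case FFA_MSG_WAIT_32:
 *	case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
 *		// Blocked vCPU; arg2 holds the sleep time in nanoseconds.
 *		schedule_sleep(ret.arg1, ret.arg2);
 *		break;
 *	case FFA_INTERRUPT_32:
 *		// The vCPU was preempted or is still runnable.
 *		mark_runnable(ret.arg1);
 *		break;
 *	default:
 *		break;
 *	}
 */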

/**
 * Check that the mode indicates memory that is valid, owned and exclusive.
 */
static bool api_mode_valid_owned_and_exclusive(uint32_t mode)
{
	return (mode & (MM_MODE_D | MM_MODE_INVALID | MM_MODE_UNOWNED |
			MM_MODE_SHARED)) == 0;
}

/**
 * Determines the value to be returned by api_ffa_rxtx_map and
 * api_ffa_rx_release after they've succeeded. If a secondary VM is running and
 * there are waiters, it also switches back to the primary VM for it to wake
 * waiters up.
 */
static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
					  struct vcpu *current,
					  struct vcpu **next)
{
	struct vm *vm = locked_vm.vm;

	if (list_empty(&vm->mailbox.waiter_list)) {
		/* No waiters, nothing else to do. */
		return (struct ffa_value){.func = FFA_SUCCESS_32};
	}

	if (vm->id == HF_PRIMARY_VM_ID) {
		/* The caller is the primary VM. Tell it to wake up waiters. */
		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
	}

	/*
	 * Switch back to the primary VM, informing it that there are waiters
	 * that need to be notified.
	 */
	*next = api_switch_to_primary(
		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
		VCPU_STATE_READY);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Configures the hypervisor's stage-1 view of the send and receive pages.
 */
static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
				    struct vm_locked vm_locked,
				    paddr_t pa_send_begin, paddr_t pa_send_end,
				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
				    uint32_t extra_attributes,
				    struct mpool *local_page_pool)
{
	bool ret;

	/* Map the send page as read-only in the hypervisor address space. */
	vm_locked.vm->mailbox.send =
		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
				MM_MODE_R | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.send) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail;
	}

	/*
	 * Map the receive page as writable in the hypervisor address space. On
	 * failure, unmap the send page before returning.
	 */
	vm_locked.vm->mailbox.recv =
		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
				MM_MODE_W | extra_attributes, local_page_pool);
	if (!vm_locked.vm->mailbox.recv) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(mm_stage1_locked, local_page_pool);
		goto fail_undo_send;
	}

	ret = true;
	goto out;

	/*
	 * The following mappings will not require more memory than is available
	 * in the local pool.
	 */
fail_undo_send:
	vm_locked.vm->mailbox.send = NULL;
	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
		       local_page_pool));

fail:
	ret = false;

out:
	return ret;
}

/**
 * Sanity checks and configures the send and receive pages in the VM stage-2
 * and hypervisor stage-1 page tables.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned or are the same.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *   the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 */
struct ffa_value api_vm_configure_pages(
	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
	ipaddr_t send, ipaddr_t recv, uint32_t page_count,
	struct mpool *local_page_pool)
{
	struct ffa_value ret;
	paddr_t pa_send_begin;
	paddr_t pa_send_end;
	paddr_t pa_recv_begin;
	paddr_t pa_recv_end;
	uint32_t orig_send_mode;
	uint32_t orig_recv_mode;
	uint32_t extra_attributes;

	/* We only allow these to be set up once. */
	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Hafnium only supports a fixed size of RX/TX buffers. */
	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/* Convert to physical addresses. */
	pa_send_begin = pa_from_ipa(send);
	pa_send_end = pa_add(pa_send_begin, HF_MAILBOX_SIZE);
	pa_recv_begin = pa_from_ipa(recv);
	pa_recv_end = pa_add(pa_recv_begin, HF_MAILBOX_SIZE);

	/* Fail if the same page is used for the send and receive pages. */
	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
		ret = ffa_error(FFA_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Ensure the pages are valid, owned and exclusive to the VM and that
	 * the VM has the required access to the memory.
	 */
	if (!mm_vm_get_mode(&vm_locked.vm->ptable, send,
			    ipa_add(send, PAGE_SIZE), &orig_send_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
	    (orig_send_mode & MM_MODE_R) == 0 ||
	    (orig_send_mode & MM_MODE_W) == 0) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	if (!mm_vm_get_mode(&vm_locked.vm->ptable, recv,
			    ipa_add(recv, PAGE_SIZE), &orig_recv_mode) ||
	    !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
	    (orig_recv_mode & MM_MODE_R) == 0) {
		ret = ffa_error(FFA_DENIED);
		goto out;
	}

	/* Take memory ownership away from the VM and mark as shared. */
	if (!vm_identity_map(
		    vm_locked, pa_send_begin, pa_send_end,
		    MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W,
		    local_page_pool, NULL)) {
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			     MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R,
			     local_page_pool, NULL)) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_vm_defrag(&vm_locked.vm->ptable, local_page_pool);
		goto fail_undo_send;
	}

	/* Get extra send/recv pages mapping attributes for the given VM ID. */
	extra_attributes = arch_mm_extra_attributes_from_vm(vm_locked.vm->id);

	if (!api_vm_configure_stage1(mm_stage1_locked, vm_locked, pa_send_begin,
				     pa_send_end, pa_recv_begin, pa_recv_end,
				     extra_attributes, local_page_pool)) {
		goto fail_undo_send_and_recv;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	goto out;

fail_undo_send_and_recv:
	CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
			      orig_recv_mode, local_page_pool, NULL));

fail_undo_send:
	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
			      orig_send_mode, local_page_pool, NULL));
	ret = ffa_error(FFA_NO_MEMORY);

out:
	return ret;
}

/**
 * Configures the VM to send/receive data through the specified pages. The pages
 * must not be shared. Locking of the page tables combined with a local memory
 * pool ensures there will always be enough memory to recover from any errors
 * that arise. The stage-1 page tables must be locked so memory cannot be taken
 * by another core which could result in this transaction being unable to roll
 * back in the case of an error.
 *
 * Returns:
 * - FFA_ERROR FFA_INVALID_PARAMETERS if the given addresses are not properly
 *   aligned or are the same.
 * - FFA_ERROR FFA_NO_MEMORY if the hypervisor was unable to map the buffers
 *   due to insufficient page table memory.
 * - FFA_ERROR FFA_DENIED if the pages are already mapped or are not owned by
 *   the caller.
 * - FFA_SUCCESS on success if no further action is needed.
 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
 *   needs to wake up or kick waiters.
 */
struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
				  uint32_t page_count, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *vm = current->vm;
	struct ffa_value ret;
	struct vm_locked vm_locked;
	struct mm_stage1_locked mm_stage1_locked;
	struct mpool local_page_pool;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if any
	 * stage of the process fails.
	 */
	mpool_init_with_fallback(&local_page_pool, &api_page_pool);

	vm_locked = vm_lock(vm);
	mm_stage1_locked = mm_lock_stage1();

	ret = api_vm_configure_pages(mm_stage1_locked, vm_locked, send, recv,
				     page_count, &local_page_pool);
	if (ret.func != FFA_SUCCESS_32) {
		goto exit;
	}

	/* Tell caller about waiters, if any. */
	ret = api_waiter_result(vm_locked, current, next);

exit:
	mpool_fini(&local_page_pool);

	mm_unlock_stage1(&mm_stage1_locked);
	vm_unlock(&vm_locked);

	return ret;
}
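
/*
 * Illustrative guest-side sketch (the ffa_rxtx_map() wrapper signature is an
 * assumption about the vmapi): the buffers must be page-aligned, distinct and
 * owned by the caller, and can only be registered once.
 *
 *	alignas(PAGE_SIZE) static uint8_t send_page[HF_MAILBOX_SIZE];
 *	alignas(PAGE_SIZE) static uint8_t recv_page[HF_MAILBOX_SIZE];
 *
 *	struct ffa_value ret = ffa_rxtx_map((uintptr_t)send_page,
 *					    (uintptr_t)recv_page);
 *
 *	if (ret.func != FFA_SUCCESS_32) {
 *		// FFA_DENIED, FFA_INVALID_PARAMETERS or FFA_NO_MEMORY.
 *	}
 */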

/**
 * Notifies the `to` VM about the message currently in its mailbox, possibly
 * with the help of the primary VM.
 */
static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
				    struct vcpu *current, struct vcpu **next)
{
	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
	struct ffa_value primary_ret = {
		.func = FFA_MSG_SEND_32,
		.arg1 = ((uint32_t)from_id << 16) | to.vm->id,
	};

	/* Messages for the primary VM are delivered directly. */
	if (to.vm->id == HF_PRIMARY_VM_ID) {
		/*
		 * Only tell the primary VM the size and other details if the
		 * message is for it, to avoid leaking data about messages for
		 * other VMs.
		 */
		primary_ret = ffa_msg_recv_return(to.vm);

		to.vm->mailbox.state = MAILBOX_STATE_READ;
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
		return ret;
	}

	to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;

	/* Messages for the TEE are sent on via the dispatcher. */
	if (to.vm->id == HF_TEE_VM_ID) {
		struct ffa_value call = ffa_msg_recv_return(to.vm);

		ret = arch_other_world_call(call);
		/*
		 * After the call to the TEE completes it must have finished
		 * reading its RX buffer, so it is ready for another message.
		 */
		to.vm->mailbox.state = MAILBOX_STATE_EMPTY;
		/*
		 * Don't return to the primary VM in this case, as the TEE is
		 * not (yet) scheduled via FF-A.
		 */
		return ret;
	}

	/* Return to the primary VM directly or with a switch. */
	if (from_id != HF_PRIMARY_VM_ID) {
		*next = api_switch_to_primary(current, primary_ret,
					      VCPU_STATE_READY);
	}

	return ret;
}

/**
 * Copies data from the sender's send buffer to the recipient's receive buffer
 * and notifies the recipient.
 *
 * If the recipient's receive buffer is busy, it can optionally register the
 * caller to be notified when the recipient's receive buffer becomes available.
 */
struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
				  ffa_vm_id_t receiver_vm_id, uint32_t size,
				  uint32_t attributes, struct vcpu *current,
				  struct vcpu **next)
{
	struct vm *from = current->vm;
	struct vm *to;
	struct vm_locked to_locked;
	const void *from_msg;
	struct ffa_value ret;
	struct vcpu_locked current_locked;
	bool is_direct_request_ongoing;
	bool notify =
		(attributes & FFA_MSG_SEND_NOTIFY_MASK) == FFA_MSG_SEND_NOTIFY;

	/* Ensure sender VM ID corresponds to the current VM. */
	if (sender_vm_id != from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Disallow reflexive requests as this suggests an error in the VM. */
	if (receiver_vm_id == from->id) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/* Limit the size of transfer. */
	if (size > FFA_MSG_PAYLOAD_MAX) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Deny if the vCPU is executing in the context of an
	 * FFA_MSG_SEND_DIRECT_REQ invocation.
	 */
	current_locked = vcpu_lock(current);
	is_direct_request_ongoing =
		is_ffa_direct_msg_request_ongoing(current_locked);
	vcpu_unlock(&current_locked);

	if (is_direct_request_ongoing) {
		return ffa_error(FFA_DENIED);
	}

	/* Ensure the receiver VM exists. */
	to = vm_find(receiver_vm_id);
	if (to == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	/*
	 * Check that the sender has configured its send buffer. If the tx
	 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
	 * be safely accessed after releasing the lock since the tx mailbox
	 * address can only be configured once.
	 */
	sl_lock(&from->lock);
	from_msg = from->mailbox.send;
	sl_unlock(&from->lock);

	if (from_msg == NULL) {
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	to_locked = vm_lock(to);

	if (msg_receiver_busy(to_locked, from, notify)) {
		ret = ffa_error(FFA_BUSY);
		goto out;
	}

	/* Copy data. */
	memcpy_s(to->mailbox.recv, FFA_MSG_PAYLOAD_MAX, from_msg, size);
	to->mailbox.recv_size = size;
	to->mailbox.recv_sender = sender_vm_id;
	to->mailbox.recv_func = FFA_MSG_SEND_32;
	ret = deliver_msg(to_locked, sender_vm_id, current, next);

out:
	vm_unlock(&to_locked);

	return ret;
}
1188
1189/**
Andrew Scullec52ddf2019-08-20 10:41:01 +01001190 * Checks whether the vCPU's attempt to block for a message has already been
1191 * interrupted or whether it is allowed to block.
1192 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001193bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
Andrew Scullec52ddf2019-08-20 10:41:01 +01001194{
1195 bool interrupted;
1196
1197 sl_lock(&current->lock);
1198
1199 /*
1200 * Don't block if there are enabled and pending interrupts, to match
1201 * behaviour of wait_for_interrupt.
1202 */
1203 interrupted = (current->interrupts.enabled_and_pending_count > 0);
1204
1205 sl_unlock(&current->lock);
1206
1207 return interrupted;
1208}
1209
1210/**
Andrew Scullaa039b32018-10-04 15:02:26 +01001211 * Receives a message from the mailbox. If one isn't available, this function
1212 * can optionally block the caller until one becomes available.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001213 *
Andrew Scullaa039b32018-10-04 15:02:26 +01001214 * No new messages can be received until the mailbox has been cleared.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001215 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001216struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
1217 struct vcpu **next)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001218{
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001219 bool is_direct_request_ongoing;
1220 struct vcpu_locked current_locked;
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001221 struct vm *vm = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001222 struct ffa_value return_code;
J-Alvesb37fd082020-10-22 12:29:21 +01001223 bool is_from_secure_world =
1224 (current->vm->id & HF_VM_ID_WORLD_MASK) != 0;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001225
Andrew Scullaa039b32018-10-04 15:02:26 +01001226 /*
1227 * The primary VM will receive messages as a status code from running
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001228 * vCPUs and must not call this function.
Andrew Scullaa039b32018-10-04 15:02:26 +01001229 */
J-Alvesb37fd082020-10-22 12:29:21 +01001230 if (!is_from_secure_world && vm->id == HF_PRIMARY_VM_ID) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001231 return ffa_error(FFA_NOT_SUPPORTED);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001232 }
1233
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001234 /*
1235 * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
1236 * invocation.
1237 */
1238 current_locked = vcpu_lock(current);
1239 is_direct_request_ongoing =
1240 is_ffa_direct_msg_request_ongoing(current_locked);
1241 vcpu_unlock(&current_locked);
1242
1243 if (is_direct_request_ongoing) {
1244 return ffa_error(FFA_DENIED);
1245 }
1246
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001247 sl_lock(&vm->lock);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001248
Andrew Scullaa039b32018-10-04 15:02:26 +01001249 /* Return pending messages without blocking. */
Andrew Sculld6ee1102019-04-05 22:12:42 +01001250 if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
1251 vm->mailbox.state = MAILBOX_STATE_READ;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001252 return_code = ffa_msg_recv_return(vm);
Jose Marinho3e2442f2019-03-12 13:30:37 +00001253 goto out;
1254 }
1255
1256 /* No pending message so fail if not allowed to block. */
1257 if (!block) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001258 return_code = ffa_error(FFA_RETRY);
Andrew Scullaa039b32018-10-04 15:02:26 +01001259 goto out;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001260 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001261
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001262 /*
Jose Marinho3e2442f2019-03-12 13:30:37 +00001263 * From this point onward this call can only be interrupted or a message
1264 * received. If a message is received the return value will be set at
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001265 * that time to FFA_SUCCESS.
Andrew Walbran9311c9a2019-03-12 16:59:04 +00001266 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001267 return_code = ffa_error(FFA_INTERRUPTED);
1268 if (api_ffa_msg_recv_block_interrupted(current)) {
Andrew Scullaa039b32018-10-04 15:02:26 +01001269 goto out;
1270 }
1271
J-Alvesb37fd082020-10-22 12:29:21 +01001272 if (is_from_secure_world) {
1273 /* Return to other world if caller is a SP. */
1274 *next = api_switch_to_other_world(
1275 current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
1276 VCPU_STATE_BLOCKED_MAILBOX);
1277 } else {
1278 /* Switch back to primary VM to block. */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001279 struct ffa_value run_return = {
1280 .func = FFA_MSG_WAIT_32,
1281 .arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
Andrew Walbranb4816552018-12-05 17:35:42 +00001282 };
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001283
Andrew Walbranb4816552018-12-05 17:35:42 +00001284 *next = api_switch_to_primary(current, run_return,
Andrew Sculld6ee1102019-04-05 22:12:42 +01001285 VCPU_STATE_BLOCKED_MAILBOX);
Andrew Walbranb4816552018-12-05 17:35:42 +00001286 }
Andrew Scullaa039b32018-10-04 15:02:26 +01001287out:
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001288 sl_unlock(&vm->lock);
1289
Jose Marinho3e2442f2019-03-12 13:30:37 +00001290 return return_code;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001291}
1292
1293/**
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001294 * Retrieves the next VM whose mailbox became writable. For a VM to be notified
1295	 * by this function, the caller must previously have tried to send it a
1296	 * message with the notify flag set (FFA_MSG_SEND_NOTIFY), and that send must
1297	 * have failed because the receiver's mailbox was not available.
1298 *
1299 * It should be called repeatedly to retrieve a list of VMs.
1300 *
1301 * Returns -1 if no VM became writable, or the id of the VM whose mailbox
1302 * became writable.
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001303 */
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001304int64_t api_mailbox_writable_get(const struct vcpu *current)
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001305{
Wedson Almeida Filho00df6c72018-10-18 11:19:24 +01001306 struct vm *vm = current->vm;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001307 struct wait_entry *entry;
Andrew Scullc0e569a2018-10-02 18:05:21 +01001308 int64_t ret;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001309
1310 sl_lock(&vm->lock);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001311 if (list_empty(&vm->mailbox.ready_list)) {
1312 ret = -1;
1313 goto exit;
1314 }
1315
1316 entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
1317 ready_links);
1318 list_remove(&entry->ready_links);
Andrew Walbranaad8f982019-12-04 10:56:39 +00001319 ret = vm_id_for_wait_entry(vm, entry);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001320
1321exit:
1322 sl_unlock(&vm->lock);
1323 return ret;
1324}
1325
1326/**
1327 * Retrieves the next VM waiting to be notified that the mailbox of the
1328 * specified VM became writable. Only primary VMs are allowed to call this.
1329 *
Wedson Almeida Filhob790f652019-01-22 23:41:56 +00001330 * Returns -1 on failure or if there are no waiters; the VM id of the next
1331 * waiter otherwise.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001332 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001333int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001334{
1335 struct vm *vm;
1336 struct vm_locked locked;
1337 struct wait_entry *entry;
1338 struct vm *waiting_vm;
1339
1340 /* Only primary VMs are allowed to call this function. */
1341 if (current->vm->id != HF_PRIMARY_VM_ID) {
1342 return -1;
1343 }
1344
Andrew Walbran42347a92019-05-09 13:59:03 +01001345 vm = vm_find(vm_id);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001346 if (vm == NULL) {
1347 return -1;
1348 }
1349
Fuad Tabbaed294af2019-12-20 10:43:01 +00001350 /* Check if there are outstanding notifications from given VM. */
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001351 locked = vm_lock(vm);
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001352 entry = api_fetch_waiter(locked);
1353 vm_unlock(&locked);
1354
1355 if (entry == NULL) {
1356 return -1;
1357 }
1358
1359 /* Enqueue notification to waiting VM. */
1360 waiting_vm = entry->waiting_vm;
1361
1362 sl_lock(&waiting_vm->lock);
1363 if (list_empty(&entry->ready_links)) {
1364 list_append(&waiting_vm->mailbox.ready_list,
1365 &entry->ready_links);
1366 }
1367 sl_unlock(&waiting_vm->lock);
1368
1369 return waiting_vm->id;
1370}
1371
1372/**
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001373 * Releases the caller's mailbox so that a new message can be received. The
1374 * caller must have copied out all data they wish to preserve as new messages
1375 * will overwrite the old and will arrive asynchronously.
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001376 *
1377 * Returns:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001378 * - FFA_ERROR FFA_DENIED on failure, if the mailbox hasn't been read.
1379 * - FFA_SUCCESS on success if no further action is needed.
1380 * - FFA_RX_RELEASE if it was called by the primary VM and the primary VM now
Andrew Walbran8a0f5ca2019-11-05 13:12:23 +00001381 * needs to wake up or kick waiters. Waiters should be retrieved by calling
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001382 * hf_mailbox_waiter_get.
1383 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001384struct ffa_value api_ffa_rx_release(struct vcpu *current, struct vcpu **next)
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001385{
1386 struct vm *vm = current->vm;
1387 struct vm_locked locked;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001388 struct ffa_value ret;
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001389
Andrew Walbran7e932bd2019-04-29 16:47:06 +01001390 locked = vm_lock(vm);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001391 switch (vm->mailbox.state) {
Andrew Sculld6ee1102019-04-05 22:12:42 +01001392 case MAILBOX_STATE_EMPTY:
Andrew Sculld6ee1102019-04-05 22:12:42 +01001393 case MAILBOX_STATE_RECEIVED:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001394 ret = ffa_error(FFA_DENIED);
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001395 break;
1396
Andrew Sculld6ee1102019-04-05 22:12:42 +01001397 case MAILBOX_STATE_READ:
Andrew Walbranbfffb0f2019-11-05 14:02:34 +00001398 ret = api_waiter_result(locked, current, next);
Andrew Sculld6ee1102019-04-05 22:12:42 +01001399 vm->mailbox.state = MAILBOX_STATE_EMPTY;
Andrew Scullaa7db8e2019-02-01 14:12:19 +00001400 break;
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001401 }
Wedson Almeida Filhoea62e2e2019-01-09 19:14:59 +00001402 vm_unlock(&locked);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +01001403
1404 return ret;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +01001405}
Andrew Walbran318f5732018-11-20 16:23:42 +00001406
1407/**
1408 * Enables or disables a given interrupt ID for the calling vCPU.
1409 *
1410 * Returns 0 on success, or -1 if the intid is invalid.
1411 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001412int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001413{
1414 uint32_t intid_index = intid / INTERRUPT_REGISTER_BITS;
Andrew Walbrane52006c2019-10-22 18:01:28 +01001415 uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001416
Andrew Walbran318f5732018-11-20 16:23:42 +00001417 if (intid >= HF_NUM_INTIDS) {
1418 return -1;
1419 }
1420
1421 sl_lock(&current->lock);
1422 if (enable) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001423 /*
1424 * If it is pending and was not enabled before, increment the
1425 * count.
1426 */
1427 if (current->interrupts.interrupt_pending[intid_index] &
1428 ~current->interrupts.interrupt_enabled[intid_index] &
1429 intid_mask) {
1430 current->interrupts.enabled_and_pending_count++;
1431 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001432 current->interrupts.interrupt_enabled[intid_index] |=
1433 intid_mask;
Andrew Walbran318f5732018-11-20 16:23:42 +00001434 } else {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001435 /*
1436 * If it is pending and was enabled before, decrement the count.
1437 */
1438 if (current->interrupts.interrupt_pending[intid_index] &
1439 current->interrupts.interrupt_enabled[intid_index] &
1440 intid_mask) {
1441 current->interrupts.enabled_and_pending_count--;
1442 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001443 current->interrupts.interrupt_enabled[intid_index] &=
1444 ~intid_mask;
1445 }
1446
1447 sl_unlock(&current->lock);
1448 return 0;
1449}
1450
1451/**
1452 * Returns the ID of the next pending interrupt for the calling vCPU, and
1453 * acknowledges it (i.e. marks it as no longer pending). Returns
1454 * HF_INVALID_INTID if there are no pending interrupts.
1455 */
Wedson Almeida Filhoc559d132019-01-09 19:33:40 +00001456uint32_t api_interrupt_get(struct vcpu *current)
Andrew Walbran318f5732018-11-20 16:23:42 +00001457{
1458 uint8_t i;
1459 uint32_t first_interrupt = HF_INVALID_INTID;
Andrew Walbran318f5732018-11-20 16:23:42 +00001460
1461 /*
1462 * Find the first enabled and pending interrupt ID, return it, and
1463 * deactivate it.
1464 */
1465 sl_lock(&current->lock);
1466 for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
1467 uint32_t enabled_and_pending =
1468 current->interrupts.interrupt_enabled[i] &
1469 current->interrupts.interrupt_pending[i];
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001470
Andrew Walbran318f5732018-11-20 16:23:42 +00001471 if (enabled_and_pending != 0) {
Andrew Walbran3d84a262018-12-13 14:41:19 +00001472 uint8_t bit_index = ctz(enabled_and_pending);
1473 /*
1474 * Mark it as no longer pending and decrement the count.
1475 */
1476 current->interrupts.interrupt_pending[i] &=
Andrew Walbrane52006c2019-10-22 18:01:28 +01001477 ~(1U << bit_index);
Andrew Walbran3d84a262018-12-13 14:41:19 +00001478 current->interrupts.enabled_and_pending_count--;
1479 first_interrupt =
1480 i * INTERRUPT_REGISTER_BITS + bit_index;
Andrew Walbran318f5732018-11-20 16:23:42 +00001481 break;
1482 }
1483 }
Andrew Walbran318f5732018-11-20 16:23:42 +00001484
1485 sl_unlock(&current->lock);
1486 return first_interrupt;
1487}
1488
1489/**
Andrew Walbran4cf217a2018-12-14 15:24:50 +00001490 * Returns whether the current vCPU is allowed to inject an interrupt into the
Andrew Walbran318f5732018-11-20 16:23:42 +00001491 * given VM and vCPU.
1492 */
1493static inline bool is_injection_allowed(uint32_t target_vm_id,
1494 struct vcpu *current)
1495{
1496 uint32_t current_vm_id = current->vm->id;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001497
Andrew Walbran318f5732018-11-20 16:23:42 +00001498 /*
1499 * The primary VM is allowed to inject interrupts into any VM. Secondary
1500 * VMs are only allowed to inject interrupts into their own vCPUs.
1501 */
1502 return current_vm_id == HF_PRIMARY_VM_ID ||
1503 current_vm_id == target_vm_id;
1504}
1505
1506/**
1507 * Injects a virtual interrupt of the given ID into the given target vCPU.
1508 * This doesn't cause the vCPU to actually be run immediately; it will be taken
1509 * when the vCPU is next run, which is up to the scheduler.
1510 *
Andrew Walbran3d84a262018-12-13 14:41:19 +00001511 * Returns:
1512 * - -1 on failure because the target VM or vCPU doesn't exist, the interrupt
1513	 *   ID is invalid, or the current VM is not allowed to inject interrupts
1514	 *   into the target VM.
1515 * - 0 on success if no further action is needed.
1516 * - 1 if it was called by the primary VM and the primary VM now needs to wake
1517 * up or kick the target vCPU.
Andrew Walbran318f5732018-11-20 16:23:42 +00001518 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001519int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
1520 ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
Andrew Walbran42347a92019-05-09 13:59:03 +01001521 struct vcpu *current, struct vcpu **next)
Andrew Walbran318f5732018-11-20 16:23:42 +00001522{
Andrew Walbran318f5732018-11-20 16:23:42 +00001523 struct vcpu *target_vcpu;
Andrew Walbran42347a92019-05-09 13:59:03 +01001524 struct vm *target_vm = vm_find(target_vm_id);
Andrew Walbran318f5732018-11-20 16:23:42 +00001525
1526 if (intid >= HF_NUM_INTIDS) {
1527 return -1;
1528 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001529
Andrew Walbran318f5732018-11-20 16:23:42 +00001530 if (target_vm == NULL) {
1531 return -1;
1532 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001533
Andrew Walbran318f5732018-11-20 16:23:42 +00001534 if (target_vcpu_idx >= target_vm->vcpu_count) {
Fuad Tabbab0ef2a42019-12-19 11:19:25 +00001535 /* The requested vCPU must exist. */
Andrew Walbran318f5732018-11-20 16:23:42 +00001536 return -1;
1537 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001538
Andrew Walbran318f5732018-11-20 16:23:42 +00001539 if (!is_injection_allowed(target_vm_id, current)) {
1540 return -1;
1541 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +00001542
Andrew Walbrane1310df2019-04-29 17:28:28 +01001543 target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
Andrew Walbran318f5732018-11-20 16:23:42 +00001544
Olivier Deprezf92e5d42020-11-13 16:00:54 +01001545 dlog_info("Injecting IRQ %u for VM %#x vCPU %u from VM %#x vCPU %u\n",
Andrew Walbran17eebf92020-02-05 16:35:49 +00001546 intid, target_vm_id, target_vcpu_idx, current->vm->id,
Olivier Deprezf92e5d42020-11-13 16:00:54 +01001547 vcpu_index(current));
Andrew Walbranfc9d4382019-05-10 18:07:21 +01001548 return internal_interrupt_inject(target_vcpu, intid, current, next);
Andrew Walbran318f5732018-11-20 16:23:42 +00001549}
Andrew Scull6386f252018-12-06 13:29:10 +00001550
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001551/** Returns the version of the implemented FF-A specification. */
1552struct ffa_value api_ffa_version(uint32_t requested_version)
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001553{
1554 /*
1555	 * Ensure that the major revision representation occupies at most 15 bits
1556	 * and the minor revision representation at most 16 bits.
1557 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001558 static_assert(0x8000 > FFA_VERSION_MAJOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001559 "Major revision representation takes more than 15 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001560 static_assert(0x10000 > FFA_VERSION_MINOR,
Andrew Walbran9fd29072020-04-22 12:12:14 +01001561 "Minor revision representation takes more than 16 bits.");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001562 if (requested_version & FFA_VERSION_RESERVED_BIT) {
Andrew Walbran9fd29072020-04-22 12:12:14 +01001563 /* Invalid encoding, return an error. */
J-Alves13318e32021-02-22 17:21:00 +00001564 return (struct ffa_value){.func = (uint32_t)FFA_NOT_SUPPORTED};
Andrew Walbran9fd29072020-04-22 12:12:14 +01001565 }
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001566
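	/*
	 * Pack the version supported by Hafnium: the major revision shifted
	 * into the upper bits by FFA_VERSION_MAJOR_OFFSET, the minor revision
	 * in the lower bits, and the reserved bit left clear.
	 */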
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001567 return (struct ffa_value){
1568 .func = (FFA_VERSION_MAJOR << FFA_VERSION_MAJOR_OFFSET) |
1569 FFA_VERSION_MINOR};
Jose Marinhofc0b2b62019-06-06 11:18:45 +01001570}
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001571
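/**
 * Appends a character to the calling VM's log buffer, flushing it to the
 * hypervisor log when a newline or NUL terminator is written or when the
 * buffer becomes full.
 *
 * Always returns 0.
 */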
1572int64_t api_debug_log(char c, struct vcpu *current)
1573{
Andrew Sculld54e1be2019-08-20 11:09:42 +01001574 bool flush;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001575 struct vm *vm = current->vm;
1576 struct vm_locked vm_locked = vm_lock(vm);
1577
Andrew Sculld54e1be2019-08-20 11:09:42 +01001578 if (c == '\n' || c == '\0') {
1579 flush = true;
1580 } else {
1581 vm->log_buffer[vm->log_buffer_length++] = c;
1582 flush = (vm->log_buffer_length == sizeof(vm->log_buffer));
1583 }
1584
1585 if (flush) {
Andrew Walbran7f904bf2019-07-12 16:38:38 +01001586 dlog_flush_vm_buffer(vm->id, vm->log_buffer,
1587 vm->log_buffer_length);
1588 vm->log_buffer_length = 0;
Andrew Walbranc1ad4ce2019-05-09 11:41:39 +01001589 }
1590
1591 vm_unlock(&vm_locked);
1592
1593 return 0;
1594}
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001595
1596/**
1597 * Discovery function returning information about the implementation of optional
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001598 * FF-A interfaces.
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001599 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001600struct ffa_value api_ffa_features(uint32_t function_id)
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001601{
1602 switch (function_id) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001603 case FFA_ERROR_32:
1604 case FFA_SUCCESS_32:
1605 case FFA_INTERRUPT_32:
1606 case FFA_VERSION_32:
1607 case FFA_FEATURES_32:
1608 case FFA_RX_RELEASE_32:
1609 case FFA_RXTX_MAP_64:
Fuad Tabbae4efcc32020-07-16 15:37:27 +01001610 case FFA_PARTITION_INFO_GET_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001611 case FFA_ID_GET_32:
1612 case FFA_MSG_POLL_32:
1613 case FFA_MSG_WAIT_32:
1614 case FFA_YIELD_32:
1615 case FFA_RUN_32:
1616 case FFA_MSG_SEND_32:
1617 case FFA_MEM_DONATE_32:
1618 case FFA_MEM_LEND_32:
1619 case FFA_MEM_SHARE_32:
1620 case FFA_MEM_RETRIEVE_REQ_32:
1621 case FFA_MEM_RETRIEVE_RESP_32:
1622 case FFA_MEM_RELINQUISH_32:
1623 case FFA_MEM_RECLAIM_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001624 case FFA_MSG_SEND_DIRECT_RESP_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001625 case FFA_MSG_SEND_DIRECT_RESP_32:
J-Alvesbc3de8b2020-12-07 14:32:04 +00001626 case FFA_MSG_SEND_DIRECT_REQ_64:
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001627 case FFA_MSG_SEND_DIRECT_REQ_32:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001628 return (struct ffa_value){.func = FFA_SUCCESS_32};
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001629 default:
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001630 return ffa_error(FFA_NOT_SUPPORTED);
Jose Marinhoc0f4ff22019-10-09 10:37:42 +01001631 }
1632}
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001633
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001634/**
1635	 * Gets the target VM vCPU for a direct messaging request.
1636	 * If the VM is UP then return its single vCPU.
1637	 * If the VM is MP then return the vCPU whose index matches the current CPU's.
1638 */
1639static struct vcpu *api_ffa_msg_send_direct_get_receiver_vcpu(
1640 struct vm *vm, struct vcpu *current)
1641{
1642 ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
1643 struct vcpu *vcpu = NULL;
1644
1645 if (vm->vcpu_count == 1) {
1646 vcpu = vm_get_vcpu(vm, 0);
1647 } else if (current_cpu_index < vm->vcpu_count) {
1648 vcpu = vm_get_vcpu(vm, current_cpu_index);
1649 }
1650
1651 return vcpu;
1652}
1653
1654/**
J-Alves645eabe2021-02-22 16:08:27 +00001655	 * The FF-A specification states that x2/w2 must be zero (MBZ) for the direct
1656	 * messaging interfaces.
1657 */
1658static inline bool api_ffa_dir_msg_is_arg2_zero(struct ffa_value args)
1659{
1660 return args.arg2 == 0U;
1661}
1662
1663/**
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001664 * Send an FF-A direct message request.
1665 */
1666struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
1667 ffa_vm_id_t receiver_vm_id,
1668 struct ffa_value args,
1669 struct vcpu *current,
1670 struct vcpu **next)
1671{
1672 struct ffa_value ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001673 struct vm *receiver_vm;
1674 struct vcpu *receiver_vcpu;
1675 struct two_vcpu_locked vcpus_locked;
1676
J-Alves645eabe2021-02-22 16:08:27 +00001677 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
1678 return ffa_error(FFA_INVALID_PARAMETERS);
1679 }
1680
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001681 if (!arch_other_world_is_direct_request_valid(current, sender_vm_id,
1682 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00001683 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001684 }
1685
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001686 receiver_vm = vm_find(receiver_vm_id);
1687 if (receiver_vm == NULL) {
1688 return ffa_error(FFA_INVALID_PARAMETERS);
1689 }
1690
1691 /*
1692	 * Per PSA FF-A EAC spec section 4.4.1, the firmware framework supports
1693 * UP (migratable) or MP partitions with a number of vCPUs matching the
1694 * number of PEs in the system. It further states that MP partitions
1695 * accepting direct request messages cannot migrate.
1696 */
1697 receiver_vcpu =
1698 api_ffa_msg_send_direct_get_receiver_vcpu(receiver_vm, current);
1699 if (receiver_vcpu == NULL) {
1700 return ffa_error(FFA_INVALID_PARAMETERS);
1701 }
1702
1703 vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
1704
1705 /*
1706	 * If the destination vCPU is executing or has already received an
1707	 * FFA_MSG_SEND_DIRECT_REQ then return to the caller hinting that the
1708	 * recipient is busy. There is a brief period where the vCPU state has
1709	 * changed but regs_available is still false, so treat this case as the
1710	 * vCPU not yet being ready to receive a direct message request.
1711 */
1712 if (is_ffa_direct_msg_request_ongoing(vcpus_locked.vcpu1) ||
1713 receiver_vcpu->state == VCPU_STATE_RUNNING ||
1714 !receiver_vcpu->regs_available) {
1715 ret = ffa_error(FFA_BUSY);
1716 goto out;
1717 }
1718
1719 if (atomic_load_explicit(&receiver_vcpu->vm->aborting,
1720 memory_order_relaxed)) {
1721 if (receiver_vcpu->state != VCPU_STATE_ABORTED) {
Olivier Deprezf92e5d42020-11-13 16:00:54 +01001722 dlog_notice("Aborting VM %#x vCPU %u\n",
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001723 receiver_vcpu->vm->id,
1724 vcpu_index(receiver_vcpu));
1725 receiver_vcpu->state = VCPU_STATE_ABORTED;
1726 }
1727
1728 ret = ffa_error(FFA_ABORTED);
1729 goto out;
1730 }
1731
1732 switch (receiver_vcpu->state) {
1733 case VCPU_STATE_OFF:
1734 case VCPU_STATE_RUNNING:
1735 case VCPU_STATE_ABORTED:
1736 case VCPU_STATE_READY:
1737 case VCPU_STATE_BLOCKED_INTERRUPT:
1738 ret = ffa_error(FFA_BUSY);
1739 goto out;
1740 case VCPU_STATE_BLOCKED_MAILBOX:
1741 /*
1742 * Expect target vCPU to be blocked after having called
1743 * ffa_msg_wait or sent a direct message response.
1744 */
1745 break;
1746 }
1747
1748	/* Inject the virtual timer interrupt if one is pending. */
1749 if (arch_timer_pending(&receiver_vcpu->regs)) {
1750 internal_interrupt_inject_locked(vcpus_locked.vcpu1,
1751 HF_VIRTUAL_TIMER_INTID,
1752 current, NULL);
1753
1754 arch_timer_mask(&receiver_vcpu->regs);
1755 }
1756
1757 /* The receiver vCPU runs upon direct message invocation */
1758 receiver_vcpu->cpu = current->cpu;
1759 receiver_vcpu->state = VCPU_STATE_RUNNING;
1760 receiver_vcpu->regs_available = false;
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001761 receiver_vcpu->direct_request_origin_vm_id = sender_vm_id;
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001762
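	/*
	 * Forward the request arguments to the receiver vCPU's registers,
	 * forcing w2/x2 to zero as it is MBZ for direct messages.
	 */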
1763 arch_regs_set_retval(&receiver_vcpu->regs, (struct ffa_value){
1764 .func = args.func,
1765 .arg1 = args.arg1,
1766 .arg2 = 0,
1767 .arg3 = args.arg3,
1768 .arg4 = args.arg4,
1769 .arg5 = args.arg5,
1770 .arg6 = args.arg6,
1771 .arg7 = args.arg7,
1772 });
1773
1774 current->state = VCPU_STATE_BLOCKED_MAILBOX;
1775
1776	/* Switch to the receiver vCPU targeted by the direct message request. */
1777 *next = receiver_vcpu;
1778
1779 /*
1780 * Since this flow will lead to a VM switch, the return value will not
1781	 * be applied to the current vCPU.
1782 */
1783
1784out:
1785 sl_unlock(&receiver_vcpu->lock);
1786 sl_unlock(&current->lock);
1787
1788 return ret;
1789}
1790
1791/**
1792 * Send an FF-A direct message response.
1793 */
1794struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
1795 ffa_vm_id_t receiver_vm_id,
1796 struct ffa_value args,
1797 struct vcpu *current,
1798 struct vcpu **next)
1799{
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001800 struct vcpu_locked current_locked;
J-Alves645eabe2021-02-22 16:08:27 +00001801
1802 if (!api_ffa_dir_msg_is_arg2_zero(args)) {
1803 return ffa_error(FFA_INVALID_PARAMETERS);
1804 }
1805
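	/*
	 * Build the value returned to the original requester, again forcing
	 * w2/x2 to zero as it is MBZ.
	 */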
J-Alvesfe7f7372020-11-09 11:32:12 +00001806 struct ffa_value to_ret = {
1807 .func = args.func,
1808 .arg1 = args.arg1,
1809 .arg2 = 0,
1810 .arg3 = args.arg3,
1811 .arg4 = args.arg4,
1812 .arg5 = args.arg5,
1813 .arg6 = args.arg6,
1814 .arg7 = args.arg7,
1815 };
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001816
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001817 if (!arch_other_world_is_direct_response_valid(current, sender_vm_id,
1818 receiver_vm_id)) {
J-Alvesaa336102021-03-01 13:02:45 +00001819 return ffa_error(FFA_INVALID_PARAMETERS);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001820 }
1821
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001822 current_locked = vcpu_lock(current);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001823
1824 /*
1825 * Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
1826 * defined originator.
1827 */
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001828 if (!is_ffa_direct_msg_request_ongoing(current_locked)) {
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001829 /*
1830 * Sending direct response but direct request origin vCPU is
1831 * not set.
1832 */
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001833 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001834 return ffa_error(FFA_DENIED);
1835 }
1836
1837 if (current->direct_request_origin_vm_id != receiver_vm_id) {
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001838 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001839 return ffa_error(FFA_DENIED);
1840 }
1841
1842 /* Clear direct request origin for the caller. */
1843 current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
1844
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001845 vcpu_unlock(&current_locked);
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001846
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001847 if (!vm_id_is_current_world(receiver_vm_id)) {
J-Alvesfe7f7372020-11-09 11:32:12 +00001848 *next = api_switch_to_other_world(current, to_ret,
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001849 VCPU_STATE_BLOCKED_MAILBOX);
1850 } else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
J-Alvesfe7f7372020-11-09 11:32:12 +00001851 *next = api_switch_to_primary(current, to_ret,
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001852 VCPU_STATE_BLOCKED_MAILBOX);
J-Alvesfe7f7372020-11-09 11:32:12 +00001853 } else if (vm_id_is_current_world(receiver_vm_id)) {
1854 /*
1855		 * The receiver_vm_id is expected to belong to an SP; otherwise
1856		 * 'arch_other_world_is_direct_response_valid' would have made the
1857		 * function return an error before getting to this point.
1858 */
1859 *next = api_switch_to_vm(current, to_ret,
1860 VCPU_STATE_BLOCKED_MAILBOX,
1861 receiver_vm_id);
Olivier Deprez2ebae3a2020-06-11 16:34:30 +02001862 } else {
1863 panic("Invalid direct message response invocation");
1864 }
Olivier Deprezee9d6a92019-11-26 09:14:11 +00001865
1866 return (struct ffa_value){.func = FFA_INTERRUPT_32};
1867}
1868
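/**
 * Donates, lends or shares memory, depending on share_func, using the
 * transaction descriptor held in the sender's TX buffer.
 *
 * The first fragment of the descriptor is copied into a page from the API
 * page pool so that the sender cannot change it while it is being processed,
 * and is then handed to the memory sharing code, either for the TEE or for a
 * receiver VM managed by the hypervisor.
 */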
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001869struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
1870 uint32_t fragment_length, ipaddr_t address,
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001871 uint32_t page_count, struct vcpu *current)
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001872{
1873 struct vm *from = current->vm;
1874 struct vm *to;
1875 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001876 struct ffa_memory_region *memory_region;
1877 struct ffa_value ret;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001878
1879 if (ipa_addr(address) != 0 || page_count != 0) {
1880 /*
1881 * Hafnium only supports passing the descriptor in the TX
1882 * mailbox.
1883 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001884 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001885 }
1886
Andrew Walbranca808b12020-05-15 17:22:28 +01001887 if (fragment_length > length) {
1888 dlog_verbose(
1889 "Fragment length %d greater than total length %d.\n",
1890 fragment_length, length);
1891 return ffa_error(FFA_INVALID_PARAMETERS);
1892 }
1893 if (fragment_length < sizeof(struct ffa_memory_region) +
1894 sizeof(struct ffa_memory_access)) {
1895 dlog_verbose(
1896 "Initial fragment length %d smaller than header size "
1897 "%d.\n",
1898 fragment_length,
1899 sizeof(struct ffa_memory_region) +
1900 sizeof(struct ffa_memory_access));
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001901 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001902 }
1903
1904 /*
1905 * Check that the sender has configured its send buffer. If the TX
1906 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
1907 * be safely accessed after releasing the lock since the TX mailbox
1908 * address can only be configured once.
1909 */
1910 sl_lock(&from->lock);
1911 from_msg = from->mailbox.send;
1912 sl_unlock(&from->lock);
1913
1914 if (from_msg == NULL) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001915 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001916 }
1917
1918 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001919 * Copy the memory region descriptor to a fresh page from the memory
1920 * pool. This prevents the sender from changing it underneath us, and
1921 * also lets us keep it around in the share state table if needed.
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001922 */
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001923 if (fragment_length > HF_MAILBOX_SIZE ||
1924 fragment_length > MM_PPOOL_ENTRY_SIZE) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001925 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001926 }
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001927 memory_region = (struct ffa_memory_region *)mpool_alloc(&api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001928 if (memory_region == NULL) {
1929 dlog_verbose("Failed to allocate memory region copy.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001930 return ffa_error(FFA_NO_MEMORY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001931 }
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001932 memcpy_s(memory_region, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001933
1934 /* The sender must match the caller. */
1935 if (memory_region->sender != from->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001936 dlog_verbose("Memory region sender doesn't match caller.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001937 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001938 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001939 }
1940
Andrew Walbrana65a1322020-04-06 19:32:32 +01001941 if (memory_region->receiver_count != 1) {
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001942 /* Hafnium doesn't support multi-way memory sharing for now. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001943 dlog_verbose(
1944 "Multi-way memory sharing not supported (got %d "
Andrew Walbrana65a1322020-04-06 19:32:32 +01001945 "endpoint memory access descriptors, expected 1).\n",
1946 memory_region->receiver_count);
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001947 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001948 goto out;
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001949 }
1950
1951 /*
1952 * Ensure that the receiver VM exists and isn't the same as the sender.
1953 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01001954 to = vm_find(memory_region->receivers[0].receiver_permissions.receiver);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00001955 if (to == NULL || to == from) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001956 dlog_verbose("Invalid receiver.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001957 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001958 goto out;
1959 }
1960
1961 if (to->id == HF_TEE_VM_ID) {
1962 /*
1963 * The 'to' VM lock is only needed in the case that it is the
1964 * TEE VM.
1965 */
1966 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
1967
1968 if (msg_receiver_busy(vm_to_from_lock.vm1, from, false)) {
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001969 ret = ffa_error(FFA_BUSY);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001970 goto out_unlock;
1971 }
1972
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001973 ret = ffa_memory_tee_send(
1974 vm_to_from_lock.vm2, vm_to_from_lock.vm1, memory_region,
1975 length, fragment_length, share_func, &api_page_pool);
1976 /*
1977		 * ffa_memory_tee_send takes ownership of the memory_region, so
1978 * make sure we don't free it.
1979 */
1980 memory_region = NULL;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001981
1982 out_unlock:
1983 vm_unlock(&vm_to_from_lock.vm1);
1984 vm_unlock(&vm_to_from_lock.vm2);
1985 } else {
1986 struct vm_locked from_locked = vm_lock(from);
1987
Andrew Walbran1a86aa92020-05-15 17:22:28 +01001988 ret = ffa_memory_send(from_locked, memory_region, length,
1989 fragment_length, share_func,
1990 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001991 /*
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01001992 * ffa_memory_send takes ownership of the memory_region, so
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001993 * make sure we don't free it.
1994 */
1995 memory_region = NULL;
1996
1997 vm_unlock(&from_locked);
1998 }
1999
2000out:
2001 if (memory_region != NULL) {
2002 mpool_free(&api_page_pool, memory_region);
2003 }
2004
2005 return ret;
2006}
2007
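/**
 * Retrieves a memory region previously sent to the caller. The retrieve
 * request descriptor is read from the caller's TX buffer and the response is
 * delivered through its RX buffer, which must be available.
 */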
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002008struct ffa_value api_ffa_mem_retrieve_req(uint32_t length,
2009 uint32_t fragment_length,
2010 ipaddr_t address, uint32_t page_count,
2011 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002012{
2013 struct vm *to = current->vm;
2014 struct vm_locked to_locked;
2015 const void *to_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002016 struct ffa_memory_region *retrieve_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002017 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002018 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002019
2020 if (ipa_addr(address) != 0 || page_count != 0) {
2021 /*
2022 * Hafnium only supports passing the descriptor in the TX
2023 * mailbox.
2024 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002025 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002026 }
2027
Andrew Walbrana65a1322020-04-06 19:32:32 +01002028 if (fragment_length != length) {
2029 dlog_verbose("Fragmentation not yet supported.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002030 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002031 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002032
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002033 retrieve_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002034 (struct ffa_memory_region *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002035 message_buffer_size = cpu_get_buffer_size(current->cpu);
2036 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2037 dlog_verbose("Retrieve request too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002038 return ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002039 }
2040
2041 to_locked = vm_lock(to);
2042 to_msg = to->mailbox.send;
2043
2044 if (to_msg == NULL) {
2045 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002046 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002047 goto out;
2048 }
2049
2050 /*
2051 * Copy the retrieve request descriptor to an internal buffer, so that
2052 * the caller can't change it underneath us.
2053 */
2054 memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
2055
2056 if (msg_receiver_busy(to_locked, NULL, false)) {
2057 /*
2058 * Can't retrieve memory information if the mailbox is not
2059 * available.
2060 */
2061 dlog_verbose("RX buffer not ready.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002062 ret = ffa_error(FFA_BUSY);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002063 goto out;
2064 }
2065
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002066 ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
2067 &api_page_pool);
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002068
2069out:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002070 vm_unlock(&to_locked);
2071 return ret;
2072}
2073
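/**
 * Relinquishes the caller's access to a memory region it previously
 * retrieved, using the relinquish descriptor held in the caller's TX buffer.
 */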
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002074struct ffa_value api_ffa_mem_relinquish(struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002075{
2076 struct vm *from = current->vm;
2077 struct vm_locked from_locked;
2078 const void *from_msg;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002079 struct ffa_mem_relinquish *relinquish_request;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002080 uint32_t message_buffer_size;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002081 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002082 uint32_t length;
2083
2084 from_locked = vm_lock(from);
2085 from_msg = from->mailbox.send;
2086
2087 if (from_msg == NULL) {
2088 dlog_verbose("TX buffer not setup.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002089 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002090 goto out;
2091 }
2092
2093 /*
2094 * Calculate length from relinquish descriptor before copying. We will
2095 * check again later to make sure it hasn't changed.
2096 */
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002097 length = sizeof(struct ffa_mem_relinquish) +
2098 ((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
2099 sizeof(ffa_vm_id_t);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002100 /*
2101 * Copy the relinquish descriptor to an internal buffer, so that the
2102 * caller can't change it underneath us.
2103 */
2104 relinquish_request =
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002105 (struct ffa_mem_relinquish *)cpu_get_buffer(current->cpu);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002106 message_buffer_size = cpu_get_buffer_size(current->cpu);
2107 if (length > HF_MAILBOX_SIZE || length > message_buffer_size) {
2108 dlog_verbose("Relinquish message too long.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002109 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002110 goto out;
2111 }
2112 memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
2113
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002114 if (sizeof(struct ffa_mem_relinquish) +
2115 relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002116 length) {
2117 dlog_verbose(
2118 "Endpoint count changed while copying to internal "
2119 "buffer.\n");
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002120 ret = ffa_error(FFA_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002121 goto out;
2122 }
2123
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002124 ret = ffa_memory_relinquish(from_locked, relinquish_request,
2125 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002126
2127out:
2128 vm_unlock(&from_locked);
2129 return ret;
2130}
2131
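/**
 * Reclaims a memory region for the caller once borrowers have relinquished
 * it. Handles allocated by the current world are reclaimed locally; for other
 * handles the reclaim is coordinated with the TEE.
 */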
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002132struct ffa_value api_ffa_mem_reclaim(ffa_memory_handle_t handle,
2133 ffa_memory_region_flags_t flags,
2134 struct vcpu *current)
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002135{
2136 struct vm *to = current->vm;
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002137 struct ffa_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002138
J-Alves917d2f22020-10-30 18:39:30 +00002139 if (ffa_memory_handle_allocated_by_current_world(handle)) {
Andrew Walbran290b0c92020-02-03 16:37:14 +00002140 struct vm_locked to_locked = vm_lock(to);
2141
Andrew Walbranca808b12020-05-15 17:22:28 +01002142 ret = ffa_memory_reclaim(to_locked, handle, flags,
Andrew Walbranb5ab43c2020-04-30 11:32:54 +01002143 &api_page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00002144
Andrew Walbran290b0c92020-02-03 16:37:14 +00002145 vm_unlock(&to_locked);
2146 } else {
2147 struct vm *from = vm_find(HF_TEE_VM_ID);
2148 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2149
Andrew Walbranca808b12020-05-15 17:22:28 +01002150 ret = ffa_memory_tee_reclaim(vm_to_from_lock.vm1,
2151 vm_to_from_lock.vm2, handle, flags,
2152 &api_page_pool);
2153
2154 vm_unlock(&vm_to_from_lock.vm1);
2155 vm_unlock(&vm_to_from_lock.vm2);
2156 }
2157
2158 return ret;
2159}
2160
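/**
 * Handles FFA_MEM_FRAG_RX by returning the next fragment of a memory region
 * description through the caller's RX buffer. The sender ID must be zero at
 * the virtual FF-A instance and the RX buffer must be available.
 */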
2161struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
2162 uint32_t fragment_offset,
2163 ffa_vm_id_t sender_vm_id,
2164 struct vcpu *current)
2165{
2166 struct vm *to = current->vm;
2167 struct vm_locked to_locked;
2168 struct ffa_value ret;
2169
2170 /* Sender ID MBZ at virtual instance. */
2171 if (sender_vm_id != 0) {
2172 return ffa_error(FFA_INVALID_PARAMETERS);
2173 }
2174
2175 to_locked = vm_lock(to);
2176
2177 if (msg_receiver_busy(to_locked, NULL, false)) {
2178 /*
2179 * Can't retrieve memory information if the mailbox is not
2180 * available.
2181 */
2182 dlog_verbose("RX buffer not ready.\n");
2183 ret = ffa_error(FFA_BUSY);
2184 goto out;
2185 }
2186
2187 ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
2188 &api_page_pool);
2189
2190out:
2191 vm_unlock(&to_locked);
2192 return ret;
2193}
2194
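/**
 * Handles FFA_MEM_FRAG_TX by accepting the next fragment of a memory send
 * transaction from the caller's TX buffer. The fragment is copied into a page
 * from the API page pool and passed on either to the hypervisor's memory
 * sharing code or to the TEE, depending on which world allocated the handle.
 */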
2195struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
2196 uint32_t fragment_length,
2197 ffa_vm_id_t sender_vm_id,
2198 struct vcpu *current)
2199{
2200 struct vm *from = current->vm;
2201 const void *from_msg;
2202 void *fragment_copy;
2203 struct ffa_value ret;
2204
2205 /* Sender ID MBZ at virtual instance. */
2206 if (sender_vm_id != 0) {
2207 return ffa_error(FFA_INVALID_PARAMETERS);
2208 }
2209
2210 /*
2211 * Check that the sender has configured its send buffer. If the TX
2212 * mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
2213 * be safely accessed after releasing the lock since the TX mailbox
2214 * address can only be configured once.
2215 */
2216 sl_lock(&from->lock);
2217 from_msg = from->mailbox.send;
2218 sl_unlock(&from->lock);
2219
2220 if (from_msg == NULL) {
2221 return ffa_error(FFA_INVALID_PARAMETERS);
2222 }
2223
2224 /*
2225 * Copy the fragment to a fresh page from the memory pool. This prevents
2226 * the sender from changing it underneath us, and also lets us keep it
2227 * around in the share state table if needed.
2228 */
2229 if (fragment_length > HF_MAILBOX_SIZE ||
2230 fragment_length > MM_PPOOL_ENTRY_SIZE) {
2231 dlog_verbose(
2232 "Fragment length %d larger than mailbox size %d.\n",
2233 fragment_length, HF_MAILBOX_SIZE);
2234 return ffa_error(FFA_INVALID_PARAMETERS);
2235 }
2236 if (fragment_length < sizeof(struct ffa_memory_region_constituent) ||
2237 fragment_length % sizeof(struct ffa_memory_region_constituent) !=
2238 0) {
2239 dlog_verbose("Invalid fragment length %d.\n", fragment_length);
2240 return ffa_error(FFA_INVALID_PARAMETERS);
2241 }
2242 fragment_copy = mpool_alloc(&api_page_pool);
2243 if (fragment_copy == NULL) {
2244 dlog_verbose("Failed to allocate fragment copy.\n");
2245 return ffa_error(FFA_NO_MEMORY);
2246 }
2247 memcpy_s(fragment_copy, MM_PPOOL_ENTRY_SIZE, from_msg, fragment_length);
2248
2249 /*
2250 * Hafnium doesn't support fragmentation of memory retrieve requests
2251 * (because it doesn't support caller-specified mappings, so a request
2252 * will never be larger than a single page), so this must be part of a
2253 * memory send (i.e. donate, lend or share) request.
2254 *
2255 * We can tell from the handle whether the memory transaction is for the
2256 * TEE or not.
2257 */
2258 if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
2259 FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
2260 struct vm_locked from_locked = vm_lock(from);
2261
2262 ret = ffa_memory_send_continue(from_locked, fragment_copy,
2263 fragment_length, handle,
2264 &api_page_pool);
2265 /*
2266 * `ffa_memory_send_continue` takes ownership of the
2267 * fragment_copy, so we don't need to free it here.
2268 */
2269 vm_unlock(&from_locked);
2270 } else {
2271 struct vm *to = vm_find(HF_TEE_VM_ID);
2272 struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
2273
2274 /*
2275 * The TEE RX buffer state is checked in
2276 * `ffa_memory_tee_send_continue` rather than here, as we need
2277 * to return `FFA_MEM_FRAG_RX` with the current offset rather
2278 * than FFA_ERROR FFA_BUSY in case it is busy.
2279 */
2280
2281 ret = ffa_memory_tee_send_continue(
2282 vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy,
2283 fragment_length, handle, &api_page_pool);
2284 /*
2285 * `ffa_memory_tee_send_continue` takes ownership of the
2286 * fragment_copy, so we don't need to free it here.
2287 */
Andrew Walbran290b0c92020-02-03 16:37:14 +00002288
2289 vm_unlock(&vm_to_from_lock.vm1);
2290 vm_unlock(&vm_to_from_lock.vm2);
2291 }
Andrew Walbrane908c4a2019-12-02 17:13:47 +00002292
2293 return ret;
2294}
Max Shvetsov40108e72020-08-27 12:39:50 +01002295
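/**
 * Records the entry point registered via FFA_SECONDARY_EP_REGISTER for the
 * calling partition, to be used when its secondary execution contexts are
 * started.
 */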
2296struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
2297 struct vcpu *current)
2298{
2299 struct vm_locked vm_locked;
2300
2301 vm_locked = vm_lock(current->vm);
2302 vm_locked.vm->secondary_ep = entry_point;
2303 vm_unlock(&vm_locked);
2304
2305 return (struct ffa_value){.func = FFA_SUCCESS_32};
2306}