FF-A: enable direct messaging between SPs
Changed conditions that validate the use of direct message interfaces
(functions 'arch_other_world_is_direct_request_valid' and
'arch_other_world_is_direct_response_valid') to allow direct messaging
between SPs; Implemented function to change execution context to the VM
of a given id (function 'api_switch_to_vm').
Change-Id: I0cc4f40d0dd9ed3bc554bfe16d92f9b2fce4177a
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 226b4e7..f0766a8 100644
--- a/src/api.c
+++ b/src/api.c
@@ -64,6 +64,38 @@
}
/**
+ * Switches the physical CPU back to the corresponding vCPU of the VM whose ID
+ * is given as argument of the function.
+ *
+ * Called to change the context between SPs for direct messaging (when Hafnium
+ * is SPMC), and on the context of the remaining 'api_switch_to_*' functions.
+ *
+ * This function works for partitions that are:
+ * - UP non-migratable.
+ * - MP with pinned Execution Contexts.
+ */
+static struct vcpu *api_switch_to_vm(struct vcpu *current,
+ struct ffa_value to_ret,
+ enum vcpu_state vcpu_state,
+ ffa_vm_id_t to_id)
+{
+ struct vm *to_vm = vm_find(to_id);
+ struct vcpu *next = vm_get_vcpu(to_vm, cpu_index(current->cpu));
+
+ CHECK(next != NULL);
+
+ /* Set the return value for the target VM. */
+ arch_regs_set_retval(&next->regs, to_ret);
+
+ /* Set the current vCPU state. */
+	sl_lock(&current->lock);
+ current->state = vcpu_state;
+	sl_unlock(&current->lock);
+
+ return next;
+}
+
+/**
* Switches the physical CPU back to the corresponding vCPU of the primary VM.
*
* This triggers the scheduling logic to run. Run in the context of secondary VM
@@ -73,9 +105,6 @@
struct ffa_value primary_ret,
enum vcpu_state secondary_state)
{
- struct vm *primary = vm_find(HF_PRIMARY_VM_ID);
- struct vcpu *next = vm_get_vcpu(primary, cpu_index(current->cpu));
-
/*
* If the secondary is blocked but has a timer running, sleep until the
* timer fires rather than indefinitely.
@@ -112,15 +141,8 @@
break;
}
- /* Set the return value for the primary VM's call to FFA_RUN. */
- arch_regs_set_retval(&next->regs, primary_ret);
-
- /* Mark the current vCPU as waiting. */
-	sl_lock(&current->lock);
- current->state = secondary_state;
- sl_unlock(¤t->lock);
-
- return next;
+ return api_switch_to_vm(current, primary_ret, secondary_state,
+ HF_PRIMARY_VM_ID);
}
/**
@@ -136,19 +158,8 @@
struct ffa_value other_world_ret,
enum vcpu_state vcpu_state)
{
- struct vcpu *next = vcpu_get_other_world_counterpart(current);
-
- CHECK(next != NULL);
-
- /* Set the return value for the other world's VM. */
- arch_regs_set_retval(&next->regs, other_world_ret);
-
- /* Set the current vCPU state. */
-	sl_lock(&current->lock);
- current->state = vcpu_state;
- sl_unlock(¤t->lock);
-
- return next;
+ return api_switch_to_vm(current, other_world_ret, vcpu_state,
+ HF_OTHER_WORLD_ID);
}
/**
@@ -1759,6 +1770,16 @@
struct vcpu **next)
{
struct vcpu_locked current_locked;
+ struct ffa_value to_ret = {
+ .func = args.func,
+ .arg1 = args.arg1,
+ .arg2 = 0,
+ .arg3 = args.arg3,
+ .arg4 = args.arg4,
+ .arg5 = args.arg5,
+ .arg6 = args.arg6,
+ .arg7 = args.arg7,
+ };
if (!arch_other_world_is_direct_response_valid(current, sender_vm_id,
receiver_vm_id)) {
@@ -1791,31 +1812,20 @@
	vcpu_unlock(&current_locked);
if (!vm_id_is_current_world(receiver_vm_id)) {
- *next = api_switch_to_other_world(current,
- (struct ffa_value){
- .func = args.func,
- .arg1 = args.arg1,
- .arg2 = 0,
- .arg3 = args.arg3,
- .arg4 = args.arg4,
- .arg5 = args.arg5,
- .arg6 = args.arg6,
- .arg7 = args.arg7,
- },
+ *next = api_switch_to_other_world(current, to_ret,
VCPU_STATE_BLOCKED_MAILBOX);
} else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
- *next = api_switch_to_primary(current,
- (struct ffa_value){
- .func = args.func,
- .arg1 = args.arg1,
- .arg2 = 0,
- .arg3 = args.arg3,
- .arg4 = args.arg4,
- .arg5 = args.arg5,
- .arg6 = args.arg6,
- .arg7 = args.arg7,
- },
+ *next = api_switch_to_primary(current, to_ret,
VCPU_STATE_BLOCKED_MAILBOX);
+ } else if (vm_id_is_current_world(receiver_vm_id)) {
+ /*
+		 * The receiver_vm_id is expected to belong to an SP; otherwise
+		 * 'arch_other_world_is_direct_response_valid' should have
+		 * made the function return an error before reaching this point.
+ */
+ *next = api_switch_to_vm(current, to_ret,
+ VCPU_STATE_BLOCKED_MAILBOX,
+ receiver_vm_id);
} else {
panic("Invalid direct message response invocation");
}
diff --git a/src/arch/aarch64/hypervisor/other_world.c b/src/arch/aarch64/hypervisor/other_world.c
index 8868873..74a2bb1 100644
--- a/src/arch/aarch64/hypervisor/other_world.c
+++ b/src/arch/aarch64/hypervisor/other_world.c
@@ -120,13 +120,13 @@
/*
* The normal world can send direct message requests
- * via the Hypervisor to any SP.
+ * via the Hypervisor to any SP. SPs can also send direct messages
+ * to each other.
*/
return sender_vm_id != receiver_vm_id &&
- current_vm_id == HF_HYPERVISOR_VM_ID &&
- vm_id_is_current_world(receiver_vm_id) &&
- !vm_id_is_current_world(sender_vm_id);
-
+ (sender_vm_id == current_vm_id ||
+ (current_vm_id == HF_HYPERVISOR_VM_ID &&
+ !vm_id_is_current_world(sender_vm_id)));
#else
/*
@@ -155,13 +155,12 @@
#if SECURE_WORLD == 1
/*
- * Direct message responses emitted from a SP
- * target a VM in NWd.
+ * Direct message responses emitted from a SP target either the NWd
+ * or another SP.
*/
return sender_vm_id != receiver_vm_id &&
sender_vm_id == current_vm_id &&
- vm_id_is_current_world(sender_vm_id) &&
- !vm_id_is_current_world(receiver_vm_id);
+ vm_id_is_current_world(sender_vm_id);
#else