fix(ff-a): FFA_RUN checking of vcpu index
FFA_RUN in the secure world does not check that the vCPU index provided
is the same as the physical CPU index. Per the FF-A specification, all
MP partitions are pinned to a physical CPU, and if this is not checked,
FFA_RUN can be successfully called on an arbitrary vCPU.
Signed-off-by: Raghu Krishnamurthy <raghu.ncstate@gmail.com>
Change-Id: I4edb02e1624efa54d3eee5a36eba4f9fed26e580
diff --git a/src/api.c b/src/api.c
index bb9c79e..8ce4136 100644
--- a/src/api.c
+++ b/src/api.c
@@ -877,7 +877,7 @@
struct vcpu *vcpu;
struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
- if (!plat_ffa_run_checks(current, vm_id, &ret, next)) {
+ if (!plat_ffa_run_checks(current, vm_id, vcpu_idx, &ret, next)) {
return ret;
}
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 87a552e..6f46cdd 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -249,13 +249,14 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- struct ffa_value *run_ret, struct vcpu **next)
+ ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
+ struct vcpu **next)
{
(void)current;
(void)target_vm_id;
(void)run_ret;
(void)next;
-
+ (void)vcpu_idx;
return true;
}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index d38a6b0..411bbdd 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -509,9 +509,11 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- struct ffa_value *run_ret, struct vcpu **next)
+ ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
+ struct vcpu **next)
{
(void)next;
+ (void)vcpu_idx;
/* Only the primary VM can switch vCPUs. */
if (current->vm->id != HF_PRIMARY_VM_ID) {
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 08d8825..a7b9ba6 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -624,7 +624,8 @@
* Check if current VM can resume target VM using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- struct ffa_value *run_ret, struct vcpu **next)
+ ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
+ struct vcpu **next)
{
(void)next;
/*
@@ -640,6 +641,12 @@
return false;
}
+ if (vm->vcpu_count > 1 && vcpu_idx != cpu_index(current->cpu)) {
+ dlog_verbose("vcpu_idx (%d) != pcpu index (%d)\n", vcpu_idx,
+ cpu_index(current->cpu));
+ return false;
+ }
+
target_vcpu = api_ffa_get_vm_vcpu(vm, current);
/* Lock both vCPUs at once. */
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 13c67e6..d043c6e 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -235,13 +235,14 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- struct ffa_value *run_ret, struct vcpu **next)
+ ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
+ struct vcpu **next)
{
(void)current;
(void)target_vm_id;
(void)run_ret;
(void)next;
-
+ (void)vcpu_idx;
return true;
}