refactor: pass locked vcpu structures to ff-a helper functions

While handling an FF-A ABI call, hafnium has to alter the state of
multiple vCPUs. It is necessary for hafnium to lock the vCPUs (to
protect against concurrent accesses due to execution on other physical
cores) before modifying their properties and to unlock them once done.

Currently, this is done in a piecemeal approach which could lead to
deadlocks. This patch refactors the helper functions to receive
locked vCPU(s) by locking them as early as possible and unlocking
only at the tail end of FF-A ABI handler.

Also, in order to adhere to the rule stating that a VM's lock must be
acquired before any of its vCPUs' locks, this patch makes a few changes
to momentarily release a vCPU lock and re-acquire it immediately.

Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
Change-Id: I392f053f7384d7c34f22924a57a6d8e9f62ddb2e
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 660a1c3..b0dd520 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -20,9 +20,10 @@
 void api_regs_state_saved(struct vcpu *vcpu);
 int64_t api_mailbox_writable_get(const struct vcpu *current);
 int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current);
-struct vcpu *api_switch_to_vm(struct vcpu *current, struct ffa_value to_ret,
+struct vcpu *api_switch_to_vm(struct vcpu_locked current_locked,
+			      struct ffa_value to_ret,
 			      enum vcpu_state vcpu_state, ffa_vm_id_t to_id);
-struct vcpu *api_switch_to_primary(struct vcpu *current,
+struct vcpu *api_switch_to_primary(struct vcpu_locked current_locked,
 				   struct ffa_value primary_ret,
 				   enum vcpu_state secondary_state);
 
@@ -39,7 +40,8 @@
 			     ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
 			     struct vcpu *current, struct vcpu **next);
 int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
-				    uint32_t intid, struct vcpu *current,
+				    uint32_t intid,
+				    struct vcpu_locked current_locked,
 				    struct vcpu **next);
 void api_sri_send_if_delayed(struct vcpu *current);
 
@@ -48,7 +50,7 @@
 				  struct vcpu *current, struct vcpu **next);
 struct ffa_value api_ffa_msg_send2(ffa_vm_id_t sender_vm_id, uint32_t flags,
 				   struct vcpu *current);
-struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
+struct ffa_value api_ffa_msg_recv(bool block, struct vcpu_locked current_locked,
 				  struct vcpu **next);
 struct ffa_value api_ffa_rx_release(ffa_vm_id_t receiver_id,
 				    struct vcpu *current);
@@ -113,7 +115,7 @@
 					      struct vcpu **next);
 struct ffa_value api_ffa_secondary_ep_register(ipaddr_t entry_point,
 					       struct vcpu *current);
-struct vcpu *api_switch_to_other_world(struct vcpu *current,
+struct vcpu *api_switch_to_other_world(struct vcpu_locked current_locked,
 				       struct ffa_value other_world_ret,
 				       enum vcpu_state vcpu_state);
 struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
@@ -144,7 +146,8 @@
 struct ffa_value api_ffa_console_log(const struct ffa_value args,
 				     struct vcpu *current);
 
-void api_ffa_resume_direct_resp_target(struct vcpu *current, struct vcpu **next,
+void api_ffa_resume_direct_resp_target(struct vcpu_locked current_locked,
+				       struct vcpu **next,
 				       ffa_vm_id_t receiver_vm_id,
 				       struct ffa_value to_ret,
 				       bool is_nwd_call_chain);
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 1070a28..1e51d40 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -243,15 +243,15 @@
 bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current);
 bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current);
 
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
 					   struct vcpu **next);
 
 /**
  * Check if current SP can resume target VM/SP using FFA_RUN ABI.
  */
-bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
-			 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
-			 struct vcpu **next);
+bool plat_ffa_run_checks(struct vcpu_locked current_locked,
+			 ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			 struct ffa_value *run_ret, struct vcpu **next);
 
 /**
  * Deactivate interrupt.
@@ -262,13 +262,8 @@
 struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current,
 						  struct vcpu **next,
 						  bool from_normal_world);
-struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
-					      struct vcpu **next);
-struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
-						struct vcpu **next);
-
 bool plat_ffa_inject_notification_pending_interrupt(
-	struct vcpu_locked next_locked, struct vcpu *current,
+	struct vcpu_locked next_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked);
 
 bool plat_ffa_partition_info_get_regs_forward(
@@ -299,22 +294,23 @@
  * based on it's runtime model and return false if an illegal transition is
  * being performed.
  */
-bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
+bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
 					     ffa_vm_id_t vm_id,
 					     ffa_vm_id_t receiver_vm_id,
-					     struct vcpu *vcpu, uint32_t func,
+					     struct vcpu_locked locked_vcpu,
+					     uint32_t func,
 					     enum vcpu_state *next_state);
 
 struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current);
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
 					 struct vcpu_locked target_locked);
 
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked);
 
-void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
-						struct vcpu *next);
+void plat_ffa_unwind_call_chain_ffa_direct_resp(
+	struct vcpu_locked current_locked, struct vcpu_locked next_locked);
 
 void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
 					struct vm_locked vm_locked);
@@ -355,7 +351,7 @@
 	struct vm *from, void *fragment, uint32_t fragment_length,
 	ffa_memory_handle_t handle, struct mpool *page_pool);
 
-bool plat_ffa_is_direct_response_interrupted(struct vcpu *current);
+bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked);
 
 /**
  * This FF-A v1.0 FFA_MSG_SEND interface.
@@ -365,7 +361,7 @@
 				   ffa_vm_id_t receiver_vm_id, uint32_t size,
 				   struct vcpu *current, struct vcpu **next);
 
-struct ffa_value plat_ffa_yield_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
 					struct vcpu **next,
 					uint32_t timeout_low,
 					uint32_t timeout_high);
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index dbe6b94..398d9a8 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -354,16 +354,16 @@
 	       vcpu_locked.vcpu->interrupts.enabled_and_pending_fiq_count;
 }
 
-static inline void vcpu_call_chain_extend(struct vcpu *vcpu1,
-					  struct vcpu *vcpu2)
+static inline void vcpu_call_chain_extend(struct vcpu_locked vcpu1_locked,
+					  struct vcpu_locked vcpu2_locked)
 {
-	vcpu1->call_chain.next_node = vcpu2;
-	vcpu2->call_chain.prev_node = vcpu1;
+	vcpu1_locked.vcpu->call_chain.next_node = vcpu2_locked.vcpu;
+	vcpu2_locked.vcpu->call_chain.prev_node = vcpu1_locked.vcpu;
 }
 
-static inline void vcpu_call_chain_remove_node(struct vcpu *vcpu1,
-					       struct vcpu *vcpu2)
+static inline void vcpu_call_chain_remove_node(struct vcpu_locked vcpu1_locked,
+					       struct vcpu_locked vcpu2_locked)
 {
-	vcpu1->call_chain.prev_node = NULL;
-	vcpu2->call_chain.next_node = NULL;
+	vcpu1_locked.vcpu->call_chain.prev_node = NULL;
+	vcpu2_locked.vcpu->call_chain.next_node = NULL;
 }