SPMC: complete vCPU state save prior to normal world exit

This change addresses the generic SPMC problem mentioned in [1].
The current implementation leaves the live vCPU in a stale state prior
to exiting to the normal world. EL3/SPMD fortunately backs up both EL1
and EL2 although it looks cleaner for the SPMC to fully own saving of
the state. The other world loop is moved to after the vcpu_switch save
routine to permit unwinding the stack down to emitting SMC. The other
world arguments are held in the other world VM vCPU representing the
normal world. On returning from normal world, execution restarts after
the SMC and arguments are copied to the restored other world VM vCPU.
This solves problems like losing the state of the last SP when exiting
at boot time or upon a managed exit, and UP SP vCPU migration.

[1] https://git.trustedfirmware.org/hafnium/hafnium.git/
tree/src/arch/aarch64/hypervisor/handler.c?h=v2.4#n590

Change-Id: If3d4dd332233330a10cf2b66ce9c85480dff3793
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 9e279c2..0bc59c0 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -7,6 +7,9 @@
  */
 
 #include "hf/arch/offsets.h"
+
+#include "hf/arch/vmid_base.h"
+
 #include "msr.h"
 #include "exception_macros.S"
 
@@ -324,6 +327,60 @@
 	bl complete_saving_state
 	mov x0, x19
 
+#if SECURE_WORLD == 1
+
+	ldr x1, [x0, #VCPU_VM]
+	ldrh w1, [x1, #VM_ID]
+
+	/* Exit to normal world if VM is HF_OTHER_WORLD_ID. */
+	cmp w1, #HF_OTHER_WORLD_ID
+	bne vcpu_restore_all_and_run
+
+	/*
+	 * The current vCPU state is saved so it's now safe to switch to the
+	 * normal world.
+	 */
+
+other_world_loop:
+	/*
+	 * Prepare arguments from other world VM vCPU.
+	 * x19 holds the other world VM vCPU pointer.
+	 */
+	ldp x0, x1, [x19, #VCPU_REGS + 8 * 0]
+	ldp x2, x3, [x19, #VCPU_REGS + 8 * 2]
+	ldp x4, x5, [x19, #VCPU_REGS + 8 * 4]
+	ldp x6, x7, [x19, #VCPU_REGS + 8 * 6]
+
+	smc #0
+
+	/*
+	 * The call to EL3 returned. The first eight GP registers contain an
+	 * FF-A call from the physical FF-A instance. Save those arguments to the
+	 * other world VM vCPU.
+	 * x19 is restored with the other world VM vCPU pointer.
+	 */
+	stp x0, x1, [x19, #VCPU_REGS + 8 * 0]
+	stp x2, x3, [x19, #VCPU_REGS + 8 * 2]
+	stp x4, x5, [x19, #VCPU_REGS + 8 * 4]
+	stp x6, x7, [x19, #VCPU_REGS + 8 * 6]
+
+	/*
+	 * Stack is at top and execution can restart straight into C code.
+	 * Handle the FF-A call from other world.
+	 */
+	mov x0, x19
+	bl smc_handler_from_nwd
+
+	/*
+	 * If the smc handler returns null, this indicates no vCPU has to be
+	 * resumed and the GP registers contain a fresh FF-A response or call
+	 * directed to the normal world. Hence loop back and emit SMC again.
+	 * Otherwise restore the vCPU pointed to by the handler return value.
+	 */
+	cbz x0, other_world_loop
+
+#endif
+
 	/* Intentional fallthrough. */
 .global vcpu_restore_all_and_run
 vcpu_restore_all_and_run: