Revert "Emit speculation barriers after ERETs"

This reverts commit 366cb48b89ac012a44c3b45cf861649d1c704d42.
The change was accidentally pushed directly to the main tree.
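
For reference, the reverted change routed every exception return through an
eret_with_sb macro, which placed a speculation barrier after the ERET. The
sequence below is quoted from the macro removed in the diff:

    eret
    dsb nsh
    isb

This revert restores a plain eret at each of those return sites.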

Bug: 146490856
diff --git a/src/arch/aarch64/exception_macros.S b/src/arch/aarch64/exception_macros.S
index 7214e8e..642ee98 100644
--- a/src/arch/aarch64/exception_macros.S
+++ b/src/arch/aarch64/exception_macros.S
@@ -15,23 +15,6 @@
  */
 
 /**
- * From Linux commit 679db70801da9fda91d26caf13bf5b5ccc74e8e8:
- * "Some CPUs can speculate past an ERET instruction and potentially perform
- * speculative accesses to memory before processing the exception return.
- * Since the register state is often controlled by a lower privilege level
- * at the point of an ERET, this could potentially be used as part of a
- * side-channel attack."
- *
- * This macro emits a speculation barrier after the ERET to prevent the CPU
- * from speculating past the exception return.
- */
-.macro eret_with_sb
-	eret
-	dsb	nsh
-	isb
-.endm
-
-/**
  * Saves the volatile registers onto the stack. This currently takes 14
  * instructions, so it can be used in exception handlers with 18 instructions
  * left, 2 of which in the same cache line (assuming a 16-byte cache line).
@@ -107,7 +90,7 @@
 	bl \handler
 	restore_volatile_from_stack \elx
 	msr spsel, #0
-	eret_with_sb
+	eret
 .endm
 
 /**
diff --git a/src/arch/aarch64/hftest/exceptions.S b/src/arch/aarch64/hftest/exceptions.S
index 8e62170..b54e4d2 100644
--- a/src/arch/aarch64/hftest/exceptions.S
+++ b/src/arch/aarch64/hftest/exceptions.S
@@ -113,8 +113,8 @@
 skip_elr:
 	/* Restore register spsr_el1 using x1 as scratch. */
 	ldr x1, [sp, #8 * 23]
-	msr spsr_el1, x1
+        msr spsr_el1, x1
 
 	/* Restore x0 & x1, and release stack space. */
 	ldp x0, x1, [sp], #8 * 24
-	eret_with_sb
+	eret
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index a4c12af..2f47c00 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -518,7 +518,7 @@
 	/* Restore x0..x3, which we have used as scratch before. */
 	ldp x2, x3, [x0, #VCPU_REGS + 8 * 2]
 	ldp x0, x1, [x0, #VCPU_REGS + 8 * 0]
-	eret_with_sb
+	eret
 
 .balign 0x40
 /**
@@ -526,4 +526,4 @@
  */
 restore_from_stack_and_return:
 	restore_volatile_from_stack el2
-	eret_with_sb
+	eret