Disable hardware alignment checking

At the moment, alignment fault checking is always enabled in TF-A
Tests (by setting the HSCTLR/SCTLR.A bit). Thus, for every instruction
that loads or stores one or more registers, the hardware checks that
the address being accessed is properly aligned to the size of the data
element(s) being accessed. If this check fails, it causes an alignment
fault, which is taken as a data abort exception.
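
To make the failure mode concrete, a hypothetical C snippet like the
one below can trigger such a fault: the cast promises 4-byte alignment
that the data may not have, so the compiler is free to emit a single
32-bit load, which faults under SCTLR.A on a misaligned address
(function name made up for this sketch).

    #include <stdint.h>

    /* Sketch only: with SCTLR.A set, this load takes an alignment
     * fault whenever (buf + 1) is not 4-byte aligned. */
    static uint32_t read_word(const uint8_t *buf)
    {
        return *(const uint32_t *)(buf + 1);
    }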

However, the compiler is currently unaware that it must not emit load
and store instructions resulting in unaligned accesses because we do
not compile the source code with -mstrict-align (AArch64) /
-mno-unaligned-access (AArch32). As a result, we might get some
unexpected alignment faults.
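
For reference, the safe, portable way to perform such an access in C
is via memcpy(); under -mstrict-align the compiler expands it into
byte loads, while without the flag it may emit one unaligned load
(function name made up for this sketch).

    #include <stdint.h>
    #include <string.h>

    /* Sketch: -mstrict-align lowers this memcpy() to byte loads;
     * otherwise the compiler may emit a single unaligned LDR. */
    static uint32_t read_word_safe(const uint8_t *buf)
    {
        uint32_t v;

        memcpy(&v, buf + 1, sizeof(v));
        return v;
    }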

We could request the compiler to align all data accesses, but whether
this gives us any performance benefit depends on the
microarchitecture. Thus, it is simpler to disable hardware alignment
checking and let the compiler make the call.
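
The hunks below achieve this simply by no longer setting the A bit
during early initialization. For comparison, explicitly clearing an
already-set bit would look roughly like this (AArch64 sketch, assuming
we run at EL2):

    mrs	x0, sctlr_el2
    bic	x0, x0, #SCTLR_A_BIT	/* stop alignment fault checking */
    msr	sctlr_el2, x0
    isb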

Change-Id: I6ef4afb09e0f87c8462a968da1ca2192ee075b40
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
diff --git a/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
index 61f816f..22b2f34 100644
--- a/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
+++ b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
@@ -19,11 +19,11 @@
 	stcopr	r0, HVBAR
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache and data access alignment checks.
+	 * Enable the instruction cache.
 	 * --------------------------------------------------------------------
 	 */
 	ldcopr	r0, HSCTLR
-	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	ldr	r1, =HSCTLR_I_BIT
 	orr	r0, r0, r1
 	stcopr	r0, HSCTLR
 	isb
diff --git a/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
index d83be3b..919ec27 100644
--- a/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
+++ b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
@@ -19,11 +19,10 @@
 	asm_write_vbar_el1_or_el2 x1
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache, stack pointer and data access
-	 * alignment checks.
+	 * Enable the instruction cache and stack pointer alignment checks.
 	 * --------------------------------------------------------------------
 	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mov	x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
 	asm_read_sctlr_el1_or_el2
 	orr	x0, x0, x1
 	asm_write_sctlr_el1_or_el2 x0
diff --git a/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
index dfe9e4a..8ba3549 100644
--- a/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
+++ b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
@@ -19,11 +19,11 @@
 	stcopr	r0, HVBAR
 
 	/* ---------------------------------------------------------------------
-	 * Enable the instruction cache and data access alignment checks.
+	 * Enable the instruction cache.
 	 * ---------------------------------------------------------------------
 	 */
 	ldcopr	r0, HSCTLR
-	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	ldr	r1, =HSCTLR_I_BIT
 	orr	r0, r0, r1
 	stcopr	r0, HSCTLR
 	isb
diff --git a/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
index 902636e..4e061b3 100644
--- a/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
+++ b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
@@ -19,11 +19,10 @@
 	asm_write_vbar_el1_or_el2 x1
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache, stack pointer and data access
-	 * alignment checks.
+	 * Enable the instruction cache and stack pointer alignment checks.
 	 * --------------------------------------------------------------------
 	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mov	x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
 	asm_read_sctlr_el1_or_el2
 	orr	x0, x0, x1
 	asm_write_sctlr_el1_or_el2 x0
diff --git a/tftf/framework/aarch32/entrypoint.S b/tftf/framework/aarch32/entrypoint.S
index 04a7d4c..5832dd7 100644
--- a/tftf/framework/aarch32/entrypoint.S
+++ b/tftf/framework/aarch32/entrypoint.S
@@ -24,11 +24,11 @@
 	stcopr	r0, HVBAR
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache and data access alignment checks.
+	 * Enable the instruction cache.
 	 * --------------------------------------------------------------------
 	 */
 	ldcopr	r0, HSCTLR
-	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	ldr	r1, =HSCTLR_I_BIT
 	orr	r0, r0, r1
 	stcopr	r0, HSCTLR
 	isb
@@ -103,11 +103,11 @@
 	stcopr	r0, HVBAR
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache and data access alignment checks.
+	 * Enable the instruction cache.
 	 * --------------------------------------------------------------------
 	 */
 	ldcopr	r0, HSCTLR
-	ldr	r1, =(HSCTLR_I_BIT | HSCTLR_A_BIT)
+	ldr	r1, =HSCTLR_I_BIT
 	orr	r0, r0, r1
 	stcopr	r0, HSCTLR
 	isb
diff --git a/tftf/framework/aarch64/entrypoint.S b/tftf/framework/aarch64/entrypoint.S
index dfedeae..1d524d2 100644
--- a/tftf/framework/aarch64/entrypoint.S
+++ b/tftf/framework/aarch64/entrypoint.S
@@ -25,11 +25,10 @@
 	asm_write_vbar_el1_or_el2 x1
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache, stack pointer and data access
-	 * alignment checks
+	 * Enable the instruction cache and stack pointer alignment checks.
 	 * --------------------------------------------------------------------
 	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mov	x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
 	asm_read_sctlr_el1_or_el2
 	orr	x0, x0, x1
 	asm_write_sctlr_el1_or_el2 x0
@@ -108,11 +107,10 @@
 	asm_write_vbar_el1_or_el2 x1
 
 	/* --------------------------------------------------------------------
-	 * Enable the instruction cache, stack pointer and data access
-	 * alignment checks
+	 * Enable the instruction cache and stack pointer alignment checks.
 	 * --------------------------------------------------------------------
 	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mov	x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
 	asm_read_sctlr_el1_or_el2
 	orr	x0, x0, x1
 	asm_write_sctlr_el1_or_el2 x0