ARM is now Arm: update comments to use the company's current "Arm" capitalization (e.g. Armv8.1, Armv8.4, Armv8.5). No functional change.

Change-Id: Ib272ebd46423dd1a84605abe3d6d2fe0c729b76a
diff --git a/src/arch/aarch64/hypervisor/sysregs.h b/src/arch/aarch64/hypervisor/sysregs.h
index 252d7c1..60e7f31 100644
--- a/src/arch/aarch64/hypervisor/sysregs.h
+++ b/src/arch/aarch64/hypervisor/sysregs.h
@@ -180,7 +180,7 @@
  */
 
 /**
- * Trap ID group 5 (ARMv8.5-MemTag related).
+ * Trap ID group 5 (Armv8.5-MemTag related).
  */
 #define HCR_EL2_TID5 (UINT64_C(0x1) << 58)
 
diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h
index 96b01c8..3e3d1a0 100644
--- a/src/arch/aarch64/inc/hf/arch/spinlock.h
+++ b/src/arch/aarch64/inc/hf/arch/spinlock.h
@@ -17,7 +17,7 @@
 #pragma once
 
 /**
- * Spinlock implementation using ARMv8.0 LDXR/STXR pair and a WFE pause.
+ * Spinlock implementation using Armv8.0 LDXR/STXR pair and a WFE pause.
  *
  * Implementation using C11 atomics also generates a LDXR/STXR pair but no WFE.
  * Without it we observe that Cortex A72 can easily livelock and not make
@@ -25,7 +25,7 @@
  *
  * TODO(b/141087046): Forward progress is still not guaranteed as even with WFE
  * we see that A72 can livelock for extremely tight loops. We should investigate
- * the guarantees provided by atomic instructions introduced in ARMv8.1 LSE.
+ * the guarantees provided by atomic instructions introduced in Armv8.1 LSE.
  */
 
 #include <stdint.h>
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index f93bd8a..d6b00f2 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -286,7 +286,7 @@
 	dsb(ishst);
 
 	/*
-	 * Revisions prior to ARMv8.4 do not support invalidating a range of
+	 * Revisions prior to Armv8.4 do not support invalidating a range of
 	 * addresses, which means we have to loop over individual pages. If
 	 * there are too many, it is quicker to invalidate all TLB entries.
 	 */
@@ -333,7 +333,7 @@
 	dsb(ishst);
 
 	/*
-	 * Revisions prior to ARMv8.4 do not support invalidating a range of
+	 * Revisions prior to Armv8.4 do not support invalidating a range of
 	 * addresses, which means we have to loop over individual pages. If
 	 * there are too many, it is quicker to invalidate all TLB entries.
 	 */
diff --git a/src/fdt.c b/src/fdt.c
index d9cd71d..d12faf2 100644
--- a/src/fdt.c
+++ b/src/fdt.c
@@ -305,7 +305,7 @@
 		return true;
 	case sizeof(uint64_t):
 		/*
-		 * ARMv8 requires `data` to be realigned to 64-bit boundary
+		 * Armv8 requires `data` to be realigned to 64-bit boundary
 		 * to dereference as uint64_t. May not be needed on other
 		 * architectures.
 		 */