Merge changes from topic "for-lts-v2.8.29" into lts-v2.8

* changes:
  feat(fvp): allow configurable FVP Trusted SRAM size
  fix(security): apply SMCCC_ARCH_WORKAROUND_4 to affected cpus
  fix(security): add support in cpu_ops for CVE-2024-7881
  fix(security): add CVE-2024-7881 mitigation to Cortex-X3
  fix(security): add CVE-2024-7881 mitigation to Neoverse-V3
  fix(security): add CVE-2024-7881 mitigation to Neoverse-V2
  fix(security): add CVE-2024-7881 mitigation to Cortex-X4
  fix(security): enable WORKAROUND_CVE_2024_7881 build option
  fix(services): disable workaround discovery on aarch32 for now
  build: always prefix section names with `.`
  style: normalize linker script code style
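
A note on the last two changes in the list: they account for the bulk of the
diff below, renaming bespoke input sections (cpu_ops, rt_svc_descs,
bakery_lock, xlat_table, ...) to dot-prefixed equivalents and reflowing the
linker scripts into one common style. As a minimal sketch of the consumer side
of that convention (the struct layout is illustrative, not the real
rt_svc_desc_t), an object emitted into a dot-prefixed section is what the
KEEP(*(.rt_svc_descs)) pattern in bl_common.ld.h collects:

    /*
     * Illustrative descriptor: anything placed in ".rt_svc_descs" is
     * gathered between __RT_SVC_DESCS_START__ and __RT_SVC_DESCS_END__.
     */
    struct example_desc {
            unsigned int id;
    };

    static const struct example_desc demo_desc
            __attribute__((used, section(".rt_svc_descs"))) = {
            .id = 42U,
    };
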
diff --git a/bl1/bl1.ld.S b/bl1/bl1.ld.S
index c4ec5fe..bec234b 100644
--- a/bl1/bl1.ld.S
+++ b/bl1/bl1.ld.S
@@ -1,13 +1,12 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /*
- * The .data section gets copied from ROM to RAM at runtime.
- * Its LMA should be 16-byte aligned to allow efficient copying of 16-bytes
- * aligned regions in it.
+ * The .data section gets copied from ROM to RAM at runtime. Its LMA should be
+ * 16-byte aligned to allow efficient copying of 16-byte aligned regions in it.
  * Its VMA must be page-aligned as it marks the first read/write page.
  */
 #define DATA_ALIGN	16
@@ -24,23 +23,26 @@
     RAM (rwx): ORIGIN = BL1_RW_BASE, LENGTH = BL1_RW_LIMIT - BL1_RW_BASE
 }
 
-SECTIONS
-{
+SECTIONS {
     . = BL1_RO_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL1_RO_BASE address is not aligned on a page boundary.")
+        "BL1_RO_BASE address is not aligned on a page boundary.")
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
         *bl1_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
     } >ROM
 
-    /* .ARM.extab and .ARM.exidx are only added because Clang need them */
+    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
     .ARM.extab . : {
         *(.ARM.extab* .gnu.linkonce.armextab.*)
     } >ROM
@@ -51,51 +53,57 @@
 
     .rodata . : {
         __RODATA_START__ = .;
+
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         /*
          * No need to pad out the .rodata section to a page boundary. Next is
          * the .data section, which can be mapped in ROM with the same memory
          * attributes as the .rodata section.
          *
-         * Pad out to 16 bytes though as .data section needs to be 16 byte
-         * aligned and lld does not align the LMA to the aligment specified
+         * Pad out to 16 bytes though as the .data section needs to be 16-byte
+         * aligned and lld does not align the LMA to the alignment specified
          * on the .data section.
          */
         __RODATA_END__ = .;
-         . = ALIGN(16);
+
+        . = ALIGN(16);
     } >ROM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *bl1_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         *(.vectors)
+
         __RO_END__ = .;
 
         /*
-         * Pad out to 16 bytes as .data section needs to be 16 byte aligned and
-         * lld does not align the LMA to the aligment specified on the .data
-         * section.
+         * Pad out to 16 bytes as the .data section needs to be 16-byte aligned
+         * and lld does not align the LMA to the alignment specified on the
+         * .data section.
          */
-         . = ALIGN(16);
+        . = ALIGN(16);
     } >ROM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
-           "cpu_ops not defined for this platform.")
+        "cpu_ops not defined for this platform.")
 
     . = BL1_RW_BASE;
+
     ASSERT(BL1_RW_BASE == ALIGN(PAGE_SIZE),
-           "BL1_RW_BASE address is not aligned on a page boundary.")
+        "BL1_RW_BASE address is not aligned on a page boundary.")
 
     DATA_SECTION >RAM AT>ROM
+
     __DATA_RAM_START__ = __DATA_START__;
     __DATA_RAM_END__ = __DATA_END__;
 
@@ -105,24 +113,26 @@
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
-     * memory attributes for the coherent data page tables.
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data. This is required to set up the correct memory
+     * attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
-        *(tzfw_coherent_mem)
+        *(.tzfw_coherent_mem)
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure the rest of
+         * the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
-#endif
+#endif /* USE_COHERENT_MEM */
 
     __BL1_RAM_START__ = ADDR(.data);
     __BL1_RAM_END__ = .;
@@ -135,15 +145,16 @@
      * of BL1's actual content in Trusted ROM.
      */
     __BL1_ROM_END__ =  __DATA_ROM_START__ + __DATA_SIZE__;
+
     ASSERT(__BL1_ROM_END__ <= BL1_RO_LIMIT,
-           "BL1's ROM content has exceeded its limit.")
+        "BL1's ROM content has exceeded its limit.")
 
     __BSS_SIZE__ = SIZEOF(.bss);
 
 #if USE_COHERENT_MEM
     __COHERENT_RAM_UNALIGNED_SIZE__ =
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
+#endif /* USE_COHERENT_MEM */
 
     ASSERT(. <= BL1_RW_LIMIT, "BL1's RW section has exceeded its limit.")
 }
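
The comment at the top of bl1.ld.S is what motivates the ALIGN(16) padding at
the end of the read-only output sections: both the LMA and the VMA of .data
end up 16-byte aligned, so the ROM-to-RAM copy can move whole 16-byte chunks.
A hypothetical C rendition of that copy (TF-A performs it in early assembly;
the symbols come from bl1.ld.S and bl_common.ld.h):

    #include <stddef.h>
    #include <string.h>

    /* Linker-defined symbols: addresses, not variables. */
    extern char __DATA_ROM_START__[], __DATA_RAM_START__[], __DATA_END__[];

    static void copy_data_to_ram(void)
    {
            size_t size = (size_t)(__DATA_END__ - __DATA_RAM_START__);

            /* Both pointers are 16-byte aligned per the linker script. */
            memcpy(__DATA_RAM_START__, __DATA_ROM_START__, size);
    }
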
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 80cf7db..458a12b 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,28 +15,31 @@
     RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
 }
 
-
-SECTIONS
-{
+SECTIONS {
     . = BL2_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL2_BASE address is not aligned on a page boundary.")
+        "BL2_BASE address is not aligned on a page boundary.")
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
 #if ENABLE_RME
         *bl2_rme_entrypoint.o(.text*)
 #else /* ENABLE_RME */
         *bl2_entrypoint.o(.text*)
 #endif /* ENABLE_RME */
+
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
     } >RAM
 
-    /* .ARM.extab and .ARM.exidx are only added because Clang need them */
+    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
     .ARM.extab . : {
         *(.ARM.extab* .gnu.linkonce.armextab.*)
     } >RAM
@@ -47,39 +50,41 @@
 
     .rodata . : {
         __RODATA_START__ = .;
+
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         . = ALIGN(PAGE_SIZE);
+
         __RODATA_END__ = .;
     } >RAM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *bl2_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         *(.vectors)
+
         __RO_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked as
-         * read-only, executable.  No RW data from the next section must
-         * creep in.  Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __RO_END__ = .;
     } >RAM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+    __RW_START__ = .;
 
     DATA_SECTION >RAM
     STACK_SECTION >RAM
@@ -88,29 +93,27 @@
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data.  This is required to set up the correct
      * memory attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
-        *(tzfw_coherent_mem)
+        *(.tzfw_coherent_mem)
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure the rest of
+         * the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
-#endif
+#endif /* USE_COHERENT_MEM */
 
-    /*
-     * Define a linker symbol to mark end of the RW memory area for this
-     * image.
-     */
     __RW_END__ = .;
     __BL2_END__ = .;
 
@@ -119,7 +122,7 @@
 #if USE_COHERENT_MEM
     __COHERENT_RAM_UNALIGNED_SIZE__ =
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
+#endif /* USE_COHERENT_MEM */
 
     ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
 }
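
The page alignment that SEPARATE_CODE_AND_RODATA enforces between .text and
.rodata exists so that each range can be mapped with its own attributes. A
sketch of the mapping step, modelled on what platform setup code does with the
xlat v2 API (the helper name is hypothetical; mmap_add_region(), MT_CODE and
MT_RO_DATA are from xlat_tables_v2.h):

    #include <stdint.h>
    #include <lib/xlat_tables/xlat_tables_v2.h>

    extern char __TEXT_START__[], __TEXT_END__[];
    extern char __RODATA_START__[], __RODATA_END__[];

    static void map_bl2_ro_regions(void)
    {
            /* Executable, read-only code pages. */
            mmap_add_region((uintptr_t)__TEXT_START__,
                            (uintptr_t)__TEXT_START__,
                            (size_t)(__TEXT_END__ - __TEXT_START__),
                            MT_CODE | MT_SECURE);

            /* Non-executable, read-only data pages. */
            mmap_add_region((uintptr_t)__RODATA_START__,
                            (uintptr_t)__RODATA_START__,
                            (size_t)(__RODATA_END__ - __RODATA_START__),
                            MT_RO_DATA | MT_SECURE);
    }
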
diff --git a/bl2/bl2_el3.ld.S b/bl2/bl2_el3.ld.S
index c95706c..aa457fa 100644
--- a/bl2/bl2_el3.ld.S
+++ b/bl2/bl2_el3.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,140 +15,158 @@
 #if BL2_IN_XIP_MEM
     ROM (rx): ORIGIN = BL2_RO_BASE, LENGTH = BL2_RO_LIMIT - BL2_RO_BASE
     RAM (rwx): ORIGIN = BL2_RW_BASE, LENGTH = BL2_RW_LIMIT - BL2_RW_BASE
-#else
+#else /* BL2_IN_XIP_MEM */
     RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
-#endif
+#endif /* BL2_IN_XIP_MEM */
+
 #if SEPARATE_BL2_NOLOAD_REGION
     RAM_NOLOAD (rw!a): ORIGIN = BL2_NOLOAD_START, LENGTH = BL2_NOLOAD_LIMIT - BL2_NOLOAD_START
-#else
-#define RAM_NOLOAD RAM
-#endif
+#else /* SEPARATE_BL2_NOLOAD_REGION */
+#   define RAM_NOLOAD RAM
+#endif /* SEPARATE_BL2_NOLOAD_REGION */
 }
 
 #if !BL2_IN_XIP_MEM
-#define ROM RAM
-#endif
+#   define ROM RAM
+#endif /* !BL2_IN_XIP_MEM */
 
-SECTIONS
-{
+SECTIONS {
 #if BL2_IN_XIP_MEM
     . = BL2_RO_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL2_RO_BASE address is not aligned on a page boundary.")
-#else
+        "BL2_RO_BASE address is not aligned on a page boundary.")
+#else /* BL2_IN_XIP_MEM */
     . = BL2_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL2_BASE address is not aligned on a page boundary.")
-#endif
+        "BL2_BASE address is not aligned on a page boundary.")
+#endif /* BL2_IN_XIP_MEM */
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
-	__TEXT_RESIDENT_START__ = .;
-	*bl2_el3_entrypoint.o(.text*)
-	*(.text.asm.*)
-	__TEXT_RESIDENT_END__ = .;
+        __TEXT_RESIDENT_START__ = .;
+
+        *bl2_el3_entrypoint.o(.text*)
+        *(.text.asm.*)
+
+        __TEXT_RESIDENT_END__ = .;
+
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
-     } >ROM
+    } >ROM
 
     .rodata . : {
         __RODATA_START__ = .;
+
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         . = ALIGN(PAGE_SIZE);
+
         __RODATA_END__ = .;
     } >ROM
 
     ASSERT(__TEXT_RESIDENT_END__ - __TEXT_RESIDENT_START__ <= PAGE_SIZE,
-          "Resident part of BL2 has exceeded its limit.")
-#else
-    ro . : {
+        "Resident part of BL2 has exceeded its limit.")
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
-	__TEXT_RESIDENT_START__ = .;
-	*bl2_el3_entrypoint.o(.text*)
-	*(.text.asm.*)
-	__TEXT_RESIDENT_END__ = .;
+        __TEXT_RESIDENT_START__ = .;
+
+        *bl2_el3_entrypoint.o(.text*)
+        *(.text.asm.*)
+
+        __TEXT_RESIDENT_END__ = .;
+
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         *(.vectors)
+
         __RO_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked as
-         * read-only, executable.  No RW data from the next section must
-         * creep in.  Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
 
         __RO_END__ = .;
     } >ROM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
-          "cpu_ops not defined for this platform.")
+        "cpu_ops not defined for this platform.")
 
 #if BL2_IN_XIP_MEM
     . = BL2_RW_BASE;
+
     ASSERT(BL2_RW_BASE == ALIGN(PAGE_SIZE),
            "BL2_RW_BASE address is not aligned on a page boundary.")
-#endif
+#endif /* BL2_IN_XIP_MEM */
 
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+    __RW_START__ = .;
 
     DATA_SECTION >RAM AT>ROM
+
     __DATA_RAM_START__ = __DATA_START__;
     __DATA_RAM_END__ = __DATA_END__;
 
     RELA_SECTION >RAM
+
 #if SEPARATE_BL2_NOLOAD_REGION
     SAVED_ADDR = .;
+
     . = BL2_NOLOAD_START;
+
     __BL2_NOLOAD_START__ = .;
-#endif
+#endif /* SEPARATE_BL2_NOLOAD_REGION */
+
     STACK_SECTION >RAM_NOLOAD
     BSS_SECTION >RAM_NOLOAD
     XLAT_TABLE_SECTION >RAM_NOLOAD
+
 #if SEPARATE_BL2_NOLOAD_REGION
     __BL2_NOLOAD_END__ = .;
+
     . = SAVED_ADDR;
-#endif
+#endif /* SEPARATE_BL2_NOLOAD_REGION */
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data.  This is required to set up the correct
      * memory attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
-        *(tzfw_coherent_mem)
+
+        *(.tzfw_coherent_mem)
+
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure the rest of
+         * the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
-#endif
+#endif /* USE_COHERENT_MEM */
 
-    /*
-     * Define a linker symbol to mark end of the RW memory area for this
-     * image.
-     */
     __RW_END__ = .;
     __BL2_END__ = .;
 
@@ -165,23 +183,24 @@
 
     /*
      * The .data section is the last PROGBITS section so its end marks the end
-     * of BL2's RO content in XIP memory..
+     * of BL2's RO content in XIP memory.
      */
     __BL2_ROM_END__ =  __DATA_ROM_START__ + __DATA_SIZE__;
+
     ASSERT(__BL2_ROM_END__ <= BL2_RO_LIMIT,
            "BL2's RO content has exceeded its limit.")
-#endif
-    __BSS_SIZE__ = SIZEOF(.bss);
+#endif /* BL2_IN_XIP_MEM */
 
+    __BSS_SIZE__ = SIZEOF(.bss);
 
 #if USE_COHERENT_MEM
     __COHERENT_RAM_UNALIGNED_SIZE__ =
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
+#endif /* USE_COHERENT_MEM */
 
 #if BL2_IN_XIP_MEM
     ASSERT(. <= BL2_RW_LIMIT, "BL2's RW content has exceeded its limit.")
-#else
+#else /* BL2_IN_XIP_MEM */
     ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
-#endif
+#endif /* BL2_IN_XIP_MEM */
 }
diff --git a/bl2u/bl2u.ld.S b/bl2u/bl2u.ld.S
index a7752a4..52a925b 100644
--- a/bl2u/bl2u.ld.S
+++ b/bl2u/bl2u.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,67 +17,69 @@
     RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
 }
 
-
-SECTIONS
-{
+SECTIONS {
     . = BL2U_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL2U_BASE address is not aligned on a page boundary.")
+        "BL2U_BASE address is not aligned on a page boundary.")
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
         *bl2u_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
-     } >RAM
+    } >RAM
 
-     /* .ARM.extab and .ARM.exidx are only added because Clang need them */
-     .ARM.extab . : {
+    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
+    .ARM.extab . : {
         *(.ARM.extab* .gnu.linkonce.armextab.*)
-     } >RAM
+    } >RAM
 
-     .ARM.exidx . : {
+    .ARM.exidx . : {
         *(.ARM.exidx* .gnu.linkonce.armexidx.*)
-     } >RAM
+    } >RAM
 
     .rodata . : {
         __RODATA_START__ = .;
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *bl2u_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         *(.vectors)
+
         __RO_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked as
-         * read-only, executable.  No RW data from the next section must
-         * creep in.  Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __RO_END__ = .;
     } >RAM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+    __RW_START__ = .;
 
     DATA_SECTION >RAM
     STACK_SECTION >RAM
@@ -86,29 +88,27 @@
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data.  This is required to set up the correct
      * memory attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
-        *(tzfw_coherent_mem)
+        *(.tzfw_coherent_mem)
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure the rest of
+         * the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
-#endif
+#endif /* USE_COHERENT_MEM */
 
-    /*
-     * Define a linker symbol to mark end of the RW memory area for this
-     * image.
-     */
     __RW_END__ = .;
     __BL2U_END__ = .;
 
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 309e752..c829058 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,137 +11,145 @@
 OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
 ENTRY(bl31_entrypoint)
 
-
 MEMORY {
     RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
+
 #if SEPARATE_NOBITS_REGION
     NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
-#else
-#define NOBITS RAM
-#endif
+#else /* SEPARATE_NOBITS_REGION */
+#   define NOBITS RAM
+#endif /* SEPARATE_NOBITS_REGION */
 }
 
 #ifdef PLAT_EXTRA_LD_SCRIPT
-#include <plat.ld.S>
-#endif
+#   include <plat.ld.S>
+#endif /* PLAT_EXTRA_LD_SCRIPT */
 
-SECTIONS
-{
+SECTIONS {
     . = BL31_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL31_BASE address is not aligned on a page boundary.")
+        "BL31_BASE address is not aligned on a page boundary.")
 
     __BL31_START__ = .;
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
         *bl31_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(SORT(.text*)))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
     } >RAM
 
     .rodata . : {
         __RODATA_START__ = .;
+
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-#if PLAT_EXTRA_RODATA_INCLUDES
-#include <plat.ld.rodata.inc>
-#endif
+#   if PLAT_EXTRA_RODATA_INCLUDES
+#       include <plat.ld.rodata.inc>
+#   endif /* PLAT_EXTRA_RODATA_INCLUDES */
 
-	RODATA_COMMON
+        RODATA_COMMON
 
-        /* Place pubsub sections for events */
         . = ALIGN(8);
-#include <lib/el3_runtime/pubsub_events.h>
+
+#   include <lib/el3_runtime/pubsub_events.h>
 
         . = ALIGN(PAGE_SIZE);
+
         __RODATA_END__ = .;
     } >RAM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *bl31_entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
-        /* Place pubsub sections for events */
         . = ALIGN(8);
-#include <lib/el3_runtime/pubsub_events.h>
+
+#   include <lib/el3_runtime/pubsub_events.h>
 
         *(.vectors)
+
         __RO_END_UNALIGNED__ = .;
+
         /*
          * Memory page(s) mapped to this section will be marked as read-only,
-         * executable.  No RW data from the next section must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __RO_END__ = .;
     } >RAM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
-           "cpu_ops not defined for this platform.")
+        "cpu_ops not defined for this platform.")
 
 #if SPM_MM
-#ifndef SPM_SHIM_EXCEPTIONS_VMA
-#define SPM_SHIM_EXCEPTIONS_VMA         RAM
-#endif
+#   ifndef SPM_SHIM_EXCEPTIONS_VMA
+#       define SPM_SHIM_EXCEPTIONS_VMA RAM
+#   endif /* SPM_SHIM_EXCEPTIONS_VMA */
 
     /*
      * Exception vectors of the SPM shim layer. They must be aligned to a 2K
-     * address, but we need to place them in a separate page so that we can set
-     * individual permissions to them, so the actual alignment needed is 4K.
+     * address, but we need to place them in a separate page so that we can set
+     * individual permissions on them, so the actual alignment needed is the
+     * page size.
      *
      * There's no need to include this into the RO section of BL31 because it
      * doesn't need to be accessed by BL31.
      */
-    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
+    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
         __SPM_SHIM_EXCEPTIONS_START__ = .;
+
         *(.spm_shim_exceptions)
+
         . = ALIGN(PAGE_SIZE);
+
         __SPM_SHIM_EXCEPTIONS_END__ = .;
     } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
 
-    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
-    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
-#endif
+    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));
 
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
+#endif /* SPM_MM */
+
+    __RW_START__ = .;
 
     DATA_SECTION >RAM
     RELA_SECTION >RAM
 
 #ifdef BL31_PROGBITS_LIMIT
     ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
-#endif
+#endif /* BL31_PROGBITS_LIMIT */
 
 #if SEPARATE_NOBITS_REGION
-    /*
-     * Define a linker symbol to mark end of the RW memory area for this
-     * image.
-     */
     . = ALIGN(PAGE_SIZE);
+
     __RW_END__ = .;
     __BL31_END__ = .;
 
     ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
 
     . = BL31_NOBITS_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL31 NOBITS base address is not aligned on a page boundary.")
+        "BL31 NOBITS base address is not aligned on a page boundary.")
 
     __NOBITS_START__ = .;
-#endif
+#endif /* SEPARATE_NOBITS_REGION */
 
     STACK_SECTION >NOBITS
     BSS_SECTION >NOBITS
@@ -149,49 +157,44 @@
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data.  This is required to set up the correct
      * memory attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
+
         /*
-         * Bakery locks are stored in coherent memory
-         *
-         * Each lock's data is contiguous and fully allocated by the compiler
+         * Bakery locks are stored in coherent memory. Each lock's data is
+         * contiguous and fully allocated by the compiler.
          */
-        *(bakery_lock)
-        *(tzfw_coherent_mem)
+        *(.bakery_lock)
+        *(.tzfw_coherent_mem)
+
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure the rest of
+         * the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >NOBITS
-#endif
+#endif /* USE_COHERENT_MEM */
 
 #if SEPARATE_NOBITS_REGION
-    /*
-     * Define a linker symbol to mark end of the NOBITS memory area for this
-     * image.
-     */
     __NOBITS_END__ = .;
 
     ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
-#else
-    /*
-     * Define a linker symbol to mark end of the RW memory area for this
-     * image.
-     */
+#else /* SEPARATE_NOBITS_REGION */
     __RW_END__ = .;
     __BL31_END__ = .;
 
     ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
-#endif
+#endif /* SEPARATE_NOBITS_REGION */
 
     /DISCARD/ : {
         *(.dynsym .dynstr .hash .gnu.hash)
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index 475affa..1695e1e 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,130 +16,132 @@
 }
 
 #ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
-#include <plat_sp_min.ld.S>
-#endif
+#   include <plat_sp_min.ld.S>
+#endif /* PLAT_SP_MIN_EXTRA_LD_SCRIPT */
 
-SECTIONS
-{
+SECTIONS {
     . = BL32_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL32_BASE address is not aligned on a page boundary.")
+        "BL32_BASE address is not aligned on a page boundary.")
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
         *entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
     } >RAM
 
-     /* .ARM.extab and .ARM.exidx are only added because Clang need them */
-     .ARM.extab . : {
+    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
+    .ARM.extab . : {
         *(.ARM.extab* .gnu.linkonce.armextab.*)
-     } >RAM
+    } >RAM
 
-     .ARM.exidx . : {
+    .ARM.exidx . : {
         *(.ARM.exidx* .gnu.linkonce.armexidx.*)
-     } >RAM
+    } >RAM
 
     .rodata . : {
         __RODATA_START__ = .;
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
-        /* Place pubsub sections for events */
         . = ALIGN(8);
-#include <lib/el3_runtime/pubsub_events.h>
+
+#   include <lib/el3_runtime/pubsub_events.h>
 
         . = ALIGN(PAGE_SIZE);
+
         __RODATA_END__ = .;
     } >RAM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *entrypoint.o(.text*)
         *(SORT_BY_ALIGNMENT(.text*))
         *(SORT_BY_ALIGNMENT(.rodata*))
 
-	RODATA_COMMON
+        RODATA_COMMON
 
-        /* Place pubsub sections for events */
         . = ALIGN(8);
-#include <lib/el3_runtime/pubsub_events.h>
+
+#   include <lib/el3_runtime/pubsub_events.h>
 
         *(.vectors)
+
         __RO_END_UNALIGNED__ = .;
 
         /*
-         * Memory page(s) mapped to this section will be marked as
-         * read-only, executable.  No RW data from the next section must
-         * creep in.  Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __RO_END__ = .;
     } >RAM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
-           "cpu_ops not defined for this platform.")
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+        "cpu_ops not defined for this platform.")
+
+    __RW_START__ = .;
 
     DATA_SECTION >RAM
     RELA_SECTION >RAM
 
 #ifdef BL32_PROGBITS_LIMIT
     ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
-#endif
+#endif /* BL32_PROGBITS_LIMIT */
 
     STACK_SECTION >RAM
     BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 
-     __BSS_SIZE__ = SIZEOF(.bss);
+    __BSS_SIZE__ = SIZEOF(.bss);
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data.  This is required to set up the correct
      * memory attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
+
         /*
-         * Bakery locks are stored in coherent memory
-         *
-         * Each lock's data is contiguous and fully allocated by the compiler
+         * Bakery locks are stored in coherent memory. Each lock's data is
+         * contiguous and fully allocated by the compiler.
          */
-        *(bakery_lock)
-        *(tzfw_coherent_mem)
+        *(.bakery_lock)
+        *(.tzfw_coherent_mem)
+
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure that the rest
+         * of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
 
     __COHERENT_RAM_UNALIGNED_SIZE__ =
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
+#endif /* USE_COHERENT_MEM */
 
-    /*
-     * Define a linker symbol to mark the end of the RW memory area for this
-     * image.
-     */
     __RW_END__ = .;
-
     __BL32_END__ = .;
 
     /DISCARD/ : {
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
index d86ae55..a6658dd 100644
--- a/bl32/tsp/tsp.ld.S
+++ b/bl32/tsp/tsp.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,71 +11,73 @@
 OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
 ENTRY(tsp_entrypoint)
 
-
 MEMORY {
     RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
 }
 
-
-SECTIONS
-{
+SECTIONS {
     . = BL32_BASE;
+
     ASSERT(. == ALIGN(PAGE_SIZE),
-           "BL32_BASE address is not aligned on a page boundary.")
+        "BL32_BASE address is not aligned on a page boundary.")
 
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
+
         *tsp_entrypoint.o(.text*)
         *(.text*)
         *(.vectors)
+
         . = ALIGN(PAGE_SIZE);
+
         __TEXT_END__ = .;
     } >RAM
 
     .rodata . : {
         __RODATA_START__ = .;
+
         *(.rodata*)
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         . = ALIGN(PAGE_SIZE);
+
         __RODATA_END__ = .;
     } >RAM
-#else
-    ro . : {
+#else /* SEPARATE_CODE_AND_RODATA */
+    .ro . : {
         __RO_START__ = .;
+
         *tsp_entrypoint.o(.text*)
         *(.text*)
         *(.rodata*)
 
-	RODATA_COMMON
+        RODATA_COMMON
 
         *(.vectors)
 
         __RO_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked as
-         * read-only, executable.  No RW data from the next section must
-         * creep in.  Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable. No RW data from the next section must creep in. Ensure
+         * that the rest of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __RO_END__ = .;
     } >RAM
-#endif
+#endif /* SEPARATE_CODE_AND_RODATA */
 
-    /*
-     * Define a linker symbol to mark start of the RW memory area for this
-     * image.
-     */
-    __RW_START__ = . ;
+    __RW_START__ = .;
 
     DATA_SECTION >RAM
     RELA_SECTION >RAM
 
 #ifdef TSP_PROGBITS_LIMIT
     ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
-#endif
+#endif /* TSP_PROGBITS_LIMIT */
 
     STACK_SECTION >RAM
     BSS_SECTION >RAM
@@ -83,29 +85,27 @@
 
 #if USE_COHERENT_MEM
     /*
-     * The base address of the coherent memory section must be page-aligned (4K)
-     * to guarantee that the coherent data are stored on their own pages and
-     * are not mixed with normal data.  This is required to set up the correct
-     * memory attributes for the coherent data page tables.
+     * The base address of the coherent memory section must be page-aligned to
+     * guarantee that the coherent data are stored on their own pages and are
+     * not mixed with normal data. This is required to set up the correct memory
+     * attributes for the coherent data page tables.
      */
-    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
         __COHERENT_RAM_START__ = .;
-        *(tzfw_coherent_mem)
+        *(.tzfw_coherent_mem)
         __COHERENT_RAM_END_UNALIGNED__ = .;
+
         /*
-         * Memory page(s) mapped to this section will be marked
-         * as device memory.  No other unexpected data must creep in.
-         * Ensure the rest of the current memory page is unused.
+         * Memory page(s) mapped to this section will be marked as device
+         * memory. No other unexpected data must creep in. Ensure that the rest
+         * of the current memory page is unused.
          */
         . = ALIGN(PAGE_SIZE);
+
         __COHERENT_RAM_END__ = .;
     } >RAM
-#endif
+#endif /* USE_COHERENT_MEM */
 
-    /*
-     * Define a linker symbol to mark the end of the RW memory area for this
-     * image.
-     */
     __RW_END__ = .;
     __BL32_END__ = .;
 
@@ -114,10 +114,11 @@
     }
 
     __BSS_SIZE__ = SIZEOF(.bss);
+
 #if USE_COHERENT_MEM
     __COHERENT_RAM_UNALIGNED_SIZE__ =
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
+#endif /* USE_COHERENT_MEM */
 
     ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
 }
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 1811bbb..2f09b21 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -38,6 +38,10 @@
    in EL3 FW. This build option should be set to 1 if the target platform contains
    at least 1 CPU that requires this mitigation. Defaults to 1.
 
+-  ``WORKAROUND_CVE_2024_7881``: Enables mitigation for `CVE-2024-7881`.
+   This build option should be set to 1 if the target platform contains at
+   least 1 CPU that requires this mitigation. Defaults to 1.
+
 .. _arm_cpu_macros_errata_workarounds:
 
 CPU Errata Workarounds
@@ -1051,7 +1055,7 @@
 
 --------------
 
-*Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.*
 
 .. _CVE-2017-5715: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
 .. _CVE-2018-3639: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index a450588..eb7792f 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -900,7 +900,7 @@
 A runtime service is registered using the ``DECLARE_RT_SVC()`` macro, specifying
 the name of the service, the range of OENs covered, the type of service and
 initialization and call handler functions. This macro instantiates a
 ``const struct rt_svc_desc`` for the service with these details (see
 ``runtime_svc.h``).
-This structure is allocated in a special ELF section ``rt_svc_descs``, enabling
+This structure is allocated in a special ELF section ``.rt_svc_descs``, enabling
 the framework to find all service descriptors included into BL31.
 
 The specific service for a SMC Function is selected based on the OEN and call
@@ -2224,7 +2224,7 @@
 has been redesigned. The changes utilise the characteristic of Lamport's Bakery
 algorithm mentioned earlier. The bakery_lock structure only allocates the memory
 for a single CPU. The macro ``DEFINE_BAKERY_LOCK`` allocates all the bakery locks
-needed for a CPU into a section ``bakery_lock``. The linker allocates the memory
+needed for a CPU into a section ``.bakery_lock``. The linker allocates the memory
 for other cores by using the total size allocated for the bakery_lock section
 and multiplying it with (PLATFORM_CORE_COUNT - 1). This enables software to
 perform software cache maintenance on the lock data structure without running
@@ -2252,7 +2252,7 @@
 
 ::
 
-    bakery_lock section start
+    .bakery_lock section start
     |----------------|
     | `bakery_info_t`| <-- Lock_0 per-CPU field
     |    Lock_0      |     for CPU0
@@ -2289,7 +2289,7 @@
 
 Consider a system of 2 CPUs with 'N' bakery locks as shown above. For an
 operation on Lock_N, the corresponding ``bakery_info_t`` in both CPU0 and CPU1
-``bakery_lock`` section need to be fetched and appropriate cache operations need
+``.bakery_lock`` section need to be fetched and appropriate cache operations need
 to be performed for each access.
 
 On Arm Platforms, bakery locks are used in psci (``psci_locks``) and power controller
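
The per-CPU layout described above implies a simple addressing scheme: CPU0
uses a lock's link-time address, and CPUn's private copy sits n section sizes
further on. A sketch of that lookup (the helper is hypothetical; the real
logic lives in lib/locks/bakery/bakery_lock_normal.c):

    #include <stdint.h>

    /* Absolute linker symbol from bl_common.ld.h; its address is the value. */
    extern char __PERCPU_BAKERY_LOCK_SIZE__[];

    static inline void *get_cpu_lock(void *lock, unsigned int cpu_id)
    {
            uintptr_t sz = (uintptr_t)__PERCPU_BAKERY_LOCK_SIZE__;

            return (void *)((uintptr_t)lock + (cpu_id * sz));
    }
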
diff --git a/docs/getting_started/porting-guide.rst b/docs/getting_started/porting-guide.rst
index 985ad22..bc17f2f 100644
--- a/docs/getting_started/porting-guide.rst
+++ b/docs/getting_started/porting-guide.rst
@@ -66,22 +66,22 @@
 If the build option ``USE_COHERENT_MEM`` is enabled, each platform can allocate a
 block of identity mapped secure memory with Device-nGnRE attributes aligned to
 page boundary (4K) for each BL stage. All sections which allocate coherent
-memory are grouped under ``coherent_ram``. For ex: Bakery locks are placed in a
-section identified by name ``bakery_lock`` inside ``coherent_ram`` so that its
+memory are grouped under ``.coherent_ram``. For example, bakery locks are placed
+in a section named ``.bakery_lock`` inside ``.coherent_ram`` so that it is
 possible for the firmware to place variables in it using the following C code
 directive:
 
 ::
 
-    __section("bakery_lock")
+    __section(".bakery_lock")
 
 Or alternatively the following assembler code directive:
 
 ::
 
-    .section bakery_lock
+    .section .bakery_lock
 
-The ``coherent_ram`` section is a sum of all sections like ``bakery_lock`` which are
+The ``.coherent_ram`` section is a sum of all sections like ``.bakery_lock`` which are
 used to allocate any data structures that are accessed both when a CPU is
 executing with its MMU and caches enabled, and when it's running with its MMU
 and caches disabled. Examples are given below.
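
For instance, a minimal C usage sketch (assuming TF-A's ``__section()`` helper
and the ``DEFINE_BAKERY_LOCK`` macro from ``bakery_lock.h``; the variable
names are illustrative):

    #include <stdint.h>
    #include <lib/bakery_lock.h>

    /* With USE_COHERENT_MEM=1 this lands in .bakery_lock, inside .coherent_ram. */
    DEFINE_BAKERY_LOCK(demo_pwr_lock);

    /* Hypothetical flag that must stay visible with the MMU and caches off. */
    static volatile uint32_t demo_boot_flag __section(".tzfw_coherent_mem");
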
@@ -2404,7 +2404,7 @@
 accommodate all the bakery locks.
 
 If this constant is not defined when ``USE_COHERENT_MEM = 0``, the linker
-calculates the size of the ``bakery_lock`` input section, aligns it to the
+calculates the size of the ``.bakery_lock`` input section, aligns it to the
 nearest ``CACHE_WRITEBACK_GRANULE``, multiplies it with ``PLATFORM_CORE_COUNT``
 and stores the result in a linker symbol. This constant prevents a platform
 from relying on the linker and provides a more efficient mechanism for
@@ -3498,7 +3498,7 @@
 
 --------------
 
-*Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.*
 
 .. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
 .. _Arm Generic Interrupt Controller version 2.0 (GICv2): http://infocenter.arm.com/help/topic/com.arm.doc.ihi0048b/index.html
diff --git a/docs/plat/arm/arm-build-options.rst b/docs/plat/arm/arm-build-options.rst
index 4bfce61..31b38a2 100644
--- a/docs/plat/arm/arm-build-options.rst
+++ b/docs/plat/arm/arm-build-options.rst
@@ -152,6 +152,12 @@
    require all the CPUs to execute the CPU specific power down sequence to
    complete a warm reboot sequence in which only the CPUs are power cycled.
 
+Arm FVP Build Options
+---------------------
+
+- ``FVP_TRUSTED_SRAM_SIZE``: Size (in kilobytes) of the Trusted SRAM region to
+  utilize when building for the FVP platform. This option defaults to 256.
+
 --------------
 
 .. |FIP in a GPT image| image:: ../../resources/diagrams/FIP_in_a_GPT_image.png
diff --git a/include/common/bl_common.ld.h b/include/common/bl_common.ld.h
index 080e331..c9bed1a 100644
--- a/include/common/bl_common.ld.h
+++ b/include/common/bl_common.ld.h
@@ -24,7 +24,7 @@
 #define CPU_OPS						\
 	. = ALIGN(STRUCT_ALIGN);			\
 	__CPU_OPS_START__ = .;				\
-	KEEP(*(cpu_ops))				\
+	KEEP(*(.cpu_ops))				\
 	__CPU_OPS_END__ = .;
 
 #define PARSER_LIB_DESCS				\
@@ -36,14 +36,14 @@
 #define RT_SVC_DESCS					\
 	. = ALIGN(STRUCT_ALIGN);			\
 	__RT_SVC_DESCS_START__ = .;			\
-	KEEP(*(rt_svc_descs))				\
+	KEEP(*(.rt_svc_descs))				\
 	__RT_SVC_DESCS_END__ = .;
 
 #if SPMC_AT_EL3
 #define EL3_LP_DESCS					\
 	. = ALIGN(STRUCT_ALIGN);			\
 	__EL3_LP_DESCS_START__ = .;			\
-	KEEP(*(el3_lp_descs))				\
+	KEEP(*(.el3_lp_descs))				\
 	__EL3_LP_DESCS_END__ = .;
 #else
 #define EL3_LP_DESCS
@@ -52,7 +52,7 @@
 #define PMF_SVC_DESCS					\
 	. = ALIGN(STRUCT_ALIGN);			\
 	__PMF_SVC_DESCS_START__ = .;			\
-	KEEP(*(pmf_svc_descs))				\
+	KEEP(*(.pmf_svc_descs))				\
 	__PMF_SVC_DESCS_END__ = .;
 
 #define FCONF_POPULATOR					\
@@ -81,7 +81,7 @@
 #define BASE_XLAT_TABLE					\
 	. = ALIGN(16);					\
 	__BASE_XLAT_TABLE_START__ = .;			\
-	*(base_xlat_table)				\
+	*(.base_xlat_table)				\
 	__BASE_XLAT_TABLE_END__ = .;
 
 #if PLAT_RO_XLAT_TABLES
@@ -135,9 +135,9 @@
 
 #if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
 #define STACK_SECTION					\
-	stacks (NOLOAD) : {				\
+	.stacks (NOLOAD) : {				\
 		__STACKS_START__ = .;			\
-		*(tzfw_normal_stacks)			\
+		*(.tzfw_normal_stacks)			\
 		__STACKS_END__ = .;			\
 	}
 #endif
@@ -170,7 +170,7 @@
 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
 	__BAKERY_LOCK_START__ = .;			\
 	__PERCPU_BAKERY_LOCK_START__ = .;		\
-	*(bakery_lock)					\
+	*(.bakery_lock)					\
 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
 	__PERCPU_BAKERY_LOCK_END__ = .;			\
 	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
@@ -191,7 +191,7 @@
 #define PMF_TIMESTAMP					\
 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
 	__PMF_TIMESTAMP_START__ = .;			\
-	KEEP(*(pmf_timestamp_array))			\
+	KEEP(*(.pmf_timestamp_array))			\
 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
 	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
 	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
@@ -216,15 +216,15 @@
 	}
 
 /*
- * The xlat_table section is for full, aligned page tables (4K).
+ * The .xlat_table section is for full, aligned page tables (4K).
  * Removing them from .bss avoids forcing 4K alignment on
  * the .bss section. The tables are initialized to zero by the translation
  * tables library.
  */
 #define XLAT_TABLE_SECTION				\
-	xlat_table (NOLOAD) : {				\
+	.xlat_table (NOLOAD) : {				\
 		__XLAT_TABLE_START__ = .;		\
-		*(xlat_table)				\
+		*(.xlat_table)				\
 		__XLAT_TABLE_END__ = .;			\
 	}
 
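
For reference, the .tzfw_normal_stacks input section that STACK_SECTION now
collects is fed by the stack declarations. TF-A declares these from assembly
(the declare_stack helper); a hypothetical C equivalent using the same section
name and the usual platform_def.h constants:

    #include <platform_def.h>

    static unsigned char normal_stacks[PLATFORM_CORE_COUNT][PLATFORM_STACK_SIZE]
            __attribute__((aligned(CACHE_WRITEBACK_GRANULE),
                           section(".tzfw_normal_stacks")));
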
diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h
index 472a32a..4793e2f 100644
--- a/include/common/runtime_svc.h
+++ b/include/common/runtime_svc.h
@@ -72,7 +72,7 @@
  */
 #define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch)	\
 	static const rt_svc_desc_t __svc_desc_ ## _name			\
-		__section("rt_svc_descs") __used = {			\
+		__section(".rt_svc_descs") __used = {			\
 			.start_oen = (_start),				\
 			.end_oen = (_end),				\
 			.call_type = (_type),				\
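
A sketch of how DECLARE_RT_SVC is used with the renamed section (the service
name, handler and OEN range are illustrative; the init handler may be NULL):

    #include <common/runtime_svc.h>

    static uintptr_t demo_smc_handler(uint32_t smc_fid, u_register_t x1,
                                      u_register_t x2, u_register_t x3,
                                      u_register_t x4, void *cookie,
                                      void *handle, u_register_t flags)
    {
            SMC_RET1(handle, SMC_UNK);
    }

    /* The descriptor is emitted into .rt_svc_descs and found by the framework. */
    DECLARE_RT_SVC(
            demo_svc,
            OEN_OEM_START,
            OEN_OEM_END,
            SMC_TYPE_FAST,
            NULL,
            demo_smc_handler
    );
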
diff --git a/include/lib/bakery_lock.h b/include/lib/bakery_lock.h
index 1fece01..2cf2b44 100644
--- a/include/lib/bakery_lock.h
+++ b/include/lib/bakery_lock.h
@@ -96,7 +96,7 @@
 void bakery_lock_get(bakery_lock_t *bakery);
 void bakery_lock_release(bakery_lock_t *bakery);
 
-#define DEFINE_BAKERY_LOCK(_name) bakery_lock_t _name __section("bakery_lock")
+#define DEFINE_BAKERY_LOCK(_name) bakery_lock_t _name __section(".bakery_lock")
 
 #define DECLARE_BAKERY_LOCK(_name) extern bakery_lock_t _name
 
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index f4b1d1e..096e0b1 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -61,7 +61,7 @@
 	 */
 	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
 		_power_down_ops:vararg
-	.section cpu_ops, "a"
+	.section .cpu_ops, "a"
 	.align 2
 	.type cpu_ops_\_name, %object
 	.word \_midr
diff --git a/include/lib/cpus/aarch64/cortex_x4.h b/include/lib/cpus/aarch64/cortex_x4.h
index f701216..116f9a0 100644
--- a/include/lib/cpus/aarch64/cortex_x4.h
+++ b/include/lib/cpus/aarch64/cortex_x4.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -36,6 +36,11 @@
 #define CORTEX_X4_CPUACTLR5_EL1				S3_0_C15_C8_0
 #define CORTEX_X4_CPUACTLR5_EL1_BIT_14			(ULL(1) << 14)
 
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUACTLR6_EL1				S3_0_C15_C8_1
+
 #ifndef __ASSEMBLER__
 #if ERRATA_X4_2726228
 long check_erratum_cortex_x4_2726228(long cpu_rev);
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index f4ba13f..19f45e8 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -63,6 +63,10 @@
 	 *	This is a placeholder for future per CPU operations. Currently,
 	 *	some CPUs use this entry to set a test function to determine if
 	 *	the workaround for CVE-2022-23960 needs to be applied or not.
+	 * _extra4:
+	 *	This is a placeholder for future per CPU operations. Currently,
+	 *	some CPUs use this entry to set a test function to determine if
+	 *	the workaround for CVE-2024-7881 needs to be applied or not.
 	 * _e_handler:
 	 *	This is a placeholder for future per CPU exception handlers.
 	 * _power_down_ops:
@@ -75,8 +79,9 @@
 	 *	used to handle power down at subsequent levels
 	 */
 	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
-		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
-	.section cpu_ops, "a"
+		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
+		_e_handler:req, _power_down_ops:vararg
+	.section .cpu_ops, "a"
 	.align 3
 	.type cpu_ops_\_name, %object
 	.quad \_midr
@@ -86,6 +91,7 @@
 	.quad \_extra1
 	.quad \_extra2
 	.quad \_extra3
+	.quad \_extra4
 	.quad \_e_handler
 #ifdef IMAGE_BL31
 	/* Insert list of functions */
@@ -154,21 +160,28 @@
 
 	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
 		_power_down_ops:vararg
-		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
 			\_power_down_ops
 	.endm
 
 	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
 		_e_handler:req, _power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			0, 0, 0, \_e_handler, \_power_down_ops
+			0, 0, 0, 0, \_e_handler, \_power_down_ops
 	.endm
 
 	.macro declare_cpu_ops_wa _name:req, _midr:req, \
 		_resetfunc:req, _extra1:req, _extra2:req, \
 		_extra3:req, _power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
+			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
+	.endm
+
+	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
+		_resetfunc:req, _extra1:req, _extra2:req, \
+		_extra3:req, _extra4:req, _power_down_ops:vararg
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
+			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
 	.endm
 
 /* TODO can be deleted once all CPUs have been converted */
diff --git a/include/lib/cpus/aarch64/neoverse_v2.h b/include/lib/cpus/aarch64/neoverse_v2.h
index 39a6607..a0e7130 100644
--- a/include/lib/cpus/aarch64/neoverse_v2.h
+++ b/include/lib/cpus/aarch64/neoverse_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -57,4 +57,9 @@
 #define NEOVERSE_V2_CPUACTLR5_EL1_BIT_56		(ULL(1) << 56)
 #define NEOVERSE_V2_CPUACTLR5_EL1_BIT_55		(ULL(1) << 55)
 
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUACTLR6_EL1			S3_0_C15_C8_1
+
 #endif /* NEOVERSE_V2_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v3.h b/include/lib/cpus/aarch64/neoverse_v3.h
index be9530e..a31bdd3 100644
--- a/include/lib/cpus/aarch64/neoverse_v3.h
+++ b/include/lib/cpus/aarch64/neoverse_v3.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, ARM Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,7 +22,12 @@
 /*******************************************************************************
  * CPU Power Control register specific definitions
  ******************************************************************************/
-#define NEOVERSE_V3_CPUPWRCTLR_EL1			S3_0_C15_C2_7
+#define NEOVERSE_V3_CPUPWRCTLR_EL1				S3_0_C15_C2_7
 #define NEOVERSE_V3_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
 
+/*******************************************************************************
+ * CPU Auxiliary control register 6 specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V3_CPUACTLR6_EL1				S3_0_C15_C8_1
+
 #endif /* NEOVERSE_V3_H */
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index 8b36ff1..3fce66a 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -28,6 +28,7 @@
 #define CPU_NO_EXTRA1_FUNC		0
 #define CPU_NO_EXTRA2_FUNC		0
 #define CPU_NO_EXTRA3_FUNC		0
+#define CPU_NO_EXTRA4_FUNC		0
 #endif /* __aarch64__ */
 
 
@@ -45,6 +46,7 @@
 #define CPU_EXTRA1_FUNC_SIZE	CPU_WORD_SIZE
 #define CPU_EXTRA2_FUNC_SIZE	CPU_WORD_SIZE
 #define CPU_EXTRA3_FUNC_SIZE	CPU_WORD_SIZE
+#define CPU_EXTRA4_FUNC_SIZE	CPU_WORD_SIZE
 #define CPU_E_HANDLER_FUNC_SIZE CPU_WORD_SIZE
 /* The power down core and cluster is needed only in BL31 and BL32 */
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
@@ -91,7 +93,8 @@
 #define CPU_EXTRA1_FUNC		CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
 #define CPU_EXTRA2_FUNC		CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
 #define CPU_EXTRA3_FUNC		CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
-#define CPU_E_HANDLER_FUNC	CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
+#define CPU_EXTRA4_FUNC		CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
+#define CPU_E_HANDLER_FUNC	CPU_EXTRA4_FUNC + CPU_EXTRA4_FUNC_SIZE
 #define CPU_PWR_DWN_OPS		CPU_E_HANDLER_FUNC + CPU_E_HANDLER_FUNC_SIZE
 #else
 #define CPU_PWR_DWN_OPS		CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
@@ -122,6 +125,7 @@
 	void (*extra1_func)(void);
 	void (*extra2_func)(void);
 	void (*extra3_func)(void);
+	void (*extra4_func)(void);
 	void (*e_handler_func)(long es);
 #endif /* __aarch64__ */
 #if (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS
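
Note: the assembly offsets above must track the C structure one-for-one. A
minimal compile-time sketch of the invariant the new CPU_EXTRA4_FUNC offset
encodes, using a simplified stand-in struct (not the tree's cpu_ops_t):

    #include <stddef.h>

    /* Simplified AArch64-style layout (8-byte words); the real structure
     * also carries errata and power-down entries. */
    struct cpu_ops_sketch {
    	unsigned long midr;
    	void (*reset_func)(void);
    	void (*extra1_func)(void);
    	void (*extra2_func)(void);
    	void (*extra3_func)(void);
    	void (*extra4_func)(void);	/* new CVE-2024-7881 slot */
    	void (*e_handler_func)(long es);
    };

    /* Mirrors CPU_EXTRA4_FUNC = CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE. */
    _Static_assert(offsetof(struct cpu_ops_sketch, extra4_func) ==
    	       offsetof(struct cpu_ops_sketch, extra3_func) + sizeof(void *),
    	       "extra4 slot must immediately follow extra3");
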
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index d8960d6..04271e3 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -49,6 +49,8 @@
 unsigned int check_if_affected_core(void);
 #endif
 
+int check_wa_cve_2024_7881(void);
+
 /*
  * NOTE that this structure will be different on AArch32 and AArch64. The
  * uintptr_t will reflect the change and the alignment will be correct in both.
diff --git a/include/lib/el3_runtime/pubsub.h b/include/lib/el3_runtime/pubsub.h
index 64fe5cc..cbd8ecc 100644
--- a/include/lib/el3_runtime/pubsub.h
+++ b/include/lib/el3_runtime/pubsub.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,7 +12,7 @@
 /* For the linker ... */
 #define __pubsub_start_sym(event)	__pubsub_##event##_start
 #define __pubsub_end_sym(event)		__pubsub_##event##_end
-#define __pubsub_section(event)		__pubsub_##event
+#define __pubsub_section(event)		.__pubsub_##event
 
 /*
  * REGISTER_PUBSUB_EVENT has a different definition between linker and compiler
@@ -54,7 +54,7 @@
 #define __pubsub_end_sym(event)		__pubsub_##event##_end
 #endif
 
-#define __pubsub_section(event)		__section("__pubsub_" #event)
+#define __pubsub_section(event)		__section(".__pubsub_" #event)
 
 /*
  * In compiler context, REGISTER_PUBSUB_EVENT declares the per-event symbols
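
Note: these macros are the only way the pubsub sections are referenced, so the
rename is invisible to users. A minimal usage sketch, assuming a hypothetical
event `example_event` (a real event must be registered via
REGISTER_PUBSUB_EVENT() in pubsub_events.h):

    #include <lib/el3_runtime/pubsub.h>

    /* Illustrative subscriber; pubsub callbacks take and return void *. */
    static void *on_example_event(const void *arg)
    {
    	(void)arg;
    	return NULL;
    }
    SUBSCRIBE_TO_EVENT(example_event, on_example_event);

    void notify_example(void)
    {
    	/* Walks the subscribers placed in .__pubsub_example_event. */
    	PUBLISH_EVENT(example_event);
    }
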
diff --git a/include/lib/pmf/pmf_helpers.h b/include/lib/pmf/pmf_helpers.h
index b49c6da..01cc179 100644
--- a/include/lib/pmf/pmf_helpers.h
+++ b/include/lib/pmf/pmf_helpers.h
@@ -154,7 +154,7 @@
 	extern unsigned long long pmf_ts_mem_ ## _name[_total_id];	\
 	unsigned long long pmf_ts_mem_ ## _name[_total_id]	\
 	__aligned(CACHE_WRITEBACK_GRANULE)			\
-	__section("pmf_timestamp_array")			\
+	__section(".pmf_timestamp_array")			\
 	__used;
 
 /*
@@ -225,7 +225,7 @@
 #define PMF_DEFINE_SERVICE_DESC(_name, _implid, _svcid, _totalid,	\
 		_init, _getts_by_mpidr) 				\
 	static const pmf_svc_desc_t __pmf_desc_ ## _name 		\
-	__section("pmf_svc_descs") __used = {		 		\
+	__section(".pmf_svc_descs") __used = {		 		\
 		.h.type = PARAM_EP, 					\
 		.h.version = VERSION_1, 				\
 		.h.size = sizeof(pmf_svc_desc_t),			\
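
Note: both renamed sections are populated through the PMF macros rather than
named directly by services, so no caller changes are needed. A hedged usage
sketch; the service name, implementation ID and timestamp count below are
placeholders, not allocated values:

    #include <lib/pmf/pmf.h>

    /* Illustrative service registration: backing storage lands in the
     * renamed .pmf_timestamp_array section. */
    PMF_REGISTER_SERVICE(demo_svc, U(0x21), U(2), PMF_STORE_ENABLE)

    void demo_checkpoint(void)
    {
    	/* Record timestamp 0 of the demo service. */
    	PMF_CAPTURE_TIMESTAMP(demo_svc, U(0), PMF_CACHE_MAINT);
    }
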
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 69ad027..4d16ced 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -203,7 +203,7 @@
 					 (_virt_addr_space_size),	\
 					 (_phy_addr_space_size),	\
 					 EL_REGIME_INVALID,		\
-					 "xlat_table", "base_xlat_table")
+					 ".xlat_table", ".base_xlat_table")
 
 /*
  * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
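
Note: callers of REGISTER_XLAT_CONTEXT pick up the new ".xlat_table" and
".base_xlat_table" names automatically, since the macro supplies them. A
minimal sketch, with placeholder region/table counts and 4 GB address spaces:

    #include <lib/xlat_tables/xlat_tables_v2.h>

    /* Illustrative context: 16 mmap regions, 8 translation tables. The
     * tables are placed in .xlat_table/.base_xlat_table by the macro. */
    REGISTER_XLAT_CONTEXT(demo, 16, 8, (1ULL << 32), (1ULL << 32));
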
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 714d52c..23bae45 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -107,7 +107,7 @@
 /*
  * Define a region within the TZC secured DRAM for use by EL3 runtime
  * firmware. This region is meant to be NOLOAD and will not be zero
- * initialized. Data sections with the attribute `arm_el3_tzc_dram` will be
+ * initialized. Data sections with the attribute `.arm_el3_tzc_dram` will be
  * placed here. 3MB region is reserved if RME is enabled, 2MB otherwise.
  */
 #define ARM_EL3_TZC_DRAM1_SIZE		UL(0x00300000) /* 3MB */
diff --git a/include/plat/arm/common/arm_reclaim_init.ld.S b/include/plat/arm/common/arm_reclaim_init.ld.S
index 788e9ff..a77c964 100644
--- a/include/plat/arm/common/arm_reclaim_init.ld.S
+++ b/include/plat/arm/common/arm_reclaim_init.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -26,9 +26,9 @@
 #define	ABS		ABSOLUTE
 
 #define STACK_SECTION							\
-	stacks (NOLOAD) : {						\
+	.stacks (NOLOAD) : {						\
 		__STACKS_START__ = .;					\
-		*(tzfw_normal_stacks)					\
+		*(.tzfw_normal_stacks)					\
 		__STACKS_END__ = .;					\
 		/* Allow room for the init section where necessary. */	\
 		OFFSET = ABS(SIZEOF(.init) - (. - __STACKS_START__));	\
diff --git a/include/plat/arm/common/arm_tzc_dram.ld.S b/include/plat/arm/common/arm_tzc_dram.ld.S
index 6dcea0b..c790bb9 100644
--- a/include/plat/arm/common/arm_tzc_dram.ld.S
+++ b/include/plat/arm/common/arm_tzc_dram.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,9 +17,9 @@
 	. = ARM_EL3_TZC_DRAM1_BASE;
 	ASSERT(. == ALIGN(PAGE_SIZE),
 	"ARM_EL3_TZC_DRAM_BASE address is not aligned on a page boundary.")
-	el3_tzc_dram (NOLOAD) : ALIGN(PAGE_SIZE) {
+	.el3_tzc_dram (NOLOAD) : ALIGN(PAGE_SIZE) {
 	__EL3_SEC_DRAM_START__ = .;
-	*(arm_el3_tzc_dram)
+	*(.arm_el3_tzc_dram)
 	__EL3_SEC_DRAM_UNALIGNED_END__ = .;
 
 	. = ALIGN(PAGE_SIZE);
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index 645b388..85b6b83 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +13,7 @@
 #define SMCCC_ARCH_WORKAROUND_1		U(0x80008000)
 #define SMCCC_ARCH_WORKAROUND_2		U(0x80007FFF)
 #define SMCCC_ARCH_WORKAROUND_3		U(0x80003FFF)
+#define SMCCC_ARCH_WORKAROUND_4		U(0x80000004)
 
 #define SMCCC_GET_SOC_VERSION		U(0)
 #define SMCCC_GET_SOC_REVISION		U(1)
diff --git a/include/services/el3_spmc_logical_sp.h b/include/services/el3_spmc_logical_sp.h
index 7ec9958..5ce33ed 100644
--- a/include/services/el3_spmc_logical_sp.h
+++ b/include/services/el3_spmc_logical_sp.h
@@ -35,7 +35,7 @@
 #define DECLARE_LOGICAL_PARTITION(_name, _init, _sp_id, _uuid, _properties, \
 				  _direct_req)				    \
 	static const struct el3_lp_desc __partition_desc_ ## _name	    \
-		__section("el3_lp_descs") __used = {			    \
+		__section(".el3_lp_descs") __used = {			    \
 			.debug_name = #_name,				    \
 			.init = (_init),				    \
 			.sp_id = (_sp_id),				    \
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index fd929c6..db3ec74 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -104,6 +104,17 @@
 
 check_erratum_chosen cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+workaround_reset_start cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+	/* ---------------------------------
+	 * Sets BIT41 of CPUACTLR6_EL1 which
+	 * disables L1 Data cache prefetcher
+	 * ---------------------------------
+	 */
+	sysreg_bit_set CORTEX_X3_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end cortex_x3, CVE(2024, 7881)
+
+check_erratum_chosen cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
 cpu_reset_func_start cortex_x3
 	/* Disable speculative loads */
 	msr	SSBS, xzr
@@ -146,6 +157,10 @@
 	ret
 endfunc cortex_x3_cpu_reg_dump
 
-declare_cpu_ops cortex_x3, CORTEX_X3_MIDR, \
+declare_cpu_ops_wa_4 cortex_x3, CORTEX_X3_MIDR, \
 	cortex_x3_reset_func, \
+	CPU_NO_EXTRA1_FUNC, \
+	CPU_NO_EXTRA2_FUNC, \
+	CPU_NO_EXTRA3_FUNC, \
+	check_erratum_cortex_x3_7881, \
 	cortex_x3_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index 644bc58..ebcb5aa 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -108,6 +108,17 @@
 
 check_erratum_chosen cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+workaround_reset_start cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+	/* ---------------------------------
+	 * Sets BIT41 of CPUACTLR6_EL1 which
+	 * disables L1 Data cache prefetcher
+	 * ---------------------------------
+	 */
+	sysreg_bit_set CORTEX_X4_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end cortex_x4, CVE(2024, 7881)
+
+check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
 cpu_reset_func_start cortex_x4
 	/* Disable speculative loads */
 	msr	SSBS, xzr
@@ -151,6 +162,10 @@
 	ret
 endfunc cortex_x4_cpu_reg_dump
 
-declare_cpu_ops cortex_x4, CORTEX_X4_MIDR, \
+declare_cpu_ops_wa_4 cortex_x4, CORTEX_X4_MIDR, \
 	cortex_x4_reset_func, \
+	CPU_NO_EXTRA1_FUNC, \
+	CPU_NO_EXTRA2_FUNC, \
+	CPU_NO_EXTRA3_FUNC, \
+	check_erratum_cortex_x4_7881, \
 	cortex_x4_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index f93e8f8..9bfe9de 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -317,6 +317,43 @@
 endfunc check_wa_cve_2017_5715
 
 /*
+ * int check_wa_cve_2024_7881(void);
+ *
+ * This function returns:
+ *  - ERRATA_APPLIES when firmware mitigation is required.
+ *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
+ *  - ERRATA_MISSING when firmware mitigation would be required but
+ *    is not compiled in.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ *       in per-CPU data.
+ */
+.globl	check_wa_cve_2024_7881
+func check_wa_cve_2024_7881
+	mrs	x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
+	/*
+	 * If the reserved function pointer is NULL, this CPU
+	 * is unaffected by CVE-2024-7881 so bail out.
+	 */
+	cmp	x0, #CPU_NO_EXTRA4_FUNC
+	beq	1f
+	br	x0
+1:
+	mov	x0, #ERRATA_NOT_APPLIES
+	ret
+endfunc check_wa_cve_2024_7881
+
+/*
  * void *wa_cve_2018_3639_get_disable_ptr(void);
  *
  * Returns a function pointer which is used to disable mitigation
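
Note: in rough C terms the new helper performs the following lookup. This is a
paraphrase for readability, with `get_cpu_ops_ptr()` as a hypothetical stand-in
for the tpidr_el3-based per-CPU fetch and a one-field stand-in struct:

    #include <lib/cpus/errata.h>	/* ERRATA_NOT_APPLIES */

    struct cpu_ops_sketch { void (*extra4_func)(void); /* ... */ };

    extern struct cpu_ops_sketch *get_cpu_ops_ptr(void);	/* hypothetical */

    int check_wa_cve_2024_7881_sketch(void)
    {
    	int (*check)(void) =
    		(int (*)(void))get_cpu_ops_ptr()->extra4_func;

    	/* CPU_NO_EXTRA4_FUNC (0): no checker was registered, so this
    	 * CPU is unaffected by CVE-2024-7881. */
    	if (check == NULL)
    		return ERRATA_NOT_APPLIES;

    	return check();	/* the `br x0` tail call above */
    }
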
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index ca62c52..9f14155 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -97,6 +97,17 @@
 	wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
 #endif /* WORKAROUND_CVE_2022_23960 */
 
+workaround_reset_start neoverse_v2, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+	/* ---------------------------------
+	 * Sets BIT41 of CPUACTLR6_EL1 which
+	 * disables L1 Data cache prefetcher
+	 * ---------------------------------
+	 */
+	sysreg_bit_set NEOVERSE_V2_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end neoverse_v2, CVE(2024, 7881)
+
+check_erratum_chosen neoverse_v2, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -138,6 +149,10 @@
 	ret
 endfunc neoverse_v2_cpu_reg_dump
 
-declare_cpu_ops neoverse_v2, NEOVERSE_V2_MIDR, \
+declare_cpu_ops_wa_4 neoverse_v2, NEOVERSE_V2_MIDR, \
 	neoverse_v2_reset_func, \
+	CPU_NO_EXTRA1_FUNC, \
+	CPU_NO_EXTRA2_FUNC, \
+	CPU_NO_EXTRA3_FUNC, \
+	check_erratum_neoverse_v2_7881, \
 	neoverse_v2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S
index 031d3c8..d7e8252 100644
--- a/lib/cpus/aarch64/neoverse_v3.S
+++ b/lib/cpus/aarch64/neoverse_v3.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -46,6 +46,17 @@
 
 check_erratum_chosen neoverse_v3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 
+workaround_reset_start neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+	/* ---------------------------------
+	 * Sets BIT41 of CPUACTLR6_EL1 which
+	 * disables L1 Data cache prefetcher
+	 * ---------------------------------
+	 */
+	sysreg_bit_set NEOVERSE_V3_CPUACTLR6_EL1, BIT(41)
+workaround_reset_end neoverse_v3, CVE(2024, 7881)
+
+check_erratum_chosen neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -92,6 +103,10 @@
 	neoverse_v3_reset_func, \
 	neoverse_v3_core_pwr_dwn
 
-declare_cpu_ops neoverse_v3, NEOVERSE_V3_MIDR, \
+declare_cpu_ops_wa_4 neoverse_v3, NEOVERSE_V3_MIDR, \
 	neoverse_v3_reset_func, \
+	CPU_NO_EXTRA1_FUNC, \
+	CPU_NO_EXTRA2_FUNC, \
+	CPU_NO_EXTRA3_FUNC, \
+	check_erratum_neoverse_v3_7881, \
 	neoverse_v3_core_pwr_dwn
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 144d0aa..242819a 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 # Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
@@ -37,6 +37,8 @@
 CPU_FLAG_LIST += DYNAMIC_WORKAROUND_CVE_2018_3639
 WORKAROUND_CVE_2022_23960		?=1
 CPU_FLAG_LIST += WORKAROUND_CVE_2022_23960
+WORKAROUND_CVE_2024_7881		?=1
+CPU_FLAG_LIST += WORKAROUND_CVE_2024_7881
 
 # Flags to indicate internal or external Last level cache
 # By default internal
diff --git a/lib/pmf/pmf_main.c b/lib/pmf/pmf_main.c
index 07137f2..4b98ebb 100644
--- a/lib/pmf/pmf_main.c
+++ b/lib/pmf/pmf_main.c
@@ -17,7 +17,7 @@
 
 /*******************************************************************************
  * The 'pmf_svc_descs' array holds the PMF service descriptors exported by
- * services by placing them in the 'pmf_svc_descs' linker section.
+ * services by placing them in the '.pmf_svc_descs' linker section.
  * The 'pmf_svc_descs_indices' array holds the index of a descriptor in the
  * 'pmf_svc_descs' array. The TIF[15:10] bits in the time-stamp id are used
  * to get an index into the 'pmf_svc_descs_indices' array. This gives the
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index a74f105..c02cca6 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -55,7 +55,7 @@
  ******************************************************************************/
 non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
 #if USE_COHERENT_MEM
-__section("tzfw_coherent_mem")
+__section(".tzfw_coherent_mem")
 #endif
 ;
 
diff --git a/lib/romlib/romlib.ld.S b/lib/romlib/romlib.ld.S
index 2aac4ad..d54a684 100644
--- a/lib/romlib/romlib.ld.S
+++ b/lib/romlib/romlib.ld.S
@@ -8,37 +8,42 @@
 #include <platform_def.h>
 
 MEMORY {
-	ROM (rx): ORIGIN = ROMLIB_RO_BASE, LENGTH = ROMLIB_RO_LIMIT - ROMLIB_RO_BASE
-	RAM (rwx): ORIGIN = ROMLIB_RW_BASE, LENGTH = ROMLIB_RW_END - ROMLIB_RW_BASE
+    ROM (rx): ORIGIN = ROMLIB_RO_BASE, LENGTH = ROMLIB_RO_LIMIT - ROMLIB_RO_BASE
+    RAM (rwx): ORIGIN = ROMLIB_RW_BASE, LENGTH = ROMLIB_RW_END - ROMLIB_RW_BASE
 }
 
 OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
 OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
 ENTRY(jmptbl)
 
-SECTIONS
-{
-	. = ROMLIB_RO_BASE;
-	.text : {
-		*jmptbl.o(.text)
-		*(.text*)
-		*(.rodata*)
-	} >ROM
+SECTIONS {
+    . = ROMLIB_RO_BASE;
 
-	__DATA_ROM_START__ = LOADADDR(.data);
+    .text : {
+        *jmptbl.o(.text)
+        *(.text*)
+        *(.rodata*)
+    } >ROM
 
-	.data : {
-		__DATA_RAM_START__ = .;
-		*(.data*)
-		__DATA_RAM_END__ = .;
-	} >RAM AT>ROM
+    __DATA_ROM_START__ = LOADADDR(.data);
 
-	__DATA_SIZE__ = SIZEOF(.data);
+    .data : {
+        __DATA_RAM_START__ = .;
 
-	.bss : {
-		__BSS_START__ = .;
-		*(.bss*)
-		__BSS_END__ = .;
-	 } >RAM
-	__BSS_SIZE__ = SIZEOF(.bss);
+        *(.data*)
+
+        __DATA_RAM_END__ = .;
+    } >RAM AT>ROM
+
+    __DATA_SIZE__ = SIZEOF(.data);
+
+    .bss : {
+        __BSS_START__ = .;
+
+        *(.bss*)
+
+        __BSS_END__ = .;
+    } >RAM
+
+    __BSS_SIZE__ = SIZEOF(.bss);
 }
diff --git a/lib/xlat_tables/aarch32/nonlpae_tables.c b/lib/xlat_tables/aarch32/nonlpae_tables.c
index 7cd509d..1e207a4 100644
--- a/lib/xlat_tables/aarch32/nonlpae_tables.c
+++ b/lib/xlat_tables/aarch32/nonlpae_tables.c
@@ -138,10 +138,10 @@
 static uintptr_t xlat_max_va;
 
 static uint32_t mmu_l1_base[NUM_1MB_IN_4GB]
-	__aligned(MMU32B_L1_TABLE_ALIGN) __attribute__((section("xlat_table")));
+	__aligned(MMU32B_L1_TABLE_ALIGN) __attribute__((section(".xlat_table")));
 
 static uint32_t mmu_l2_base[MAX_XLAT_TABLES][NUM_4K_IN_1MB]
-	__aligned(MMU32B_L2_TABLE_ALIGN) __attribute__((section("xlat_table")));
+	__aligned(MMU32B_L2_TABLE_ALIGN) __attribute__((section(".xlat_table")));
 
 /*
  * Array of all memory regions stored in order of ascending base address.
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 23fe3f0..71273cb 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -39,7 +39,7 @@
 #define MT_UNKNOWN	~0U
 
 static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
-			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
+			__aligned(XLAT_TABLE_SIZE) __section(".xlat_table");
 
 static unsigned int next_xlat;
 static unsigned long long xlat_max_pa;
diff --git a/plat/arm/board/arm_fpga/build_axf.ld.S b/plat/arm/board/arm_fpga/build_axf.ld.S
index d8254e5..bd3d163 100644
--- a/plat/arm/board/arm_fpga/build_axf.ld.S
+++ b/plat/arm/board/arm_fpga/build_axf.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, ARM Limited. All rights reserved.
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  *
@@ -46,7 +46,7 @@
 		KEEP(*(.kern_tramp))
 	}
 
-	/DISCARD/ : { *(stacks) }
+	/DISCARD/ : { *(.stacks) }
 	/DISCARD/ : { *(.debug_*) }
 	/DISCARD/ : { *(.note*) }
 	/DISCARD/ : { *(.comment*) }
diff --git a/plat/arm/board/fvp/fvp_el3_spmc.c b/plat/arm/board/fvp/fvp_el3_spmc.c
index 2b347ed..6b44f63 100644
--- a/plat/arm/board/fvp/fvp_el3_spmc.c
+++ b/plat/arm/board/fvp/fvp_el3_spmc.c
@@ -18,7 +18,7 @@
 
 #define PLAT_SPMC_SHMEM_DATASTORE_SIZE 512 * 1024
 
-__section("arm_el3_tzc_dram") static uint8_t
+__section(".arm_el3_tzc_dram") static uint8_t
 plat_spmc_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 
 int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index 4875c00..d79e219 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -39,7 +39,7 @@
  */
 #define PLAT_ARM_CLUSTER_COUNT		U(FVP_CLUSTER_COUNT)
 
-#define PLAT_ARM_TRUSTED_SRAM_SIZE	UL(0x00040000)	/* 256 KB */
+#define PLAT_ARM_TRUSTED_SRAM_SIZE	(FVP_TRUSTED_SRAM_SIZE * UL(1024))
 
 #define PLAT_ARM_TRUSTED_ROM_BASE	UL(0x00000000)
 #define PLAT_ARM_TRUSTED_ROM_SIZE	UL(0x04000000)	/* 64 MB */
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 35b4f11..da5d07d 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -24,6 +24,10 @@
 
 FVP_DT_PREFIX		:= fvp-base-gicv3-psci
 
+# Size (in kilobytes) of the Trusted SRAM region to utilize when building for
+# the FVP platform. This option defaults to 256.
+FVP_TRUSTED_SRAM_SIZE	:= 256
+
 # The FVP platform depends on this macro to build with correct GIC driver.
 $(eval $(call add_define,FVP_USE_GIC_DRIVER))
 
@@ -39,6 +43,9 @@
 # Pass FVP_GICR_REGION_PROTECTION to the build system.
 $(eval $(call add_define,FVP_GICR_REGION_PROTECTION))
 
+# Pass FVP_TRUSTED_SRAM_SIZE to the build system.
+$(eval $(call add_define,FVP_TRUSTED_SRAM_SIZE))
+
 # Sanity check the cluster count and if FVP_CLUSTER_COUNT <= 2,
 # choose the CCI driver, else the CCN driver
 ifeq ($(FVP_CLUSTER_COUNT), 0)
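
Note: with the default of 256, the derived PLAT_ARM_TRUSTED_SRAM_SIZE
reproduces the previous hard-coded constant exactly. A standalone compile-time
check of the arithmetic (mirroring, not taken from, platform_def.h):

    /* Standalone sketch of the platform_def.h arithmetic. */
    #define UL(x)				(x##UL)
    #define FVP_TRUSTED_SRAM_SIZE		256	/* default, in KB */
    #define PLAT_ARM_TRUSTED_SRAM_SIZE	(FVP_TRUSTED_SRAM_SIZE * UL(1024))

    _Static_assert(PLAT_ARM_TRUSTED_SRAM_SIZE == UL(0x00040000),
    	       "default must match the previous hard-coded 256 KB value");
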
diff --git a/plat/arm/common/arm_gicv3.c b/plat/arm/common/arm_gicv3.c
index 469e22a..1c95afb 100644
--- a/plat/arm/common/arm_gicv3.c
+++ b/plat/arm/common/arm_gicv3.c
@@ -48,8 +48,8 @@
  * data in the designated EL3 Secure carve-out memory. The `used` attribute
  * is used to prevent the compiler from removing the gicv3 contexts.
  */
-static gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram") __used;
-static gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram") __used;
+static gicv3_redist_ctx_t rdist_ctx __section(".arm_el3_tzc_dram") __used;
+static gicv3_dist_ctx_t dist_ctx __section(".arm_el3_tzc_dram") __used;
 
 /* Define accessor function to get reference to the GICv3 context */
 DEFINE_LOAD_SYM_ADDR(rdist_ctx)
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
index 6c3d08d..314e87a 100644
--- a/plat/common/aarch32/platform_mp_stack.S
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -43,5 +43,5 @@
 	 * stack of PLATFORM_STACK_SIZE bytes.
 	 * -----------------------------------------------------
 	 */
-declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+declare_stack platform_normal_stacks, .tzfw_normal_stacks, \
 		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
index 836c13a..69e112f 100644
--- a/plat/common/aarch32/platform_up_stack.S
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -43,5 +43,5 @@
 	 * stack of PLATFORM_STACK_SIZE bytes.
 	 * -----------------------------------------------------
 	 */
-declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+declare_stack platform_normal_stacks, .tzfw_normal_stacks, \
 		PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
index c0668ea..fa1ca22 100644
--- a/plat/common/aarch64/platform_mp_stack.S
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -56,6 +56,6 @@
 	 * stack of PLATFORM_STACK_SIZE bytes.
 	 * -----------------------------------------------------
 	 */
-declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+declare_stack platform_normal_stacks, .tzfw_normal_stacks, \
 		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT, \
 		CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
index c6e5e2d..2c87219 100644
--- a/plat/common/aarch64/platform_up_stack.S
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -46,5 +46,5 @@
 	 * are allocated
 	 * -----------------------------------------------------
 	 */
-declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+declare_stack platform_normal_stacks, .tzfw_normal_stacks, \
 		PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/hisilicon/hikey960/hikey960_bl31_setup.c b/plat/hisilicon/hikey960/hikey960_bl31_setup.c
index 0debe1e..50751ee 100644
--- a/plat/hisilicon/hikey960/hikey960_bl31_setup.c
+++ b/plat/hisilicon/hikey960/hikey960_bl31_setup.c
@@ -183,7 +183,7 @@
 
 #define SPMC_SHARED_MEMORY_OBJ_SIZE (512 * 1024)
 
-__section("ram2_region") uint8_t plat_spmc_shmem_datastore[SPMC_SHARED_MEMORY_OBJ_SIZE];
+__section(".ram2_region") uint8_t plat_spmc_shmem_datastore[SPMC_SHARED_MEMORY_OBJ_SIZE];
 
 int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 {
diff --git a/plat/hisilicon/hikey960/include/plat.ld.S b/plat/hisilicon/hikey960/include/plat.ld.S
index 0cc25cd..f8bd376 100644
--- a/plat/hisilicon/hikey960/include/plat.ld.S
+++ b/plat/hisilicon/hikey960/include/plat.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,8 +14,8 @@
 
 SECTIONS
 {
-	ram2_region (NOLOAD) : {
-	*(ram2_region)
+	.ram2_region (NOLOAD) : {
+	*(.ram2_region)
 	}>RAM2
 }
 
diff --git a/plat/marvell/armada/a8k/common/ble/ble.ld.S b/plat/marvell/armada/a8k/common/ble/ble.ld.S
index d7a0592..446849b 100644
--- a/plat/marvell/armada/a8k/common/ble/ble.ld.S
+++ b/plat/marvell/armada/a8k/common/ble/ble.ld.S
@@ -19,7 +19,7 @@
 {
     . = BLE_BASE;
 
-    ro . : {
+    .ro . : {
         __RO_START__ = .;
         *ble_main.o(.entry*)
         *(.text*)
@@ -40,9 +40,9 @@
         __DATA_END__ = .;
     } >RAM
 
-    stacks . (NOLOAD) : {
+    .stacks . (NOLOAD) : {
         __STACKS_START__ = .;
-        *(tzfw_normal_stacks)
+        *(.tzfw_normal_stacks)
         __STACKS_END__ = .;
     } >RAM
 
diff --git a/plat/marvell/armada/common/marvell_gicv3.c b/plat/marvell/armada/common/marvell_gicv3.c
index 0bd5545..5419506 100644
--- a/plat/marvell/armada/common/marvell_gicv3.c
+++ b/plat/marvell/armada/common/marvell_gicv3.c
@@ -38,8 +38,8 @@
  * We save and restore the GICv3 context on system suspend. Allocate the
  * data in the designated EL3 Secure carve-out memory
  */
-static gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram");
-static gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram");
+static gicv3_redist_ctx_t rdist_ctx __section(".arm_el3_tzc_dram");
+static gicv3_dist_ctx_t dist_ctx __section(".arm_el3_tzc_dram");
 
 /*
  * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
diff --git a/plat/mediatek/common/mtk_smc_handlers.c b/plat/mediatek/common/mtk_smc_handlers.c
index 51a960f..9443e17 100644
--- a/plat/mediatek/common/mtk_smc_handlers.c
+++ b/plat/mediatek/common/mtk_smc_handlers.c
@@ -71,7 +71,7 @@
 	}
 
 #define SMC_ID_EXPAND_AS_DESCRIPTOR_INDEX(_smc_id, _smc_num) \
-	short _smc_id##_descriptor_index __section("mtk_plat_ro") = -1;
+	short _smc_id##_descriptor_index __section(".mtk_plat_ro") = -1;
 
 MTK_SIP_SMC_FROM_BL33_TABLE(SMC_ID_EXPAND_AS_DESCRIPTOR_INDEX);
 MTK_SIP_SMC_FROM_NS_EL1_TABLE(SMC_ID_EXPAND_AS_DESCRIPTOR_INDEX);
diff --git a/plat/mediatek/include/plat.ld.rodata.inc b/plat/mediatek/include/plat.ld.rodata.inc
index 06ad491..e766472 100644
--- a/plat/mediatek/include/plat.ld.rodata.inc
+++ b/plat/mediatek/include/plat.ld.rodata.inc
@@ -25,6 +25,6 @@
 	__MTK_SMC_POOL_END_UNALIGNED__ = .;
 	. = ALIGN(8);
 #include <vendor_pubsub_events.h>
-	*(mtk_plat_ro)
+	*(.mtk_plat_ro)
 
 #endif /* PLAT_LD_RODATA_INC */
diff --git a/plat/mediatek/mt8173/drivers/spm/spm.c b/plat/mediatek/mt8173/drivers/spm/spm.c
index 1caab3b..8980e07 100644
--- a/plat/mediatek/mt8173/drivers/spm/spm.c
+++ b/plat/mediatek/mt8173/drivers/spm/spm.c
@@ -29,9 +29,9 @@
 
 DEFINE_BAKERY_LOCK(spm_lock);
 
-static int spm_hotplug_ready __section("tzfw_coherent_mem");
-static int spm_mcdi_ready __section("tzfw_coherent_mem");
-static int spm_suspend_ready __section("tzfw_coherent_mem");
+static int spm_hotplug_ready __section(".tzfw_coherent_mem");
+static int spm_mcdi_ready __section(".tzfw_coherent_mem");
+static int spm_suspend_ready __section(".tzfw_coherent_mem");
 
 void spm_lock_init(void)
 {
diff --git a/plat/mediatek/mt8186/drivers/mcdi/mt_mcdi.c b/plat/mediatek/mt8186/drivers/mcdi/mt_mcdi.c
index 0103612..efcf87f 100644
--- a/plat/mediatek/mt8186/drivers/mcdi/mt_mcdi.c
+++ b/plat/mediatek/mt8186/drivers/mcdi/mt_mcdi.c
@@ -62,7 +62,7 @@
 #define MCDI_INIT_2			U(2)
 #define MCDI_INIT_DONE			U(3)
 
-static int mcdi_init_status __section("tzfw_coherent_mem");
+static int mcdi_init_status __section(".tzfw_coherent_mem");
 
 static inline uint32_t mcdi_mbox_read(uint32_t id)
 {
diff --git a/plat/mediatek/mt8192/drivers/mcdi/mt_mcdi.c b/plat/mediatek/mt8192/drivers/mcdi/mt_mcdi.c
index 1635b67..765c7b2 100644
--- a/plat/mediatek/mt8192/drivers/mcdi/mt_mcdi.c
+++ b/plat/mediatek/mt8192/drivers/mcdi/mt_mcdi.c
@@ -63,7 +63,7 @@
 #define MCDI_INIT_2			2
 #define MCDI_INIT_DONE			3
 
-static int mcdi_init_status __section("tzfw_coherent_mem");
+static int mcdi_init_status __section(".tzfw_coherent_mem");
 
 static inline uint32_t mcdi_mbox_read(uint32_t id)
 {
diff --git a/plat/mediatek/mt8195/drivers/mcdi/mt_mcdi.c b/plat/mediatek/mt8195/drivers/mcdi/mt_mcdi.c
index c14e83b..f7dfec3 100644
--- a/plat/mediatek/mt8195/drivers/mcdi/mt_mcdi.c
+++ b/plat/mediatek/mt8195/drivers/mcdi/mt_mcdi.c
@@ -63,7 +63,7 @@
 #define MCDI_INIT_2			2
 #define MCDI_INIT_DONE			3
 
-static int mcdi_init_status __section("tzfw_coherent_mem");
+static int mcdi_init_status __section(".tzfw_coherent_mem");
 
 static inline uint32_t mcdi_mbox_read(uint32_t id)
 {
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
index 6ed1cdf..2365564 100644
--- a/plat/nvidia/tegra/platform.mk
+++ b/plat/nvidia/tegra/platform.mk
@@ -90,8 +90,8 @@
 # o resolve undefined symbols to el3_panic
 # o include only required sections
 TF_LDFLAGS	+= --diag_suppress=L6314,L6332 --no_scanlib --callgraph
-TF_LDFLAGS	+= --keep="*(__pubsub*)" --keep="*(rt_svc_descs*)" --keep="*(*cpu_ops)"
+TF_LDFLAGS	+= --keep="*(.__pubsub*)" --keep="*(.rt_svc_descs*)" --keep="*(.cpu_ops)"
 ifeq (${ENABLE_PMF},1)
-TF_LDFLAGS	+= --keep="*(*pmf_svc_descs*)"
+TF_LDFLAGS	+= --keep="*(.pmf_svc_descs*)"
 endif
 endif
diff --git a/plat/nvidia/tegra/scat/bl31.scat b/plat/nvidia/tegra/scat/bl31.scat
index 2d6d2b3..fdd6e33 100644
--- a/plat/nvidia/tegra/scat/bl31.scat
+++ b/plat/nvidia/tegra/scat/bl31.scat
@@ -48,14 +48,14 @@
 	/* Ensure 8-byte alignment for descriptors and ensure inclusion */
 	__RT_SVC_DESCS__ AlignExpr(ImageLimit(__RODATA__), 8) FIXED
 	{
-		*(rt_svc_descs)
+		*(.rt_svc_descs)
 	}
 
 #if ENABLE_PMF
 	/* Ensure 8-byte alignment for descriptors and ensure inclusion */
 	__PMF_SVC_DESCS__ AlignExpr(ImageLimit(__RT_SVC_DESCS__), 8) FIXED
 	{
-		*(pmf_svc_descs)
+		*(.pmf_svc_descs)
 	}
 #endif /* ENABLE_PMF */
 
@@ -65,7 +65,7 @@
 	 */
 	__CPU_OPS__ AlignExpr(+0, 8) FIXED
 	{
-		*(cpu_ops)
+		*(.cpu_ops)
 	}
 
 	/*
@@ -150,7 +150,7 @@
 {
 	__STACKS__ AlignExpr(+0, 64) FIXED
 	{
-		*(tzfw_normal_stacks)
+		*(.tzfw_normal_stacks)
 	}
 }
 
@@ -180,7 +180,7 @@
 	 */
 	__BAKERY_LOCKS__ AlignExpr(ImageLimit(__BSS__), CACHE_WRITEBACK_GRANULE) FIXED
 	{
-		*(bakery_lock)
+		*(.bakery_lock)
 	}
 
 	__BAKERY_LOCKS_EPILOGUE__ AlignExpr(ImageLimit(__BAKERY_LOCKS__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
@@ -229,9 +229,9 @@
 
 LR_XLAT_TABLE +0
 {
-	xlat_table +0 FIXED
+	.xlat_table +0 FIXED
 	{
-		*(xlat_table)
+		*(.xlat_table)
 	}
 }
 
@@ -251,8 +251,8 @@
 		 *
 		 * Each lock's data is contiguous and fully allocated by the compiler
 		 */
-		*(bakery_lock)
-		*(tzfw_coherent_mem)
+		*(.bakery_lock)
+		*(.tzfw_coherent_mem)
 	}
 
 	__COHERENT_RAM_EPILOGUE_UNALIGNED__ +0 FIXED EMPTY 0
diff --git a/plat/qemu/qemu_sbsa/include/platform_def.h b/plat/qemu/qemu_sbsa/include/platform_def.h
index d971ebe..85fbb4d 100644
--- a/plat/qemu/qemu_sbsa/include/platform_def.h
+++ b/plat/qemu/qemu_sbsa/include/platform_def.h
@@ -364,8 +364,8 @@
  * Name of the section to put the translation tables used by the S-EL1/S-EL0
  * context of a Secure Partition.
  */
-#define PLAT_SP_IMAGE_XLAT_SECTION_NAME		"qemu_sp_xlat_table"
-#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME	"qemu_sp_xlat_table"
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME		".qemu_sp_xlat_table"
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME	".qemu_sp_xlat_table"
 
 /* Cookies passed to the Secure Partition at boot. Not used by QEMU platforms.*/
 #define PLAT_SPM_COOKIE_0		ULL(0)
diff --git a/plat/renesas/common/aarch64/platform_common.c b/plat/renesas/common/aarch64/platform_common.c
index b0a88cb..17ccb28 100644
--- a/plat/renesas/common/aarch64/platform_common.c
+++ b/plat/renesas/common/aarch64/platform_common.c
@@ -28,7 +28,7 @@
 #endif
 
 const uint8_t version_of_renesas[VERSION_OF_RENESAS_MAXLEN]
-		__attribute__ ((__section__("ro"))) = VERSION_OF_RENESAS;
+		__attribute__ ((__section__(".ro"))) = VERSION_OF_RENESAS;
 
 #define MAP_SHARED_RAM		MAP_REGION_FLAT(RCAR_SHARED_MEM_BASE,	\
 					RCAR_SHARED_MEM_SIZE,		\
diff --git a/plat/rockchip/common/aarch32/plat_helpers.S b/plat/rockchip/common/aarch32/plat_helpers.S
index 475c297..9f49cbd 100644
--- a/plat/rockchip/common/aarch32/plat_helpers.S
+++ b/plat/rockchip/common/aarch32/plat_helpers.S
@@ -151,7 +151,7 @@
 	 * Per-CPU Secure entry point - resume or power up
 	 * --------------------------------------------------------------------
 	 */
-	.section tzfw_coherent_mem, "a"
+	.section .tzfw_coherent_mem, "a"
 	.align  3
 cpuson_entry_point:
 	.rept	PLATFORM_CORE_COUNT
diff --git a/plat/rockchip/common/aarch64/plat_helpers.S b/plat/rockchip/common/aarch64/plat_helpers.S
index 4af052b..c4c0dec 100644
--- a/plat/rockchip/common/aarch64/plat_helpers.S
+++ b/plat/rockchip/common/aarch64/plat_helpers.S
@@ -150,7 +150,7 @@
 	 * Per-CPU Secure entry point - resume or power up
 	 * --------------------------------------------------------------------
 	 */
-	.section tzfw_coherent_mem, "a"
+	.section .tzfw_coherent_mem, "a"
 	.align  3
 cpuson_entry_point:
 	.rept	PLATFORM_CORE_COUNT
diff --git a/plat/rockchip/px30/drivers/pmu/pmu.c b/plat/rockchip/px30/drivers/pmu/pmu.c
index 5f4e64f..8770b2e 100644
--- a/plat/rockchip/px30/drivers/pmu/pmu.c
+++ b/plat/rockchip/px30/drivers/pmu/pmu.c
@@ -45,7 +45,7 @@
 
 static uint32_t cores_pd_cfg_info[PLATFORM_CORE_COUNT]
 #if USE_COHERENT_MEM
-__attribute__ ((section("tzfw_coherent_mem")))
+__attribute__ ((section(".tzfw_coherent_mem")))
 #endif
 ;
 
@@ -101,7 +101,7 @@
 
 static struct px30_sleep_ddr_data ddr_data
 #if USE_COHERENT_MEM
-__attribute__ ((section("tzfw_coherent_mem")))
+__attribute__ ((section(".tzfw_coherent_mem")))
 #endif
 ;
 
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
index 3084c4f..7bdefcc 100644
--- a/plat/rockchip/rk3399/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -64,7 +64,7 @@
 
 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
 #if USE_COHERENT_MEM
-__attribute__ ((section("tzfw_coherent_mem")))
+__attribute__ ((section(".tzfw_coherent_mem")))
 #endif
 ;/* coherent */
 
diff --git a/plat/socionext/synquacer/include/plat.ld.S b/plat/socionext/synquacer/include/plat.ld.S
index af7a172..d02afa7 100644
--- a/plat/socionext/synquacer/include/plat.ld.S
+++ b/plat/socionext/synquacer/include/plat.ld.S
@@ -23,8 +23,8 @@
 	 * not support inner shareable WBWA mappings so it is mapped normal
 	 * non-cacheable)
 	 */
-	sp_xlat_table (NOLOAD) : ALIGN(PAGE_SIZE) {
-		*(sp_xlat_table)
+	.sp_xlat_table (NOLOAD) : ALIGN(PAGE_SIZE) {
+		*(.sp_xlat_table)
 	} >SP_DRAM
 }
 
diff --git a/plat/socionext/synquacer/include/platform_def.h b/plat/socionext/synquacer/include/platform_def.h
index d6bfe42..acc74e2 100644
--- a/plat/socionext/synquacer/include/platform_def.h
+++ b/plat/socionext/synquacer/include/platform_def.h
@@ -180,8 +180,8 @@
 
 #define PLAT_SP_IMAGE_MMAP_REGIONS	30
 #define PLAT_SP_IMAGE_MAX_XLAT_TABLES	20
-#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"sp_xlat_table"
-#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME	"sp_xlat_table"
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	".sp_xlat_table"
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME	".sp_xlat_table"
 
 #define PLAT_SQ_UART1_BASE		PLAT_SQ_BOOT_UART_BASE
 #define PLAT_SQ_UART1_SIZE		ULL(0x1000)
diff --git a/plat/ti/k3/common/drivers/ti_sci/ti_sci.c b/plat/ti/k3/common/drivers/ti_sci/ti_sci.c
index 2cbfa3d..381f596 100644
--- a/plat/ti/k3/common/drivers/ti_sci/ti_sci.c
+++ b/plat/ti/k3/common/drivers/ti_sci/ti_sci.c
@@ -21,7 +21,7 @@
 #include "ti_sci.h"
 
 #if USE_COHERENT_MEM
-__section("tzfw_coherent_mem")
+__section(".tzfw_coherent_mem")
 #endif
 static uint8_t message_sequence;
 
diff --git a/plat/xilinx/versal/versal_gicv3.c b/plat/xilinx/versal/versal_gicv3.c
index d410906..0959c8e 100644
--- a/plat/xilinx/versal/versal_gicv3.c
+++ b/plat/xilinx/versal/versal_gicv3.c
@@ -36,8 +36,8 @@
  * We save and restore the GICv3 context on system suspend. Allocate the
  * data in the designated EL3 Secure carve-out memory.
  */
-static gicv3_redist_ctx_t rdist_ctx __section("versal_el3_tzc_dram");
-static gicv3_dist_ctx_t dist_ctx __section("versal_el3_tzc_dram");
+static gicv3_redist_ctx_t rdist_ctx __section(".versal_el3_tzc_dram");
+static gicv3_dist_ctx_t dist_ctx __section(".versal_el3_tzc_dram");
 
 /*
  * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
diff --git a/plat/xilinx/versal_net/versal_net_gicv3.c b/plat/xilinx/versal_net/versal_net_gicv3.c
index b7ac6ab..46ebb67 100644
--- a/plat/xilinx/versal_net/versal_net_gicv3.c
+++ b/plat/xilinx/versal_net/versal_net_gicv3.c
@@ -47,8 +47,8 @@
  * We save and restore the GICv3 context on system suspend. Allocate the
  * data in the designated EL3 Secure carve-out memory.
  */
-static gicv3_redist_ctx_t rdist_ctx __section("versal_net_el3_tzc_dram");
-static gicv3_dist_ctx_t dist_ctx __section("versal_net_el3_tzc_dram");
+static gicv3_redist_ctx_t rdist_ctx __section(".versal_net_el3_tzc_dram");
+static gicv3_dist_ctx_t dist_ctx __section(".versal_net_el3_tzc_dram");
 
 /*
  * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index bb042c7..3895b4a 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -28,6 +28,8 @@
 		return SMC_ARCH_CALL_SUCCESS;
 	case SMCCC_ARCH_SOC_ID:
 		return plat_is_smccc_feature_available(arg1);
+#ifdef __aarch64__
+	/* Workaround checks are currently only implemented for aarch64 */
 #if WORKAROUND_CVE_2017_5715
 	case SMCCC_ARCH_WORKAROUND_1:
 		if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
@@ -89,6 +91,16 @@
 		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
 #endif
 
+#if WORKAROUND_CVE_2024_7881
+	case SMCCC_ARCH_WORKAROUND_4:
+		if (check_wa_cve_2024_7881() != ERRATA_APPLIES) {
+			return SMC_ARCH_CALL_NOT_SUPPORTED;
+		}
+		return 0;
+#endif /* WORKAROUND_CVE_2024_7881 */
+
+#endif /* __aarch64__ */
+
 	/* Fallthrough */
 
 	default:
@@ -128,6 +140,7 @@
 		SMC_RET1(handle, smccc_arch_features(x1));
 	case SMCCC_ARCH_SOC_ID:
 		SMC_RET1(handle, smccc_arch_id(x1));
+#ifdef __aarch64__
 #if WORKAROUND_CVE_2017_5715
 	case SMCCC_ARCH_WORKAROUND_1:
 		/*
@@ -156,6 +169,16 @@
 		 */
 		SMC_RET0(handle);
 #endif
+#if WORKAROUND_CVE_2024_7881
+	case SMCCC_ARCH_WORKAROUND_4:
+		/*
+	 * The workaround has already been applied on affected PEs
+	 * during cold boot, so this call has no effect whether or not
+	 * the PE is affected.
+		 */
+		SMC_RET0(handle);
+#endif /* WORKAROUND_CVE_2024_7881 */
+#endif /* __aarch64__ */
 	default:
 		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
 			smc_fid);
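
Note: normal-world discovery follows the usual SMCCC pattern: probe
SMCCC_ARCH_FEATURES with the workaround ID, then issue the workaround call only
if it is supported. A hedged sketch, with `smccc_call()` as a hypothetical
stand-in for the caller's SMC/HVC conduit:

    #define SMCCC_ARCH_FEATURES	0x80000001U	/* standard SMCCC ID */
    #define SMCCC_ARCH_WORKAROUND_4	0x80000004U

    /* Hypothetical conduit wrapper: issues the SMC/HVC, returns x0. */
    extern long smccc_call(unsigned int fid, unsigned long arg1);

    void apply_wa_4_if_needed(void)
    {
    	/* A negative result (NOT_SUPPORTED) means no firmware
    	 * mitigation is required on this PE. */
    	if (smccc_call(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_4) < 0)
    		return;

    	/* Supported: the call itself is a no-op on unaffected PEs. */
    	(void)smccc_call(SMCCC_ARCH_WORKAROUND_4, 0UL);
    }
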
diff --git a/services/std_svc/spm/spm_mm/spm_mm_xlat.c b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
index 6261016..b1ca55a 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_xlat.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
@@ -19,7 +19,7 @@
 
 /* Place translation tables by default along with the ones used by BL31. */
 #ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
-#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"xlat_table"
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	".xlat_table"
 #endif
 #ifndef PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME
 #define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME	".bss"