Diffstat (limited to 'platform/ext/common/armclang/tfm_common_s.sct')
-rw-r--r--  platform/ext/common/armclang/tfm_common_s.sct  226
1 file changed, 131 insertions(+), 95 deletions(-)
diff --git a/platform/ext/common/armclang/tfm_common_s.sct b/platform/ext/common/armclang/tfm_common_s.sct
index 4186d09cbd..651400b573 100644
--- a/platform/ext/common/armclang/tfm_common_s.sct
+++ b/platform/ext/common/armclang/tfm_common_s.sct
@@ -1,5 +1,7 @@
/*
- * Copyright (c) 2017-2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2017-2023 Arm Limited. All rights reserved.
+ * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon company)
+ * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,22 +18,64 @@
#include "region_defs.h"
+/* Include file with definitions for section alignments.
+ * Note: it should be included after region_defs.h so that the platform can
+ * define default values if needed. */
+#include "tfm_s_linker_alignments.h"
+
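For orientation: tfm_s_linker_alignments.h is expected to provide #ifndef-guarded defaults for the TFM_LINKER_*_ALIGNMENT macros used throughout this scatter file, so region_defs.h (included first) can override any of them per platform. A minimal sketch of that pattern, with illustrative values only (the real header may define a different macro set and defaults):

    /* Illustration only: default section alignments, overridable by the platform. */
    #ifndef TFM_LINKER_DEFAULT_ALIGNMENT
    #define TFM_LINKER_DEFAULT_ALIGNMENT      0x20
    #endif

    #ifndef TFM_LINKER_VENEERS_ALIGNMENT
    #define TFM_LINKER_VENEERS_ALIGNMENT      0x20  /* SAU regions are 32-byte granular */
    #endif

    #ifndef TFM_LINKER_MSP_STACK_ALIGNMENT
    #define TFM_LINKER_MSP_STACK_ALIGNMENT    TFM_LINKER_DEFAULT_ALIGNMENT
    #endif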
LR_CODE S_CODE_START S_CODE_SIZE {
/**** This initial section contains common code for secure binary */
- ER_TFM_CODE S_CODE_START {
+ ER_VECTORS S_CODE_START S_CODE_VECTOR_TABLE_SIZE {
*.o (RESET +First)
- * (+RO)
+ }
+#ifdef CONFIG_TFM_USE_TRUSTZONE
+ ER_VECTORS_FILL +0 EMPTY (S_CODE_VECTOR_TABLE_SIZE - ImageLength(ER_VECTORS)) {
+ }
+ /*
+ * Place the CMSE Veneers (containing the SG instruction) in a separate
+ * 32-byte-aligned region so that the SAU can be programmed to set
+ * just this region as Non-Secure Callable.
+ */
+ ER_VENEER +0 FIXED ALIGN TFM_LINKER_VENEERS_ALIGNMENT {
+ *(Veneer$$CMSE)
+ }
+ /*
+ * The Limit of the VENEER_ALIGN region should be at least 32-byte aligned
+ * so that the SAU can set this region as Non-Secure Callable.
+ */
+ VENEER_ALIGN +0 ALIGN TFM_LINKER_VENEERS_ALIGNMENT EMPTY 0x0 {
+ }
+#endif
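The 32-byte alignment of ER_VENEER and VENEER_ALIGN matters because SAU region base and limit addresses are 32-byte granular. A rough sketch of how the SPM could program the SAU to mark exactly this span as Non-Secure Callable, assuming the armlink-generated symbols Image$$ER_VENEER$$Base and Image$$VENEER_ALIGN$$Limit and the CMSIS SAU register definitions (the include name and region number are placeholders):

    #include <stdint.h>
    #include "cmsis.h"   /* assumed wrapper pulling in the CMSIS core (SAU) definitions */

    extern uint32_t Image$$ER_VENEER$$Base;      /* start of the veneer code */
    extern uint32_t Image$$VENEER_ALIGN$$Limit;  /* aligned end of the veneer region */

    static void sau_mark_veneers_nsc(void)
    {
        SAU->RNR  = 0u;                                   /* select SAU region 0 (arbitrary) */
        SAU->RBAR = (uint32_t)&Image$$ER_VENEER$$Base & SAU_RBAR_BADDR_Msk;
        SAU->RLAR = (((uint32_t)&Image$$VENEER_ALIGN$$Limit - 1u) & SAU_RLAR_LADDR_Msk)
                  | SAU_RLAR_NSC_Msk                      /* Secure, Non-Secure Callable */
                  | SAU_RLAR_ENABLE_Msk;
        SAU->CTRL |= SAU_CTRL_ENABLE_Msk;
    }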
+
+ ER_TFM_CODE +0 {
+ *startup*(.text*)
+ *libplatform_s* (.text*, .rodata*)
+ *libtfm_spm* (+RO)
}
/**** Unprivileged Secure code start here */
- TFM_UNPRIV_CODE +0 ALIGN 32 {
- *(SFN)
- *armlib*
- *libtfm_sprt.a (+RO)
- *psa_client.* (+RO)
- *psa_service.* (+RO)
- *psa_lifecycle.* (+RO)
+ TFM_UNPRIV_CODE_START +0 ALIGN TFM_LINKER_UNPRIV_CODE_ALIGNMENT {
+ * (+RO)
+ }
+
+ /*
+ * This empty, zero long execution region is here to mark the end address
+ * of TFM unprivileged code.
+ */
+ TFM_UNPRIV_CODE_END +0 ALIGN TFM_LINKER_UNPRIV_CODE_ALIGNMENT EMPTY 0x0 {
+ }
+
+ /**** Section for holding partition RO load data */
+ /*
+ * Sort the partition info by priority to guarantee the initialization order.
+ * In the SFN model, the first loaded partition is initialized last.
+ */
+ TFM_SP_LOAD_LIST +0 ALIGN 4 {
+ *(.part_load_priority_00)
+ *(.part_load_priority_01)
+ *(.part_load_priority_02)
+ *(.part_load_priority_03)
}
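The .part_load_priority_NN input sections collected above are produced at build time by tagging each partition's load-info object with a section name that encodes its priority; lower numbers are emitted first, so the SPM can walk the list in priority order. A hedged illustration of the mechanism (the struct layout, names, and values below are simplified placeholders, not the actual TF-M load-info definitions):

    #include <stdint.h>

    /* Simplified stand-in for a partition load-info record. */
    struct partition_load_info {
        uint32_t pid;
        uint32_t flags;
    };

    /* Highest-priority partition: placed in the lowest-numbered section. */
    __attribute__((used, section(".part_load_priority_00")))
    static const struct partition_load_info crypto_load = { .pid = 0x100u, .flags = 0x1u };

    /* Lower-priority partition: placed in a higher-numbered section. */
    __attribute__((used, section(".part_load_priority_02")))
    static const struct partition_load_info app_svc_load = { .pid = 0x200u, .flags = 0x0u };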
/**** PSA RoT RO part (CODE + RODATA) start here */
@@ -39,19 +83,20 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the start address
* of PSA RoT code.
*/
- TFM_PSA_CODE_START +0 ALIGN 32 EMPTY 0x0 {
+ TFM_PSA_CODE_START +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT EMPTY 0x0 {
}
- TFM_PSA_ROT_LINKER +0 ALIGN 32 {
- *tfm_psa_rot_partition* (+RO)
- *.o(TFM_*_PSA-ROT_ATTR_FN)
+ TFM_PSA_ROT_LINKER +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT {
+ *tfm_psa_rot_partition* (+RO-CODE, +RO-DATA)
+ *libplatform_s* (TFM_*_PSA-ROT_ATTR_FN)
+ *.o (TFM_*_PSA-ROT_ATTR_FN)
}
/*
* This empty, zero long execution region is here to mark the end address
* of PSA RoT code.
*/
- TFM_PSA_CODE_END +0 ALIGN 32 EMPTY 0x0 {
+ TFM_PSA_CODE_END +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT EMPTY 0x0 {
}
/**** APPLICATION RoT RO part (CODE + RODATA) start here */
@@ -59,28 +104,40 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the start address
* of APP RoT code.
*/
- TFM_APP_CODE_START +0 ALIGN 32 EMPTY 0x0 {
+ TFM_APP_CODE_START +0 ALIGN TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT EMPTY 0x0 {
}
- TFM_APP_ROT_LINKER +0 ALIGN 32 {
- *tfm_app_rot_partition* (+RO)
- *.o(TFM_*_APP-ROT_ATTR_FN)
+ TFM_APP_ROT_LINKER +0 ALIGN TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT {
+ *tfm_app_rot_partition* (+RO-CODE, +RO-DATA)
+ *libplatform_s* (TFM_*_APP-ROT_ATTR_FN)
+ *.o (TFM_*_APP-ROT_ATTR_FN)
}
/*
* This empty, zero long execution region is here to mark the end address
* of APP RoT code.
*/
- TFM_APP_CODE_END +0 ALIGN 32 EMPTY 0x0 {
+ TFM_APP_CODE_END +0 ALIGN TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT EMPTY 0x0 {
}
-#if defined(S_CODE_SRAM_ALIAS_BASE)
- /* eFlash driver code that gets copied from Flash to SRAM */
- ER_CODE_SRAM S_CODE_SRAM_ALIAS_BASE ALIGN 4 {
- Driver_GFC100_EFlash.o (+RO)
- gfc100_eflash_drv.o (+RO)
- musca_b1_eflash_drv.o (+RO)
+#if defined(S_RAM_CODE_START)
+ /* Flash driver code that is copied from Flash to CODE_SRAM */
+ ER_CODE_SRAM S_RAM_CODE_START ALIGN 4 {
+ *libflash_drivers* (+RO)
+ * (.ramfunc)
}
+
+ /* This empty, zero long execution region is here to mark the limit
+ * address of the last execution region that is allocated in CODE_SRAM.
+ */
+ ER_CODE_SRAM_WATERMARK +0 EMPTY 0x0 {
+ }
+
+ /* Make sure that the sections allocated in CODE_SRAM do not exceed the
+ * size of the available SRAM.
+ */
+ ScatterAssert(ImageLimit(ER_CODE_SRAM_WATERMARK) <=
+ S_RAM_CODE_START + S_RAM_CODE_SIZE)
#endif
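The * (.ramfunc) selector picks up any function that the source explicitly marks for execution from RAM, which is typically required for code that programs the flash bank it would otherwise execute from. A hedged example of how a driver routine might opt in (the function and its body are made up for illustration):

    #include <stdint.h>

    /* Placed in .ramfunc so the scatter file above copies it to CODE_SRAM and it
     * never executes from the flash bank it is busy programming. */
    __attribute__((section(".ramfunc"), noinline))
    int flash_program_word(volatile uint32_t *dst, uint32_t value)
    {
        *dst = value;   /* stand-in for the real flash programming sequence */
        return 0;
    }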
/**** Base address of secure data area */
@@ -91,52 +148,46 @@ LR_CODE S_CODE_START S_CODE_SIZE {
 * The MPU on an Armv6-M/v7-M core in a multi-core topology may require stricter
 * alignment: the MPU region base address must be aligned to the MPU region
 * size.
- * As a result, in multi-core topology, to save memory resource and MPU
+ * As a result, on Armv6-M/v7-M cores, to save memory resource and MPU
* regions, unprivileged data sections and privileged data sections are
* separated and gathered in unprivileged/privileged data area respectively.
* Keep BL2 shared data and MSP stack at the beginning of the secure data
- * area in single Armv8-M topology, while move the two areas to the
- * beginning of privileged data region in multi-core topology.
+ * area on Armv8-M cores, while moving the two areas to the beginning of the
+ * privileged data region on Armv6-M/v7-M cores.
*/
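Concretely, the PMSAv7 MPU on Armv6-M/v7-M encodes each region as a power-of-two size whose base address must be aligned to that size, which is why unprivileged and privileged data are each gathered into one contiguous area rather than protected with many small regions. A sketch using the CMSIS register names, assuming a 32 KB unprivileged data area at a 32 KB-aligned base (addresses and region number are illustrative):

    #include "cmsis.h"   /* assumed device header exposing the CMSIS MPU registers */

    static void mpu_map_unpriv_data_32kb(void)
    {
        /* PMSAv7: region size = 2^(SIZE+1) and the base must be size-aligned. */
        MPU->RNR  = 1u;                          /* select MPU region 1 */
        MPU->RBAR = 0x20008000u;                 /* 32 KB region => base must be 32 KB-aligned */
        MPU->RASR = (14u << MPU_RASR_SIZE_Pos)   /* 2^(14+1) = 32 KB */
                  | (3u << MPU_RASR_AP_Pos)      /* read/write for privileged and unprivileged */
                  | MPU_RASR_ENABLE_Msk;
    }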
-#ifndef TFM_MULTI_CORE_TOPOLOGY
+#if defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
+ defined(__ARM_ARCH_8_1M_MAIN__)
#ifdef CODE_SHARING
 /* Code sharing between the bootloader and the runtime requires sharing the
 * global variables.
*/
- TFM_SHARED_SYMBOLS +0 ALIGN 32 EMPTY SHARED_SYMBOL_AREA_SIZE {
+ TFM_SHARED_SYMBOLS +0 ALIGN TFM_LINKER_SHARED_SYMBOLS_ALIGNMENT EMPTY SHARED_SYMBOL_AREA_SIZE {
}
#endif
/* Shared area between BL2 and runtime to exchange data */
- TFM_SHARED_DATA +0 ALIGN 32 OVERLAY EMPTY BOOT_TFM_SHARED_DATA_SIZE {
+ TFM_SHARED_DATA +0 ALIGN TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT OVERLAY EMPTY BOOT_TFM_SHARED_DATA_SIZE {
}
/* MSP */
- ARM_LIB_STACK_MSP +0 ALIGN 32 OVERLAY EMPTY S_MSP_STACK_SIZE {
+ ARM_LIB_STACK +0 ALIGN TFM_LINKER_MSP_STACK_ALIGNMENT OVERLAY EMPTY S_MSP_STACK_SIZE - 0x8 {
}
-# if !defined(TFM_PSA_API)
- /* PSP is unprivileged in single-core topology. Reserve 8 bytes for seal */
- ARM_LIB_STACK +0 ALIGN 32 EMPTY S_PSP_STACK_SIZE - 0x8 {
+ STACKSEAL +0 EMPTY 0x8 {
}
- ARM_LIB_STACK_SEAL +0 EMPTY 0x8 {
- }
-# else
- /* PSP is unprivileged in single-core topology */
- ARM_LIB_STACK +0 ALIGN 32 EMPTY S_PSP_STACK_SIZE {
- }
-# endif /* !defined(TFM_PSA_API) */
-#endif
+#endif /* defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
+ * defined(__ARM_ARCH_8_1M_MAIN__) */
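The MSP execution region above is shrunk by 8 bytes and immediately followed by STACKSEAL because, on Armv8-M with TrustZone, the two words above the initial Secure stack pointer are expected to hold the architecturally recommended seal value 0xFEF5EDA5, so a faulting or crafted exception return cannot unwind past the stack base. A rough sketch of how startup code might write the seal, assuming the armlink-generated Image$$STACKSEAL$$ZI$$Base symbol for the EMPTY region:

    #include <stdint.h>

    #define TZ_STACK_SEAL_VALUE  0xFEF5EDA5u    /* Arm-recommended stack seal value */

    extern uint32_t Image$$STACKSEAL$$ZI$$Base; /* assumed armlink symbol for the seal region */

    static void seal_msp_stack(void)
    {
        /* Write the two seal words that sit just above the initial MSP. */
        uint32_t *seal = &Image$$STACKSEAL$$ZI$$Base;
        seal[0] = TZ_STACK_SEAL_VALUE;
        seal[1] = TZ_STACK_SEAL_VALUE;
    }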
-#if !defined(TFM_PSA_API)
- TFM_SECURE_STACK +0 ALIGN 128 EMPTY 0x2000 {
+#if defined(CONFIG_TFM_PARTITION_META)
+ TFM_SP_META_PTR +0 ALIGN TFM_LINKER_SP_META_PTR_ALIGNMENT {
+ *(.bss.SP_META_PTR_SPRTL_INST)
}
-#endif /* !defined(TFM_PSA_API) */
-
-#if defined(TFM_SP_META_PTR_ENABLE)
- TFM_SP_META_PTR +0 ALIGN 32 {
- *(SP_META_PTR_SPRTL_INST)
+ /*
+ * This empty, zero long execution region is here to mark the end address
+ * of TFM partition metadata pointer region.
+ */
+ TFM_SP_META_PTR_END +0 ALIGN TFM_LINKER_SP_META_PTR_ALIGNMENT EMPTY 0x0 {
}
#endif
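The .bss.SP_META_PTR_SPRTL_INST input section holds the secure partition runtime library's metadata pointer when CONFIG_TFM_PARTITION_META is enabled; keeping it between TFM_SP_META_PTR and TFM_SP_META_PTR_END lets the SPM grant the running partition access to just this pointer when it is scheduled. A hedged illustration of how such a variable could land in that section (the type and names are placeholders, not the actual SPRTL definitions):

    #include <stdint.h>

    /* Placeholder for the per-partition metadata the runtime library dereferences. */
    struct partition_metadata {
        uint32_t *stack_limit;
        void     *private_data;
    };

    /* A single writable pointer, kept in its own section so unprivileged partition
     * code can be given access to it and nothing else. */
    __attribute__((section(".bss.SP_META_PTR_SPRTL_INST")))
    struct partition_metadata *p_partition_metadata;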
@@ -145,10 +196,10 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the start address
* of APP RoT RW and Stack.
*/
- TFM_APP_RW_STACK_START +0 ALIGN 32 EMPTY 0x0 {
+ TFM_APP_RW_STACK_START +0 ALIGN TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT EMPTY 0x0 {
}
- TFM_APP_ROT_LINKER_DATA +0 ALIGN 32 {
+ TFM_APP_ROT_LINKER_DATA +0 ALIGN TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT {
*tfm_app_rot_partition* (+RW +ZI)
*.o(TFM_*_APP-ROT_ATTR_RW)
*.o(TFM_*_APP-ROT_ATTR_ZI)
@@ -158,38 +209,49 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the end address
* of APP RoT RW and Stack.
*/
- TFM_APP_RW_STACK_END +0 ALIGN 32 EMPTY 0x0 {
+ TFM_APP_RW_STACK_END +0 ALIGN TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT EMPTY 0x0 {
}
-#ifdef TFM_MULTI_CORE_TOPOLOGY
+#if defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_7EM__)
#ifdef S_DATA_PRIV_START
- /**** Privileged data area base address specified by multi-core platform */
+ /**** Privileged data area base address specified by Armv6-M/v7-M platform */
TFM_SECURE_PRIV_DATA_BOUNDARY S_DATA_PRIV_START {
}
#endif
/*
* Move BL2 shared area and MSP stack to the beginning of privileged data
- * area in multi-core topology.
+ * area on Armv6-M/v7-M platforms.
*/
/* Shared area between BL2 and runtime to exchange data */
- TFM_SHARED_DATA +0 ALIGN 32 OVERLAY EMPTY BOOT_TFM_SHARED_DATA_SIZE {
+ TFM_SHARED_DATA +0 ALIGN TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT OVERLAY EMPTY BOOT_TFM_SHARED_DATA_SIZE {
}
/* MSP */
- ARM_LIB_STACK_MSP +0 ALIGN 32 OVERLAY EMPTY S_MSP_STACK_SIZE {
+ ARM_LIB_STACK +0 ALIGN TFM_LINKER_MSP_STACK_ALIGNMENT OVERLAY EMPTY S_MSP_STACK_SIZE {
}
+#endif /* defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
+ * defined(__ARM_ARCH_7EM__) */
- /* PSP is privileged in multi-core topology */
- ARM_LIB_STACK +0 ALIGN 32 EMPTY S_PSP_STACK_SIZE {
+ ER_TFM_DATA +0 {
+ * (+RW +ZI)
}
-#endif
- ARM_LIB_HEAP +0 ALIGN 8 EMPTY S_HEAP_SIZE {
+ /**** Partition runtime objects are placed in the same order as the load partitions */
+ ER_PART_RT_POOL +0 ALIGN 4 {
+ *(.bss.part_runtime_priority_00)
+ *(.bss.part_runtime_priority_01)
+ *(.bss.part_runtime_priority_02)
+ *(.bss.part_runtime_priority_03)
}
- ER_TFM_DATA +0 {
- * (+RW +ZI)
+ /**** Service runtime objects are placed in the same order as the load partitions */
+ ER_SERV_RT_POOL +0 ALIGN 4 {
+ *(.bss.serv_runtime_priority_00)
+ *(.bss.serv_runtime_priority_01)
+ *(.bss.serv_runtime_priority_02)
+ *(.bss.serv_runtime_priority_03)
}
/**** PSA RoT DATA start here */
@@ -197,10 +259,10 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the start address
* of PSA RoT RW and Stack.
*/
- TFM_PSA_RW_STACK_START +0 ALIGN 32 EMPTY 0x0 {
+ TFM_PSA_RW_STACK_START +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT EMPTY 0x0 {
}
- TFM_PSA_ROT_LINKER_DATA +0 ALIGN 32 {
+ TFM_PSA_ROT_LINKER_DATA +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT {
*tfm_psa_rot_partition* (+RW +ZI)
*.o(TFM_*_PSA-ROT_ATTR_RW)
*.o(TFM_*_PSA-ROT_ATTR_ZI)
@@ -210,28 +272,15 @@ LR_CODE S_CODE_START S_CODE_SIZE {
* This empty, zero long execution region is here to mark the end address
* of PSA RoT RW and Stack.
*/
- TFM_PSA_RW_STACK_END +0 ALIGN 32 EMPTY 0x0 {
+ TFM_PSA_RW_STACK_END +0 ALIGN TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT EMPTY 0x0 {
}
#ifdef RAM_VECTORS_SUPPORT
- ER_RAM_VECTORS +0 ALIGN 256 UNINIT {
+ ER_RAM_VECTORS +0 ALIGN TFM_LINKER_RAM_VECTORS_ALIGNMENT UNINIT {
* (RESET_RAM)
}
#endif
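When RAM_VECTORS_SUPPORT is enabled, ER_RAM_VECTORS reserves an UNINIT, strongly aligned area for a vector table that can be retargeted at run time through VTOR. A minimal sketch of the usual pattern, assuming a device with 48 external interrupts, a flash vector table symbol named __VECTOR_TABLE, and the CMSIS SCB definitions (all of these names are assumptions):

    #include <stdint.h>
    #include "cmsis.h"   /* assumed device header providing SCB and __DSB() */

    #define VECTOR_TABLE_ENTRIES  (16u + 48u)    /* core exceptions + assumed 48 IRQs */

    /* RESET_RAM matches the "* (RESET_RAM)" selector in ER_RAM_VECTORS above;
     * because the region is UNINIT, software must copy the table by hand. */
    __attribute__((section("RESET_RAM"), aligned(256)))
    static uint32_t ram_vector_table[VECTOR_TABLE_ENTRIES];

    extern const uint32_t __VECTOR_TABLE[];      /* assumed flash vector table symbol */

    static void relocate_vectors_to_ram(void)
    {
        for (uint32_t i = 0u; i < VECTOR_TABLE_ENTRIES; i++) {
            ram_vector_table[i] = __VECTOR_TABLE[i];
        }
        SCB->VTOR = (uint32_t)ram_vector_table;  /* point the core at the RAM copy */
        __DSB();
    }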
-#if defined(PSA_PROXY_SHARED_MEMORY_BASE)
- PSA_PROXY_SHARED_MEMORY PSA_PROXY_SHARED_MEMORY_BASE PSA_PROXY_SHARED_MEMORY_SIZE {
- *(PSA_PROXY_SHARED_MEMORY_SECTION)
- }
-#endif
-
-#if defined (S_RAM_CODE_START)
- /* Executable code allocated in RAM */
- TFM_RAM_CODE S_RAM_CODE_START {
- * (.ramfunc)
- }
-#endif
-
/* This empty, zero long execution region is here to mark the limit address
* of the last execution region that is allocated in SRAM.
*/
@@ -244,19 +293,6 @@ LR_CODE S_CODE_START S_CODE_SIZE {
ScatterAssert(ImageLimit(SRAM_WATERMARK) <= S_DATA_START + S_DATA_SIZE)
}
-#ifndef TFM_MULTI_CORE_TOPOLOGY
-LR_VENEER CMSE_VENEER_REGION_START {
- /*
- * Place the CMSE Veneers (containing the SG instruction) in a separate
- * 32 bytes aligned region so that the SAU can be programmed to
- * just set this region as Non-Secure Callable.
- */
- ER_CODE_CMSE_VENEER CMSE_VENEER_REGION_START CMSE_VENEER_REGION_SIZE {
- *(Veneer$$CMSE)
- }
-}
-#endif
-
LR_NS_PARTITION NS_PARTITION_START {
/* Reserved place for the NS application.
 * No code will be placed here; just the address of this region is used in the