/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
/* When there is no separate NOBITS region, NOBITS output goes into RAM too. */
#define NOBITS RAM
#endif
}

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        /* Entry point code must come first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        RT_SVC_DESCS
        FCONF_POPULATOR
        PMF_SVC_DESCS
        CPU_OPS
        GOT

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only section when code and rodata share page mappings. */
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RT_SVC_DESCS
        FCONF_POPULATOR
        PMF_SVC_DESCS
        CPU_OPS
        GOT

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    /* Continue placement after the shim's load address, not its VMA. */
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* NOBITS (stacks, .bss, xlat tables) lives in its own memory region. */
    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        BAKERY_LOCK_NORMAL
        PMF_TIMESTAMP
        __BSS_END__ = .;
    } >NOBITS

    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}