diff options
author | Alexei Fedorov <Alexei.Fedorov@arm.com> | 2020-12-25 10:52:56 +0000 |
---|---|---|
committer | Alexei Fedorov <Alexei.Fedorov@arm.com> | 2021-01-06 10:59:22 +0000 |
commit | db9736e3d86d7098f9785a9db834746a8b2ed335 (patch) | |
tree | bce5e8ee4902d6aeab8a92e5c5c1ef851c4678ff /lib | |
parent | 4811168aafc8bdb5869d26744fa8636752b5ca32 (diff) | |
download | trusted-firmware-a-db9736e3d86d7098f9785a9db834746a8b2ed335.tar.gz |
AArch64: Fix assertions in processing dynamic relocations
This patch provides the following changes in fixup_gdt_reloc()
function:
- Fixes assertions in processing dynamic relocations when relocation
entries that do not match the R_AARCH64_RELATIVE type are found.
Linker might generate entries of relocation type R_AARCH64_NONE
(code 0), which should be ignored to make the code boot. Similar
issue was fixed in OP-TEE (see optee_os/ldelf/ta_elf_rel.c
commit 7a4dc765c133125428136a496a7644c6fec9b3c2)
- Fixes a bug where the "b.ge" (signed greater than or equal)
condition code was used instead of "b.hs" (unsigned higher or same,
i.e. unsigned greater than or equal) for comparison of absolute
addresses.
- Adds an optimisation which skips fixing Global Offset Table (GOT)
entries when the offset value is 0.
Change-Id: I35e34e055b7476843903859be947b883a1feb1b5
Signed-off-by: Alexei Fedorov <Alexei.Fedorov@arm.com>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/aarch64/misc_helpers.S | 48 |
1 files changed, 29 insertions, 19 deletions
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S index 052891683f..b6f6c9d881 100644 --- a/lib/aarch64/misc_helpers.S +++ b/lib/aarch64/misc_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -486,15 +486,20 @@ endfunc enable_vfp * arguments (which is usually the limits of the relocable BL image). * x0 - the start of the fixup region * x1 - the limit of the fixup region - * These addresses have to be page (4KB aligned). + * These addresses have to be 4KB page aligned. * --------------------------------------------------------------------------- */ + +/* Relocation codes */ +#define R_AARCH64_NONE 0 +#define R_AARCH64_RELATIVE 1027 + func fixup_gdt_reloc mov x6, x0 mov x7, x1 - /* Test if the limits are 4K aligned */ #if ENABLE_ASSERTIONS + /* Test if the limits are 4KB aligned */ orr x0, x0, x1 tst x0, #(PAGE_SIZE_MASK) ASM_ASSERT(eq) @@ -505,7 +510,8 @@ func fixup_gdt_reloc * fixup region. */ and x2, x30, #~(PAGE_SIZE_MASK) - sub x0, x2, x6 /* Diff(S) = Current Address - Compiled Address */ + subs x0, x2, x6 /* Diff(S) = Current Address - Compiled Address */ + b.eq 3f /* Diff(S) = 0. No relocation needed */ adrp x1, __GOT_START__ add x1, x1, :lo12:__GOT_START__ @@ -518,31 +524,32 @@ func fixup_gdt_reloc * The new_addr is the address currently the binary is executing from * and old_addr is the address at compile time. */ -1: - ldr x3, [x1] +1: ldr x3, [x1] + /* Skip adding offset if address is < lower limit */ cmp x3, x6 b.lo 2f + /* Skip adding offset if address is >= upper limit */ cmp x3, x7 - b.ge 2f + b.hs 2f add x3, x3, x0 str x3, [x1] -2: - add x1, x1, #8 + +2: add x1, x1, #8 cmp x1, x2 b.lo 1b /* Starting dynamic relocations. 
Use adrp/adr to get RELA_START and END */ - adrp x1, __RELA_START__ +3: adrp x1, __RELA_START__ add x1, x1, :lo12:__RELA_START__ adrp x2, __RELA_END__ add x2, x2, :lo12:__RELA_END__ + /* * According to ELF-64 specification, the RELA data structure is as * follows: - * typedef struct - * { + * typedef struct { * Elf64_Addr r_offset; * Elf64_Xword r_info; * Elf64_Sxword r_addend; @@ -550,16 +557,19 @@ func fixup_gdt_reloc * * r_offset is address of reference * r_info is symbol index and type of relocation (in this case - * 0x403 which corresponds to R_AARCH64_RELATIVE). + * code 1027 which corresponds to R_AARCH64_RELATIVE). * r_addend is constant part of expression. * * Size of Elf64_Rela structure is 24 bytes. */ -1: - /* Assert that the relocation type is R_AARCH64_RELATIVE */ + + /* Skip R_AARCH64_NONE entry with code 0 */ +1: ldr x3, [x1, #8] + cbz x3, 2f + #if ENABLE_ASSERTIONS - ldr x3, [x1, #8] - cmp x3, #0x403 + /* Assert that the relocation type is R_AARCH64_RELATIVE */ + cmp x3, #R_AARCH64_RELATIVE ASM_ASSERT(eq) #endif ldr x3, [x1] /* r_offset */ @@ -569,9 +579,10 @@ func fixup_gdt_reloc /* Skip adding offset if r_addend is < lower limit */ cmp x4, x6 b.lo 2f + /* Skip adding offset if r_addend entry is >= upper limit */ cmp x4, x7 - b.ge 2f + b.hs 2f add x4, x0, x4 /* Diff(S) + r_addend */ str x4, [x3] @@ -579,6 +590,5 @@ func fixup_gdt_reloc 2: add x1, x1, #24 cmp x1, x2 b.lo 1b - ret endfunc fixup_gdt_reloc |