/*
 * ELF register definitions.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_ELF_H
#define _ASM_POWERPC_ELF_H

#include <linux/sched.h>	/* for task_struct */
#include <asm/page.h>
#include <asm/string.h>
#include <uapi/asm/elf.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x)	((x)->e_machine == EM_PPC)

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is raised to 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(is_32bit_task() ? 0x000400000UL : \
						   0x100000000UL)
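
/*
 * Worked values: 0x000400000UL is 4 MiB, used as the PIE base for
 * 32-bit tasks, while 0x100000000UL is 4 GiB, i.e. just above the
 * 32-bit address space, for 64-bit tasks.
 */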

#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)

/*
 * Our registers are always unsigned longs, whether we're a 32 bit
 * process or 64 bit, on either a 64 bit or 32 bit kernel.
 *
 * This macro relies on elf_regs[i] having the right type to truncate to,
 * either u32 or u64.  It defines the body of the elf_core_copy_regs
 * function, either the native one with elf_gregset_t elf_regs or
 * the 32-bit one with elf_gregset_t32 elf_regs.
 */
#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \
	int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \
			   (size_t)ELF_NGREG); \
	for (i = 0; i < nregs; i++) \
		elf_regs[i] = ((unsigned long *) regs)[i]; \
	memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0]))

/* Common routine for both 32-bit and 64-bit native processes */
static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
					  struct pt_regs *regs)
{
	PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
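
/*
 * A minimal sketch (not the actual definition, which lives with the
 * compat code) of the 32-bit variant mentioned above, assuming an
 * elf_gregset_t32 whose elements are u32 so each GPR is truncated:
 *
 *	static inline void ppc_elf_core_copy_regs_compat(
 *			elf_gregset_t32 elf_regs, struct pt_regs *regs)
 *	{
 *		PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
 *	}
 */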

typedef elf_vrregset_t elf_fpxregset_t;

/* ELF_HWCAP yields a mask that user programs can use to figure out what
   instruction set this cpu supports.  This could be done in userspace,
   but it's not easy, and we've already done it here.  */
# define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
# define ELF_HWCAP2	(cur_cpu_spec->cpu_user_features2)
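
/* Illustrative userspace usage (not part of this header): a program can
   test the mask from the aux vector, e.g.

	#include <sys/auxv.h>
	if (getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC)
		use_altivec();

   where use_altivec() is just a placeholder and PPC_FEATURE_HAS_ALTIVEC
   comes from the uapi cputable flags.  */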

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo. */

#define ELF_PLATFORM	(cur_cpu_spec->platform)

/* While ELF_PLATFORM indicates the ISA supported by the platform, it
 * may not accurately reflect the underlying behavior of the hardware
 * (as in the case of running in Power5+ compatibility mode on a
 * Power6 machine).  ELF_BASE_PLATFORM allows ld.so to load libraries
 * that are tuned for the real hardware.
 */
#define ELF_BASE_PLATFORM (powerpc_base_platform)

#ifdef __powerpc64__
# define ELF_PLAT_INIT(_r, load_addr)	do {	\
	_r->gpr[2] = load_addr;			\
} while (0)
#endif /* __powerpc64__ */

#ifdef __powerpc64__
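/*
 * The low two bits of e_flags carry the ELF ABI version on ppc64:
 * a value of 2 selects the ELFv2 ABI, anything else is treated as ELFv1.
 */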
# define SET_PERSONALITY(ex)					\
do {								\
	if (((ex).e_flags & 0x3) == 2)				\
		set_thread_flag(TIF_ELF2ABI);			\
	else							\
		clear_thread_flag(TIF_ELF2ABI);			\
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
		set_thread_flag(TIF_32BIT);			\
	else							\
		clear_thread_flag(TIF_32BIT);			\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & (~PER_MASK)));	\
} while (0)
/*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically. This
 * is only required to work around bugs in old 32-bit toolchains. Since
 * the 64-bit ABI has never had these issues, don't enable the workaround
 * even if we have an executable stack.
 */
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
		(exec_stk == EXSTACK_DEFAULT) : 0)
#else
# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
#endif /* __powerpc64__ */

extern int dcache_bsize;
extern int icache_bsize;
extern int ucache_bsize;

/* vDSO has arch_setup_additional_pages */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)

/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (is_32bit_task() ? \
	(0x7ff >> (PAGE_SHIFT - 12)) : \
	(0x3ffff >> (PAGE_SHIFT - 12)))
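
/*
 * Worked example with 4K pages (PAGE_SHIFT == 12): the mask is applied to
 * a page count, so 0x7ff pages * 4 KiB is ~8 MiB of stack randomisation
 * for 32-bit tasks and 0x3ffff pages * 4 KiB is ~1 GiB for 64-bit tasks;
 * larger page sizes shift the mask down so the byte range stays the same.
 */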

#ifdef CONFIG_SPU_BASE
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
#define NT_SPU		1

#define ARCH_HAVE_EXTRA_ELF_NOTES

#endif /* CONFIG_SPU_BASE */

#ifdef CONFIG_PPC64

#define get_cache_geometry(level) \
	(ppc64_caches.level.assoc << 16 | ppc64_caches.level.line_size)
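
/*
 * Example encoding: an 8-way cache with 128-byte lines packs to
 * (8 << 16) | 128 == 0x00080080, i.e. associativity in the upper
 * half-word, line size in the lower.
 */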

#define ARCH_DLINFO_CACHE_GEOMETRY \
	NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);		\
	NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));	\
	NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);		\
	NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));	\
	NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);		\
	NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));	\
	NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);		\
	NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, get_cache_geometry(l3))

#else
#define ARCH_DLINFO_CACHE_GEOMETRY
#endif

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
 */
#define ARCH_DLINFO							\
do {									\
	/* Handle glibc compatibility. */				\
	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
	/* Cache size items */						\
	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base);	\
	ARCH_DLINFO_CACHE_GEOMETRY;					\
} while (0)

#endif /* _ASM_POWERPC_ELF_H */