/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

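/*
 * Indices into the page_list array that machine_kexec() hands to
 * relocate_kernel(); each slot carries the physical (PA_*) or virtual
 * (VA_*) address of one of the pages set up for the relocation.
 */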
#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

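/*
 * Upper bound on the size of the relocate_kernel() code that is copied
 * into the control page.
 */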
# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory and
 * does not require kmap.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START	(0UL)
#define KEXEC_BACKUP_SRC_END	(640 * 1024UL)	/* 640K */
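/*
 * The low 640K may be clobbered while the crash (kdump) kernel boots, so
 * its original contents are copied into a backup region beforehand and
 * the dump reads that range from the copy.
 */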

/*
 * The CPU does not save ss and sp on the stack if execution was already
 * in kernel mode when the NMI occurred.  This code fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     :"=a"(newregs->ss));
#endif
}

/*
 * This function captures the register state when we come in via panic;
 * otherwise it only fixes up ss and sp, since we came in via a
 * kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		newregs->ip = (unsigned long)current_text_addr();
	}
}
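
/*
 * Rough sketch of how the crash path uses the helper above (compare
 * __crash_kexec() in kernel/kexec_core.c); names here are illustrative:
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */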

#ifdef CONFIG_X86_32
/*
 * Implemented in arch/x86/kernel/relocate_kernel_32.S: runs from the
 * control page, copies the new kernel into place by walking the
 * indirection page list and then jumps to start_address.
 */
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
/*
 * Implemented in arch/x86/kernel/relocate_kernel_64.S; same job as the
 * 32-bit variant, with sme_active indicating whether memory encryption
 * is in use.
 */
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif

#define ARCH_HAS_KIMAGE_ARCH

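/*
 * Architecture-private state hung off struct kimage.  The page-table
 * pointers are the pages allocated by machine_kexec_prepare() for the
 * temporary mapping used while relocating; the extra 64-bit fields track
 * the kdump backup region and the crash ELF headers built by the
 * kexec_file loader.
 */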
#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of the fields in this structure must match the
 * layout expected by arch/x86/purgatory/entry64.S.  If you change one,
 * make the corresponding change in purgatory as well.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
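
/*
 * Rough sketch of how the bzImage64 kexec_file loader fills this in and
 * pokes it into the purgatory blob (compare
 * arch/x86/kernel/kexec-bzimage64.c; the *_load_addr names are
 * illustrative):
 *
 *	struct kexec_entry64_regs regs64 = { };
 *
 *	regs64.rbx = 0;                          // bootstrap processor
 *	regs64.rsi = bootparam_load_addr;        // boot_params for the new kernel
 *	regs64.rip = kernel_load_addr + 0x200;   // 64-bit entry point
 *	kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *				       sizeof(regs64), 0);
 *
 * (rsp is pointed at purgatory's own stack before the jump.)
 */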

/*
 * With SME active, pages handed to kexec have to be mapped decrypted;
 * these hooks (implemented in arch/x86/kernel/machine_kexec_64.c) flip
 * the encryption attribute when control pages are allocated and freed.
 */
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

#endif

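/*
 * crash_vmclear_loaded_vmcss is a hook KVM installs so that any loaded
 * VMCSs can be cleared before we boot into the crash kernel;
 * kdump_nmi_shootdown_cpus() uses NMIs to stop the other CPUs and save
 * their register state before kdump.
 */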
typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */