/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

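/*
 * Batch-mode variants: text_poke_queue() records a patch site (copying
 * the opcode bytes) and text_poke_finish() applies all queued sites in
 * one INT3 install/sync/remove cycle, amortizing the synchronization
 * cost over many sites.
 */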
extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

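/*
 * Map a known single-byte opcode to its total instruction length;
 * returns 0 for opcodes this header does not describe.
 */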
static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn) \
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

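/*
 * Scratch buffer for building an instruction: the byte view covers the
 * largest patchable instruction, while the packed struct overlays the
 * common "one opcode byte + 32-bit displacement" encoding.
 */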
union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	int size = text_opcode_size(opcode);

	insn.opcode = opcode;

	if (size > 1) {
		insn.disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
		}
	}

	return &insn.text;
}
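
/*
 * Illustrative sketch, not part of the kernel API: retarget a live
 * 5-byte CALL site.  The helper name example_retarget_call() and the
 * @site/@new_target parameters are hypothetical.  The static buffer
 * returned by text_gen_insn() is copied by text_poke_bp() before it
 * returns, so it need not outlive this call.
 */
static __always_inline
void example_retarget_call(void *site, void *new_target)
{
	const void *code = text_gen_insn(CALL_INSN_OPCODE, site, new_target);

	/* Patch via INT3 breakpoint so concurrently executing CPUs are safe. */
	text_poke_bp(site, code, CALL_INSN_SIZE, NULL);
}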

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

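/*
 * Illustrative sketch (hypothetical helper, not kernel code): batch
 * several jump retargets so the INT3 install/sync/remove cycle in
 * text_poke_finish() is paid once rather than per site.  Assumes, as
 * the batching API implies, that text_poke_queue() copies the opcode
 * bytes before returning.
 */
static __always_inline
void example_retarget_jumps(void **sites, void **targets, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		text_poke_queue(sites[i],
				text_gen_insn(JMP32_INSN_OPCODE, sites[i], targets[i]),
				JMP32_INSN_SIZE, NULL);

	text_poke_finish();
}
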
#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

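/*
 * Emulate a CALL: push the address of the instruction following the
 * patched call site (regs->ip points just past the 1-byte INT3, so
 * adjust it to point just past the 5-byte CALL), then jump to the
 * target.
 */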
static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
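
/*
 * Illustrative sketch (hypothetical; it mirrors, but does not
 * reproduce, the actual poke_int3_handler() logic): a CPU that traps
 * on the temporary INT3 while a site is being patched makes forward
 * progress by emulating the instruction being written there.
 */
static __always_inline
void example_emulate_new_insn(struct pt_regs *regs, u8 opcode,
			      unsigned long dest)
{
	switch (opcode) {
	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, dest);
		break;
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, dest);
		break;
	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;
	case INT3_INSN_OPCODE:
		/* A deliberately written INT3 is left for its own handler. */
		break;
	}
}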
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */