/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);
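
/*
 * Usage sketch (illustrative, not kernel code): "site" is a hypothetical
 * address of a live 5-byte jump and "insn" the new instruction bytes.
 * A single live patch goes through text_poke_bp(); batches use
 * text_poke_queue() + text_poke_finish() so the costly serializing
 * IPIs are paid once per batch rather than once per site:
 *
 *	text_poke_bp(site, insn, JMP32_INSN_SIZE, NULL);
 *
 *	while ((site = next_site()))	// next_site() is hypothetical
 *		text_poke_queue(site, insn, JMP32_INSN_SIZE, NULL);
 *	text_poke_finish();
 */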

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)							\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}
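
/*
 * E.g. text_opcode_size(JMP32_INSN_OPCODE) returns JMP32_INSN_SIZE (5);
 * any opcode not listed in the table above yields 0.
 */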

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	int size = text_opcode_size(opcode);

	insn.opcode = opcode;

	if (size > 1) {
		insn.disp = (long)dest - (long)(addr + size);
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
		}
	}

	return &insn.text;
}
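
/*
 * Sketch of an assumed call site: generate a 5-byte CALL at "addr"
 * targeting "func" (both hypothetical) and patch it in live. Note the
 * static buffer above: the returned pointer must be consumed (copied
 * or poked) before the same call site generates another instruction.
 *
 *	text_poke_bp(addr, text_gen_insn(CALL_INSN_OPCODE, addr, func),
 *		     CALL_INSN_SIZE, NULL);
 */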

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the breakpoint happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/*
	 * regs->ip points just past the INT3 byte; rewind to the start of
	 * the patched instruction, then skip the full CALL to compute the
	 * return address the emulated CALL must push.
	 */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
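
/*
 * Sketch of how a breakpoint handler can use these helpers (assumed,
 * simplified shape of poke_int3_handler(); "patched_addr" and
 * "patching_call_target" are hypothetical state recorded by the
 * patching code):
 *
 *	if (regs->ip - INT3_INSN_SIZE == patched_addr) {
 *		int3_emulate_call(regs, patching_call_target);
 *		return 1;	// handled, resume at emulated target
 *	}
 *	return 0;		// not our breakpoint
 */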
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */