blob: 7df6b2642ddb0d32ab57f23c57c47b6ed2076b93 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Dynamic function tracer architecture backend.
4 *
5 * Copyright IBM Corp. 2009,2014
6 *
7 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/moduleloader.h>
12#include <linux/hardirq.h>
13#include <linux/uaccess.h>
14#include <linux/ftrace.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/kprobes.h>
18#include <trace/syscall.h>
19#include <asm/asm-offsets.h>
20#include <asm/cacheflush.h>
21#include <asm/set_memory.h>
22#include "entry.h"
23
24/*
25 * The mcount code looks like this:
26 * stg %r14,8(%r15) # offset 0
27 * larl %r1,<&counter> # offset 6
28 * brasl %r14,_mcount # offset 12
29 * lg %r14,8(%r15) # offset 18
30 * Total length is 24 bytes. Only the first instruction will be patched
31 * by ftrace_make_call / ftrace_make_nop.
32 * The enabled ftrace code block looks like this:
33 * > brasl %r0,ftrace_caller # offset 0
34 * larl %r1,<&counter> # offset 6
35 * brasl %r14,_mcount # offset 12
36 * lg %r14,8(%r15) # offset 18
37 * The ftrace function gets called with a non-standard C function call ABI
38 * where r0 contains the return address. It is also expected that the called
39 * function only clobbers r0 and r1, but restores r2-r15.
40 * For module code we can't directly jump to ftrace caller, but need a
41 * trampoline (ftrace_plt), which clobbers also r1.
42 * The return point of the ftrace function has offset 24, so execution
43 * continues behind the mcount block.
44 * The disabled ftrace code block looks like this:
45 * > jg .+24 # offset 0
46 * larl %r1,<&counter> # offset 6
47 * brasl %r14,_mcount # offset 12
48 * lg %r14,8(%r15) # offset 18
49 * The jg instruction branches to offset 24 to skip as many instructions
50 * as possible.
51 * In case we use gcc's hotpatch feature the original and also the disabled
52 * function prologue contains only a single six byte instruction and looks
53 * like this:
54 * > brcl 0,0 # offset 0
55 * To enable ftrace the code gets patched like above and afterwards looks
56 * like this:
57 * > brasl %r0,ftrace_caller # offset 0
58 */
59
/*
 * Function actually called by the ftrace_caller trampoline;
 * starts out as ftrace_stub and is switched by
 * ftrace_update_ftrace_func() below.
 */
void *ftrace_func __read_mostly = ftrace_stub;
/* Address of the module trampoline page, set up in ftrace_plt_init(). */
unsigned long ftrace_plt;
62
/*
 * Reconstruct the first, to-be-patched instruction of the original,
 * untouched function prologue (see the layout comment above): with the
 * compiler's hotpatch/nop-mcount support that is a six byte nop,
 * otherwise it is the first instruction of the mcount block.
 */
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}
75
/*
 * Report whether a kprobe breakpoint currently sits on the ftrace
 * location, i.e. whether the fetched instruction starts with the
 * kprobes breakpoint opcode.  Always false without CONFIG_KPROBES.
 */
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	return insn->opc == BREAKPOINT_INSTRUCTION ? 1 : 0;
#else
	return 0;
#endif
}
84
/*
 * Build the instruction a kprobe'd ftrace location carries while
 * tracing is disabled: the breakpoint opcode followed by the
 * KPROBE_ON_FTRACE_NOP marker, which tells the kprobes handler to
 * emulate a nop.  No-op without CONFIG_KPROBES.
 */
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}
92
/*
 * Build the instruction a kprobe'd ftrace location carries while
 * tracing is enabled: the breakpoint opcode followed by the
 * KPROBE_ON_FTRACE_CALL marker, which tells the kprobes handler to
 * execute the ftrace call (brasl).  No-op without CONFIG_KPROBES.
 */
static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
100
/*
 * Intentionally a no-op: the patched call instruction at rec->ip is
 * generated from rec->ip alone (see ftrace_generate_call_insn()), so it
 * presumably does not encode the tracer address and switching from
 * old_addr to addr requires no code change.  NOTE(review): confirm
 * against asm/ftrace.h that the call target is indeed addr-independent.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
106
/*
 * Disable tracing at rec->ip by replacing the patch site with a nop.
 * Three cases are distinguished by what is expected to be there now:
 * initial conversion of the compiler-generated prologue (addr ==
 * MCOUNT_ADDR), a location covered by a kprobe breakpoint, or a
 * regular enabled ftrace call.  The current code is read and verified
 * against the expectation before anything is written, so a concurrent
 * modification or unexpected content fails with -EINVAL instead of
 * corrupting the kernel text.
 *
 * Returns 0 on success, -EFAULT if the site can't be read, -EINVAL on
 * content mismatch.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop, if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the to be replaced code matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	/* s390_kernel_write handles patching of write-protected text. */
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}
139
140int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
141{
142 struct ftrace_insn orig, new, old;
143
144 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
145 return -EFAULT;
146 if (is_kprobe_on_ftrace(&old)) {
147 /*
148 * If we find a breakpoint instruction, a kprobe has been
149 * placed at the beginning of the function. We write the
150 * constant KPROBE_ON_FTRACE_CALL into the remaining four
151 * bytes of the original instruction so that the kprobes
152 * handler can execute a brasl if it reaches this breakpoint.
153 */
154 ftrace_generate_kprobe_nop_insn(&orig);
155 ftrace_generate_kprobe_call_insn(&new);
156 } else {
157 /* Replace nop with an ftrace call. */
158 ftrace_generate_nop_insn(&orig);
159 ftrace_generate_call_insn(&new, rec->ip);
160 }
161 /* Verify that the to be replaced code matches what we expect. */
162 if (memcmp(&orig, &old, sizeof(old)))
163 return -EINVAL;
164 s390_kernel_write((void *) rec->ip, &new, sizeof(new));
165 return 0;
166}
167
168int ftrace_update_ftrace_func(ftrace_func_t func)
169{
Olivier Deprez0e641232021-09-23 10:07:05 +0200170 ftrace_func = func;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000171 return 0;
172}
173
/* No architecture specific initialization is needed on s390. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
178
179#ifdef CONFIG_MODULES
180
/*
 * Set up the trampoline used by module code to reach ftrace_caller,
 * which may be out of range for a direct brasl from a module.
 * Byte layout of the allocated page:
 *   0: basr %r1,0           # r1 = address of offset 2
 *   2: lg   %r1,10(%r1)     # load 8-byte FTRACE_ADDR from offset 12
 *   8: br   %r1             # branch to ftrace_caller
 *  12: FTRACE_ADDR          # 64-bit literal (hi word, then lo word)
 * The page is made read-only once populated.  Allocation failure this
 * early is unrecoverable, hence the panic.
 */
static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
198
199#endif /* CONFIG_MODULES */
200
201#ifdef CONFIG_FUNCTION_GRAPH_TRACER
202/*
203 * Hook the return address and push it in the stack of return addresses
204 * in current thread info.
205 */
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * @ra: the traced function's original return address
 * @sp: stack pointer of the traced function (used as the retp cookie)
 * @ip: address just behind the mcount block; rewound by
 *      MCOUNT_INSN_SIZE to get the traced function's entry
 *
 * Returns the (possibly replaced) return address: return_to_handler if
 * the graph tracer accepted the entry, otherwise the original ra.
 * Bails out unchanged when the graph tracer is dead or tracing is
 * paused for this task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
220
221/*
222 * Patch the kernel code at ftrace_graph_caller location. The instruction
223 * there is branch relative on condition. To enable the ftrace graph code
224 * block, we simply patch the mask field of the instruction to zero and
225 * turn the instruction into a nop.
226 * To disable the ftrace graph code the mask field will be patched to
227 * all ones, which turns the instruction into an unconditional branch.
228 */
229int ftrace_enable_ftrace_graph_caller(void)
230{
231 u8 op = 0x04; /* set mask field to zero */
232
233 s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
234 return 0;
235}
236
237int ftrace_disable_ftrace_graph_caller(void)
238{
239 u8 op = 0xf4; /* set mask field to all ones */
240
241 s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
242 return 0;
243}
244
245#endif /* CONFIG_FUNCTION_GRAPH_TRACER */