Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Code for tracing calls in Linux kernel. |
| 4 | * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de> |
| 5 | * |
| 6 | * based on code for x86 which is: |
| 7 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
| 8 | * |
| 9 | * future possible enhancements: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10 | * - add CONFIG_STACK_TRACER |
| 11 | */ |
| 12 | |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/ftrace.h> |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 15 | #include <linux/uaccess.h> |
| 16 | #include <linux/kprobes.h> |
| 17 | #include <linux/ptrace.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 18 | |
| 19 | #include <asm/assembly.h> |
| 20 | #include <asm/sections.h> |
| 21 | #include <asm/ftrace.h> |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 22 | #include <asm/patch.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 23 | |
/* __hot: place the function in the ".text.hot" (frequently-executed) section */
#define __hot __section(".text.hot")
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 25 | |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:    pointer to the saved return address (%rp slot on the stack)
 * @self_addr: address of the traced function
 *
 * If the function-graph core accepts the entry, *parent is redirected to
 * parisc_return_to_handler so that function exit can be traced as well.
 * Fix: restores "&current" which had been corrupted into a stray
 * "currency sign" character by an HTML-entity mis-encoding.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	/* graph tracing has been permanently shut down */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* this task has graph tracing paused */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
| 50 | |
/*
 * Common C entry point reached from the assembly ftrace stub for every
 * traced function.
 *
 * @parent:     return address of the traced function
 * @self_addr:  address of the traced function itself
 * @org_sp_gr3: original stack pointer, used to locate the saved %rp
 * @regs:       register state captured at the patch site
 */
void notrace __hot ftrace_function_trampoline(unsigned long parent,
					unsigned long self_addr,
					unsigned long org_sp_gr3,
					struct pt_regs *regs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * NOTE(review): presumably the ftrace core only exposes this
	 * declaration when CONFIG_DYNAMIC_FTRACE is set, hence the
	 * local extern here — confirm against <linux/ftrace.h>.
	 */
	extern ftrace_func_t ftrace_trace_function;
#endif
	extern struct ftrace_ops *function_trace_op;

	/* invoke the registered tracer callback, if tracing is enabled */
	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
	    ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(self_addr, parent,
				function_trace_op, regs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * Graph tracing active?  Compare through function descriptors,
	 * as parisc function pointers may be descriptor addresses.
	 */
	if (dereference_function_descriptor(ftrace_graph_return) !=
	    dereference_function_descriptor(ftrace_stub) ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
| 83 | |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * No-op on parisc: graph tracing needs no code patching here, since
 * ftrace_function_trampoline() checks ftrace_graph_return/ftrace_graph_entry
 * at runtime on every call.  Returning 0 reports success.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/* See ftrace_enable_ftrace_graph_caller(): nothing to undo. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif
| 95 | |
| 96 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 97 | |
/* No architecture-specific initialization needed for dynamic ftrace. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
/*
 * No-op: presumably nothing to patch because the trampoline reads
 * ftrace_trace_function indirectly (see ftrace_function_trampoline()) —
 * NOTE(review): confirm.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}
| 106 | |
/*
 * No-op: call sites are not retargeted individually on parisc;
 * reports success.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
| 112 | |
| 113 | unsigned long ftrace_call_adjust(unsigned long addr) |
| 114 | { |
| 115 | return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4; |
| 116 | } |
| 117 | |
/*
 * Enable tracing of the function described by @rec: replace the NOP pad
 * preceding rec->ip with a trampoline that branches to @addr.
 *
 * Returns 0 on success, -EINVAL if the patch area does not contain the
 * expected NOPs, or the error from reading the patch area.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	/* resolve the function descriptor to the actual entry address */
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	/*
	 * Alternate layout for patch sites where the 64-bit target
	 * address would otherwise land on an unaligned doubleword.
	 */
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	/* patch site not 8-byte aligned: use the alternate layout */
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	/* rec->ip is the last patchable word; the pad starts size-4 before it */
	ip = (void *)(rec->ip + 4 - size);

	/* verify the whole patch area still contains NOPs before patching */
	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
| 188 | |
| 189 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, |
| 190 | unsigned long addr) |
| 191 | { |
| 192 | u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; |
| 193 | int i; |
| 194 | |
| 195 | for (i = 0; i < ARRAY_SIZE(insn); i++) |
| 196 | insn[i] = INSN_NOP; |
| 197 | |
| 198 | __patch_text((void *)rec->ip, INSN_NOP); |
| 199 | __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), |
| 200 | insn, sizeof(insn)-4); |
| 201 | return 0; |
| 202 | } |
| 203 | #endif |
| 204 | |
| 205 | #ifdef CONFIG_KPROBES_ON_FTRACE |
/*
 * Handler for kprobes placed on ftrace patch sites (KPROBES_ON_FTRACE):
 * invoked through ftrace rather than via a break instruction.
 *
 * @ip:        address of the probed (traced) function
 * @parent_ip: caller's return address (unused here)
 * @ops:       ftrace_ops this handler is registered with (unused here)
 * @regs:      saved register state from the ftrace trampoline
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);

	/* no probe registered at this address, or it is disabled */
	if (unlikely(!p) || kprobe_disabled(p))
		return;

	/* another probe is already being handled: count it as missed */
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		return;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* present regs to the handlers as if stopped at the probed address */
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/*
		 * pre_handler absent or returned 0: resume execution
		 * after the probed instruction.
		 */
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
| 240 | |
/*
 * A kprobe on an ftrace site needs no out-of-line instruction slot —
 * presumably because it fires through the ftrace trampoline rather
 * than single-stepping a copied instruction (NOTE(review): confirm).
 */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
| 246 | #endif |