// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 * (wrappers over ARC DWARF based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach so there is a single copy of the unwinding core;
 *   APIs needing unwinding implement their specifics in the iterator:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing at (wchan)
 *      = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc)
 *
 * vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 *
 * rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 * Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 * - uses current values of SP and friends
	 */
	if (regs == NULL && (tsk == NULL || tsk == current)) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

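		/*
		 * Snapshot the live core registers. Per the ARC ABI: r27 is
		 * the frame pointer, r28 the stack pointer, r31 BLINK (the
		 * return address register) and r63 PCL (the current PC).
		 */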
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it will be in __switch_to, with the task's kernel
		 *    mode SP safe-kept and BLINK at a well known location
		 *    within that frame
		 */

		if (tsk->state == TASK_RUNNING)
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;
		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. DWARF assumes the CFA is FP
		 * based, but we didn't save FP: the value retrieved above is
		 * FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * DWARF rules are not generated for inline assembly code
		 * such as the __switch_to macro.
		 */
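		/*
		 * The 60 byte SP adjustment presumably steps over the
		 * register save area that __switch_to's prologue pushes
		 * before copying SP to FP.
		 */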
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}

	return 0;
}

#endif

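/*
 * Core iterator: unwinds the stack of @tsk (or the context in @regs),
 * feeding the PC of each frame to @consumer_fn until it returns -1,
 * a non kernel-text address is hit, or the unwinder gives up.
 * Returns the last address seen (0 if the unwind could not even be seeded).
 */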
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

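		/* PC for the next iteration is the just-unwound frame's
		 * return address (BLINK)
		 */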
		frame_info.regs.r63 = frame_info.regs.r31;

		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting!\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the DWARF based unwinder works. fp based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is set up (callee regs saved first, and
	 * only then fp, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop, which by default
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */
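/*
 * For illustration, a hypothetical consumer that only counts frames
 * (not part of this file) would plug in the same way:
 *
 *	static int __count_frames(unsigned int address, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	(0: keep unwinding, -1: stop)
 *	}
 *
 *	int depth = 0;
 *	arc_unwind_core(NULL, NULL, __count_frames, &depth);
 */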

/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *unused)
{
	printk(" %pS\n", (void *)address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into unwinding core to capture the
 * traces needed by kernel on /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}

/*-------------------------------------------------------------------------
 * APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	show_stacktrace(tsk, NULL);
}

/* Another API expected by the scheduler; shows up in "ps" as the Wait
 * Channel. Of course just returning schedule() would be pointless, so
 * keep unwinding until the first function not in scheduler code.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
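
/*
 * Caller-side sketch (hypothetical, for illustration only): the legacy
 * struct stack_trace API is driven by handing it an entries buffer:
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *	(trace.nr_entries now holds the number of PCs captured)
 */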
#endif