// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 *	(wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach to have a single copy of the unwinding core
 *   shared by all APIs needing unwinding; the iterator implements the
 *   logic regarding:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing (wchan)
 *      = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc)
 *
 * vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 *
 * rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 * Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
	if (regs) {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	} else if (tsk == NULL || tsk == current) {
		/*
		 * Synchronous unwinding (e.g. dump_stack)
		 *  - uses current values of SP and friends
		 */
		unsigned long fp, sp, blink, ret;

		frame_info->task = current;

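		/*
		 * Editorial note, not in the upstream file: on ARC, r27 is
		 * FP, r28 is SP, r31 is BLINK (return address) and r63 is
		 * PCL, which is why these four are snapshot below.
		 */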
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it is in __switch_to: the task's kernel mode SP
		 *    is safe-kept there, with BLINK at a well known location
		 */

		if (tsk->state == TASK_RUNNING)
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;
		/* In the prologue of __switch_to, FP is first saved on the
		 * stack and then SP is copied to FP. Dwarf assumes the cfa
		 * is FP based, but we didn't save FP. The value retrieved
		 * above is FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * the __switch_to macro.
		 */
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	}
	return 0;
}

#endif

notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

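		/* the unwound frame's return address (BLINK) is the next PC */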
		frame_info.regs.r63 = frame_info.regs.r31;

		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting!\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the dwarf based unwinder works. FP based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is setup (callee regs saved first, then
	 * fp set, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * A callback can return -1 to force the iterator to stop; by default it
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

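/*
 * Illustrative sketch, not part of the upstream file: a minimal consumer
 * wired per the convention above. It records just the top-most frame's PC
 * and returns -1 so the iterator stops right away. The name is made up; a
 * caller would do something like:
 *	unsigned int pc;
 *	arc_unwind_core(current, NULL, __example_top_pc, &pc);
 */
static __maybe_unused int __example_top_pc(unsigned int address, void *arg)
{
	*(unsigned int *)arg = address;	/* hand the frame's PC to the caller */
	return -1;			/* -1 stops the unwind iterator */
}
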
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/oops/BUG etc
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	printk("%s %pS\n", loglvl, (void *)address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into unwinding core to capture the
 * traces needed by the kernel for /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}

/*-------------------------------------------------------------------------
 * APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	show_stacktrace(tsk, NULL, loglvl);
}

/* Another API expected by the scheduler, shows up in "ps" as Wait Channel.
 * Of course just returning schedule() would be pointless, so unwind until
 * the function is no longer in scheduler code.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
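
/*
 * Illustrative sketch, not part of the upstream file: how a caller would
 * typically drive save_stack_trace(). The helper name is hypothetical;
 * stack_trace_print() is the generic kernel printer for such entries.
 */
static __maybe_unused void __example_print_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this helper's own frame */
	};

	save_stack_trace(&trace);
	stack_trace_print(entries, trace.nr_entries, 0);
}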
#endif