// SPDX-License-Identifier: GPL-2.0-only
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

#include "kstack.h"

static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct task_struct *t;
	int graph = 0;
#endif

	if (tp == current_thread_info()) {
		/*
		 * Tracing ourselves: flush the register windows to the
		 * stack so the saved frame chain is current, then start
		 * from the live frame pointer.
		 */
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		/*
		 * Another task: start from the kernel stack pointer
		 * saved at its last context switch.
		 */
		ksp = tp->ksp;
	}

	/* sparc64 keeps %sp/%fp biased; add STACK_BIAS for the real address. */
	fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	t = tp->task;
#endif
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		/* Stop once the frame pointer leaves this task's kernel stack. */
		if (!kstack_valid(tp, fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/*
			 * Trap frame: only walk through traps taken from
			 * privileged (kernel) mode, resuming at the trapped
			 * PC and the frame pointer saved in the trap regs.
			 */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			/* Ordinary frame: record the caller's PC and follow
			 * the saved frame pointer.
			 */
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc)) {
			trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			/*
			 * The function graph tracer rewrites saved return
			 * addresses to land in return_to_handler (sparc
			 * return addresses point at the call; execution
			 * resumes at pc + 8).  Recover and record the
			 * original return address as well.
			 */
			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
				struct ftrace_ret_stack *ret_stack;
				ret_stack = ftrace_graph_get_ret_stack(t,
								       graph);
				if (ret_stack) {
					pc = ret_stack->ret;
					if (trace->nr_entries <
					    trace->max_entries)
						trace->entries[trace->nr_entries++] = pc;
					graph++;
				}
			}
#endif
		}
	} while (trace->nr_entries < trace->max_entries);
}

/* Capture a stack trace of the current task. */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

/* Capture a stack trace of @tsk, filtering out scheduler internals. */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct thread_info *tp = task_thread_info(tsk);

	__save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
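
/*
 * Usage sketch, assuming the classic struct stack_trace API from
 * <linux/stacktrace.h> that these exports implement: a caller might
 * capture entries into a fixed-size buffer roughly as follows (buffer
 * size and the pr_info() reporting are illustrative choices only):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *	unsigned int i;
 *
 *	save_stack_trace(&trace);
 *	for (i = 0; i < trace.nr_entries; i++)
 *		pr_info("%pS\n", (void *)trace.entries[i]);
 */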