/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
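
/*
 * Illustrative sketch (not part of this header's API): a typical unwinder
 * primes a stackframe with start_backtrace() below, then steps with
 * unwind_frame() until it fails, consuming frame.pc at each step, e.g.:
 *
 *	struct stackframe frame;
 *
 *	start_backtrace(&frame,
 *			(unsigned long)__builtin_frame_address(0),
 *			(unsigned long)some_entry_point);
 *	do {
 *		pr_info(" %pS\n", (void *)frame.pc);
 *	} while (!unwind_frame(current, &frame));
 *
 * "some_entry_point" is a placeholder; real callers pass a known pc such
 * as the address of the function starting the trace.
 */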

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}
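
/*
 * Illustrative sketch (an approximation of the check performed in
 * arch/arm64/kernel/stacktrace.c, not a verbatim copy): unwind_frame()
 * rejects a frame whose fp does not lie on an accessible stack, e.g.:
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, frame->fp, &info))
 *		return -EINVAL;
 */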

static inline void start_backtrace(struct stackframe *frame,
				   unsigned long fp, unsigned long pc)
{
	frame->fp = fp;
	frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame->graph = 0;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_frame() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
	frame->prev_fp = 0;
	frame->prev_type = STACK_TYPE_UNKNOWN;
}
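
/*
 * Illustrative sketch (assumed usage, modelled on callers such as
 * dump_backtrace()): the frame is primed either from saved exception
 * registers (x29 is the frame pointer) or from the current frame, e.g.:
 *
 *	if (regs)
 *		start_backtrace(&frame, regs->regs[29], regs->pc);
 *	else
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)dump_backtrace);
 */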

#endif	/* __ASM_STACKTRACE_H */