blob: 86a57fb0e6faebe01a63d7eb0927c31713f79d3c [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel unwinding support
4 *
5 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
6 *
7 * Derived partially from the IA64 implementation. The PA-RISC
8 * Runtime Architecture Document is also a useful reference to
9 * understand what is happening here
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/sort.h>
17
18#include <linux/uaccess.h>
19#include <asm/assembly.h>
20#include <asm/asm-offsets.h>
21#include <asm/ptrace.h>
22
23#include <asm/unwind.h>
Olivier Deprez157378f2022-04-04 15:47:50 +020024#include <asm/switch_to.h>
25#include <asm/sections.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000026
27/* #define DEBUG 1 */
28#ifdef DEBUG
29#define dbg(x...) pr_debug(x)
30#else
31#define dbg(x...)
32#endif
33
34#define KERNEL_START (KERNEL_BINARY_TEXT_START)
35
36extern struct unwind_table_entry __start___unwind[];
37extern struct unwind_table_entry __stop___unwind[];
38
39static DEFINE_SPINLOCK(unwind_lock);
40/*
41 * the kernel unwind block is not dynamically allocated so that
42 * we can call unwind_init as early in the bootup process as
43 * possible (before the slab allocator is initialized)
44 */
David Brazdil0f672f62019-12-10 10:32:29 +000045static struct unwind_table kernel_unwind_table __ro_after_init;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000046static LIST_HEAD(unwind_tables);
47
48static inline const struct unwind_table_entry *
49find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
50{
51 const struct unwind_table_entry *e = NULL;
52 unsigned long lo, hi, mid;
53
54 lo = 0;
55 hi = table->length - 1;
56
57 while (lo <= hi) {
58 mid = (hi - lo) / 2 + lo;
59 e = &table->table[mid];
60 if (addr < e->region_start)
61 hi = mid - 1;
62 else if (addr > e->region_end)
63 lo = mid + 1;
64 else
65 return e;
66 }
67
68 return NULL;
69}
70
71static const struct unwind_table_entry *
72find_unwind_entry(unsigned long addr)
73{
74 struct unwind_table *table;
75 const struct unwind_table_entry *e = NULL;
76
77 if (addr >= kernel_unwind_table.start &&
78 addr <= kernel_unwind_table.end)
79 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
80 else {
81 unsigned long flags;
82
83 spin_lock_irqsave(&unwind_lock, flags);
84 list_for_each_entry(table, &unwind_tables, list) {
85 if (addr >= table->start &&
86 addr <= table->end)
87 e = find_unwind_entry_in_table(table, addr);
88 if (e) {
89 /* Move-to-front to exploit common traces */
90 list_move(&table->list, &unwind_tables);
91 break;
92 }
93 }
94 spin_unlock_irqrestore(&unwind_lock, flags);
95 }
96
97 return e;
98}
99
100static void
101unwind_table_init(struct unwind_table *table, const char *name,
102 unsigned long base_addr, unsigned long gp,
103 void *table_start, void *table_end)
104{
105 struct unwind_table_entry *start = table_start;
106 struct unwind_table_entry *end =
107 (struct unwind_table_entry *)table_end - 1;
108
109 table->name = name;
110 table->base_addr = base_addr;
111 table->gp = gp;
112 table->start = base_addr + start->region_start;
113 table->end = base_addr + end->region_end;
114 table->table = (struct unwind_table_entry *)table_start;
115 table->length = end - start + 1;
116 INIT_LIST_HEAD(&table->list);
117
118 for (; start <= end; start++) {
119 if (start < end &&
120 start->region_end > (start+1)->region_start) {
121 pr_warn("Out of order unwind entry! %px and %px\n",
122 start, start+1);
123 }
124
125 start->region_start += base_addr;
126 start->region_end += base_addr;
127 }
128}
129
130static int cmp_unwind_table_entry(const void *a, const void *b)
131{
132 return ((const struct unwind_table_entry *)a)->region_start
133 - ((const struct unwind_table_entry *)b)->region_start;
134}
135
136static void
137unwind_table_sort(struct unwind_table_entry *start,
138 struct unwind_table_entry *finish)
139{
140 sort(start, finish - start, sizeof(struct unwind_table_entry),
141 cmp_unwind_table_entry, NULL);
142}
143
144struct unwind_table *
145unwind_table_add(const char *name, unsigned long base_addr,
146 unsigned long gp,
147 void *start, void *end)
148{
149 struct unwind_table *table;
150 unsigned long flags;
151 struct unwind_table_entry *s = (struct unwind_table_entry *)start;
152 struct unwind_table_entry *e = (struct unwind_table_entry *)end;
153
154 unwind_table_sort(s, e);
155
156 table = kmalloc(sizeof(struct unwind_table), GFP_USER);
157 if (table == NULL)
158 return NULL;
159 unwind_table_init(table, name, base_addr, gp, start, end);
160 spin_lock_irqsave(&unwind_lock, flags);
161 list_add_tail(&table->list, &unwind_tables);
162 spin_unlock_irqrestore(&unwind_lock, flags);
163
164 return table;
165}
166
167void unwind_table_remove(struct unwind_table *table)
168{
169 unsigned long flags;
170
171 spin_lock_irqsave(&unwind_lock, flags);
172 list_del(&table->list);
173 spin_unlock_irqrestore(&unwind_lock, flags);
174
175 kfree(table);
176}
177
178/* Called from setup_arch to import the kernel unwind info */
179int __init unwind_init(void)
180{
181 long start, stop;
182 register unsigned long gp __asm__ ("r27");
183
184 start = (long)&__start___unwind[0];
185 stop = (long)&__stop___unwind[0];
186
187 dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
188 start, stop,
189 (stop - start) / sizeof(struct unwind_table_entry));
190
191 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
192 gp,
193 &__start___unwind[0], &__stop___unwind[0]);
194#if 0
195 {
196 int i;
197 for (i = 0; i < 10; i++)
198 {
199 printk("region 0x%x-0x%x\n",
200 __start___unwind[i].region_start,
201 __start___unwind[i].region_end);
202 }
203 }
204#endif
205 return 0;
206}
207
Olivier Deprez157378f2022-04-04 15:47:50 +0200208static bool pc_is_kernel_fn(unsigned long pc, void *fn)
209{
210 return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
211}
212
/*
 * Handle frames of hand-written assembly entry points whose prologues
 * cannot be decoded by the normal instruction scan in
 * unwind_frame_regs().  On a match, fill in info->prev_sp/prev_ip
 * (and possibly info->rp) and return 1; return 0 if pc is none of the
 * known special functions.
 */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const handle_interruption;
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		/* A pt_regs block sits below this frame; recover the
		 * interrupted sp/ip from it. */
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	/* Bottom of a kernel thread / syscall path: nothing to unwind to. */
	if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
	    pc_is_kernel_fn(pc, syscall_exit)) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc_is_kernel_fn(pc, intr_return)) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	/* Context switch: the callee-save frame holds the saved rp. */
	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc_is_kernel_fn(pc, _switch_to_ret)) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	/* Crossing from the IRQ stack back to the interrupted stack:
	 * the previous sp/rp were stored below the call frame. */
	if (pc_is_kernel_fn(pc, _call_on_stack)) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif
	return 0;
}
272
/*
 * Compute the caller's frame for the frame currently described by
 * info (ip/sp), storing the result in info->prev_sp/prev_ip.
 *
 * If unwind data exists for info->ip, the function prologue is decoded
 * to recover the frame size and the saved return-pointer slot;
 * otherwise a heuristic blind scan of the stack is performed.
 * prev_sp/prev_ip of 0 signals the end of the backtrace.
 */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		/* Walk down in 64-byte (minimum frame) steps. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* The stack is at the end inside the thread_union
			 * struct. If we reach data, we have reached the
			 * beginning of the stack and should stop unwinding. */
			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
						+ THREAD_SZ_ALGN)) {
				info->prev_sp = 0;
				break;
			}

			/* get_user() also guards against faulting reads. */
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		/* Decode the prologue (up to but not including info->ip),
		 * accumulating stack adjustments until the recorded frame
		 * size is reached and the rp save slot (if any) is found. */
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		/* Clamp in case the scan overshot the declared size. */
		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		/* Assembly entry points get special-cased; otherwise walk
		 * back by the computed frame size and fetch the saved rp. */
		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
385
386void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
387 struct pt_regs *regs)
388{
389 memset(info, 0, sizeof(struct unwind_frame_info));
390 info->t = t;
391 info->sp = regs->gr[30];
392 info->ip = regs->iaoq[0];
393 info->rp = regs->gr[2];
394 info->r31 = regs->gr[31];
395
396 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
397 t ? (int)t->pid : -1, info->sp, info->ip);
398}
399
400void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
401{
402 struct pt_regs *r = &t->thread.regs;
403 struct pt_regs *r2;
404
405 r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
406 if (!r2)
407 return;
408 *r2 = *r;
409 r2->gr[30] = r->ksp;
410 r2->iaoq[0] = r->kpc;
411 unwind_frame_init(info, t, r2);
412 kfree(r2);
413}
414
/* Read the current stack pointer (%r30 on PA-RISC). */
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})
420
421void unwind_frame_init_task(struct unwind_frame_info *info,
422 struct task_struct *task, struct pt_regs *regs)
423{
424 task = task ? task : current;
425
426 if (task == current) {
427 struct pt_regs r;
428
429 if (!regs) {
430 memset(&r, 0, sizeof(r));
431 r.iaoq[0] = _THIS_IP_;
432 r.gr[2] = _RET_IP_;
433 r.gr[30] = get_parisc_stackpointer();
434 regs = &r;
435 }
436 unwind_frame_init(info, task, regs);
437 } else {
438 unwind_frame_init_from_blocked_task(info, task);
439 }
440}
441
442int unwind_once(struct unwind_frame_info *next_frame)
443{
444 unwind_frame_regs(next_frame);
445
446 if (next_frame->prev_sp == 0 ||
447 next_frame->prev_ip == 0)
448 return -1;
449
450 next_frame->sp = next_frame->prev_sp;
451 next_frame->ip = next_frame->prev_ip;
452 next_frame->prev_sp = 0;
453 next_frame->prev_ip = 0;
454
455 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
456 next_frame->t ? (int)next_frame->t->pid : -1,
457 next_frame->sp, next_frame->ip);
458
459 return 0;
460}
461
462int unwind_to_user(struct unwind_frame_info *info)
463{
464 int ret;
465
466 do {
467 ret = unwind_once(info);
468 } while (!ret && !(info->ip & 3));
469
470 return ret;
471}
472
473unsigned long return_address(unsigned int level)
474{
475 struct unwind_frame_info info;
476
477 /* initialize unwind info */
478 unwind_frame_init_task(&info, current, NULL);
479
480 /* unwind stack */
481 level += 2;
482 do {
483 if (unwind_once(&info) < 0 || info.ip == 0)
484 return 0;
485 if (!kernel_text_address(info.ip))
486 return 0;
487 } while (info.ip && level--);
488
489 return info.ip;
490}