/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
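/*
 * Illustrative sketch, not part of the original file: a minimal
 * consume_entry callback of the kind the generic stacktrace code passes to
 * arch_stack_walk().  It stores each return address into a caller-supplied
 * buffer and stops the walk when the buffer is full.  The names
 * example_trace_buf, example_consume_entry and example_save_trace are
 * hypothetical; the real glue lives in kernel/stacktrace.c.
 */
struct example_trace_buf {
	unsigned long *store;	/* output array for return addresses */
	unsigned int size;	/* capacity of @store */
	unsigned int len;	/* entries written so far */
};

static bool __maybe_unused example_consume_entry(void *cookie, unsigned long addr)
{
	struct example_trace_buf *buf = cookie;

	if (buf->len >= buf->size)
		return false;		/* buffer full: stop the walk */

	buf->store[buf->len++] = addr;
	return true;			/* keep unwinding */
}

static unsigned int __maybe_unused example_save_trace(unsigned long *store,
						      unsigned int size)
{
	struct example_trace_buf buf = { .store = store, .size = size };

	/* Walk the current task's kernel stack; no pt_regs supplied. */
	arch_stack_walk(example_consume_entry, &buf, current, NULL);
	return buf.len;
}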

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}
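
/*
 * Illustrative sketch, not part of the original file: how a caller that
 * needs a trustworthy trace (e.g. live patching) might use
 * arch_stack_walk_reliable().  It reuses the hypothetical example_trace_buf
 * and example_consume_entry helpers sketched above; the real entry point for
 * such callers is stack_trace_save_tsk_reliable() in kernel/stacktrace.c.
 */
static int __maybe_unused example_reliable_trace(struct task_struct *task,
						 unsigned long *store,
						 unsigned int size)
{
	struct example_trace_buf buf = { .store = store, .size = size };
	int ret;

	/* Per the comment above, @task must be inactive unless it is current. */
	ret = arch_stack_walk_reliable(example_consume_entry, &buf, task);
	if (ret)
		return ret;	/* unreliable: any partial trace must be discarded */

	return buf.len;
}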

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * Layout of one user-space stack frame as created by a standard
 * frame-pointer prologue: the saved previous frame pointer, followed by the
 * return address pushed by the call instruction.
 */
struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	/* The frame must lie entirely within the user address range. */
	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	/*
	 * Disable page faults so that an unmapped or swapped-out frame makes
	 * the copy fail instead of faulting in user pages.
	 */
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}
111
David Brazdil0f672f62019-12-10 10:32:29 +0000112void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
113 const struct pt_regs *regs)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000114{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000115 const void __user *fp = (const void __user *)regs->bp;
116
Olivier Deprez157378f2022-04-04 15:47:50 +0200117 if (!consume_entry(cookie, regs->ip))
David Brazdil0f672f62019-12-10 10:32:29 +0000118 return;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000119
David Brazdil0f672f62019-12-10 10:32:29 +0000120 while (1) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000121 struct stack_frame_user frame;
122
123 frame.next_fp = NULL;
124 frame.ret_addr = 0;
125 if (!copy_stack_frame(fp, &frame))
126 break;
127 if ((unsigned long)fp < regs->sp)
128 break;
David Brazdil0f672f62019-12-10 10:32:29 +0000129 if (!frame.ret_addr)
130 break;
Olivier Deprez157378f2022-04-04 15:47:50 +0200131 if (!consume_entry(cookie, frame.ret_addr))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000132 break;
133 fp = frame.next_fp;
134 }
135}
136
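
/*
 * Illustrative sketch, not part of the original file: saving the current
 * task's user-space backtrace with arch_stack_walk_user().  Kernel threads
 * have no user stack, so they are skipped.  example_save_user_trace and the
 * helpers it reuses are hypothetical; the real wrapper is
 * stack_trace_save_user() in kernel/stacktrace.c.
 */
static unsigned int __maybe_unused example_save_user_trace(unsigned long *store,
							    unsigned int size)
{
	struct example_trace_buf buf = { .store = store, .size = size };

	if (current->flags & PF_KTHREAD)
		return 0;

	/* task_pt_regs() yields the user registers saved at kernel entry. */
	arch_stack_walk_user(example_consume_entry, &buf, task_pt_regs(current));
	return buf.len;
}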