/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
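
/*
 * Illustrative sketch, not part of the original file: a minimal
 * stack_trace_consume_fn callback matching the two-argument signature
 * used above.  The example_trace structure and function names are
 * hypothetical; the generic helpers in kernel/stacktrace.c provide the
 * real consumers.
 */
struct example_trace {
	unsigned long	*entries;	/* output buffer */
	unsigned int	nr;		/* entries stored so far */
	unsigned int	max;		/* buffer capacity */
};

static bool example_consume_entry(void *cookie, unsigned long addr)
{
	struct example_trace *trace = cookie;

	if (trace->nr >= trace->max)
		return false;		/* buffer full: stop the walk */
	trace->entries[trace->nr++] = addr;
	return true;			/* keep walking */
}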

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}
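
/*
 * Illustrative sketch, not part of the original file: driving the
 * reliable walker with the hypothetical example_consume_entry() above,
 * roughly as stack_trace_save_tsk_reliable() in kernel/stacktrace.c
 * does.  As the comment above requires, the caller must ensure @task
 * stays inactive for the duration of the walk.
 */
static int example_walk_reliable(struct task_struct *task,
				 unsigned long *store, unsigned int size)
{
	struct example_trace trace = {
		.entries	= store,
		.max		= size,
	};
	int ret;

	ret = arch_stack_walk_reliable(example_consume_entry, &trace, task);
	return ret ? ret : trace.nr;
}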

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * Layout of one userspace stack frame when built with frame pointers:
 * the saved frame pointer sits at the bottom of the frame, immediately
 * followed by the return address into the caller.
 */
struct stack_frame_user {
	const void __user	*next_fp;	/* caller's frame pointer */
	unsigned long		ret_addr;	/* return address */
};

/*
 * Copy one stack frame from user memory.  Page faults are disabled so
 * an access to an unmapped user address fails immediately instead of
 * sleeping on the fault; a failed copy simply ends the walk.
 */
static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* A valid frame pointer must lie at or above the stack pointer. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr))
			break;
		fp = frame.next_fp;
	}
}
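
/*
 * Illustrative sketch, not part of the original file: capturing the
 * current task's user stack with the hypothetical
 * example_consume_entry() callback, roughly as stack_trace_save_user()
 * in kernel/stacktrace.c drives this walker.
 */
static unsigned int example_save_user(unsigned long *store, unsigned int size)
{
	struct example_trace trace = {
		.entries	= store,
		.max		= size,
	};

	/* Kernel threads have no user stack to walk. */
	if (current->flags & PF_KTHREAD)
		return 0;

	arch_stack_walk_user(example_consume_entry, &trace,
			     task_pt_regs(current));
	return trace.nr;
}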