// SPDX-License-Identifier: GPL-2.0-only
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
13struct frame_tail {
14 struct frame_tail __user *fp;
15 unsigned long lr;
16} __attribute__((packed));
18/*
19 * Get the return address for a single stackframe and return a pointer to the
20 * next frame tail.
21 */
22static struct frame_tail __user *
23user_backtrace(struct frame_tail __user *tail,
24 struct perf_callchain_entry_ctx *entry)
25{
26 struct frame_tail buftail;
27 unsigned long err;
David Brazdil0f672f62019-12-10 10:32:29 +000028 unsigned long lr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000029
30 /* Also check accessibility of one struct frame_tail beyond */
David Brazdil0f672f62019-12-10 10:32:29 +000031 if (!access_ok(tail, sizeof(buftail)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000032 return NULL;
33
34 pagefault_disable();
35 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
36 pagefault_enable();
37
38 if (err)
39 return NULL;
40
David Brazdil0f672f62019-12-10 10:32:29 +000041 lr = ptrauth_strip_insn_pac(buftail.lr);
42
43 perf_callchain_store(entry, lr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000044
45 /*
46 * Frame pointers should strictly progress back up the stack
47 * (towards higher addresses).
48 */
49 if (tail >= buftail.fp)
50 return NULL;
51
52 return buftail.fp;
53}

#ifdef CONFIG_COMPAT
56/*
57 * The registers we're interested in are at the end of the variable
58 * length saved register structure. The fp points at the end of this
59 * structure so the address of this struct is:
60 * (struct compat_frame_tail *)(xxx->fp)-1
61 *
62 * This code has been adapted from the ARM OProfile support.
63 */
64struct compat_frame_tail {
65 compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
66 u32 sp;
67 u32 lr;
68} __attribute__((packed));
70static struct compat_frame_tail __user *
71compat_user_backtrace(struct compat_frame_tail __user *tail,
72 struct perf_callchain_entry_ctx *entry)
73{
74 struct compat_frame_tail buftail;
75 unsigned long err;
76
77 /* Also check accessibility of one struct frame_tail beyond */
David Brazdil0f672f62019-12-10 10:32:29 +000078 if (!access_ok(tail, sizeof(buftail)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000079 return NULL;
80
81 pagefault_disable();
82 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
83 pagefault_enable();
84
85 if (err)
86 return NULL;
87
88 perf_callchain_store(entry, buftail.lr);
89
90 /*
91 * Frame pointers should strictly progress back up the stack
92 * (towards higher addresses).
93 */
94 if (tail + 1 >= (struct compat_frame_tail __user *)
95 compat_ptr(buftail.fp))
96 return NULL;
97
98 return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
99}
#endif /* CONFIG_COMPAT */

102void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
103 struct pt_regs *regs)
104{
105 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
106 /* We don't support guest os callchain now */
107 return;
108 }
109
110 perf_callchain_store(entry, regs->pc);
111
112 if (!compat_user_mode(regs)) {
113 /* AARCH64 mode */
114 struct frame_tail __user *tail;
115
116 tail = (struct frame_tail __user *)regs->regs[29];
117
118 while (entry->nr < entry->max_stack &&
119 tail && !((unsigned long)tail & 0xf))
120 tail = user_backtrace(tail, entry);
121 } else {
122#ifdef CONFIG_COMPAT
123 /* AARCH32 compat mode */
124 struct compat_frame_tail __user *tail;
125
126 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
127
128 while ((entry->nr < entry->max_stack) &&
129 tail && !((unsigned long)tail & 0x3))
130 tail = compat_user_backtrace(tail, entry);
131#endif
132 }
133}
135/*
136 * Gets called by walk_stackframe() for every stackframe. This will be called
137 * whist unwinding the stackframe and is like a subroutine return so we use
138 * the PC.
139 */
140static int callchain_trace(struct stackframe *frame, void *data)
141{
142 struct perf_callchain_entry_ctx *entry = data;
143 perf_callchain_store(entry, frame->pc);
144 return 0;
145}
147void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
148 struct pt_regs *regs)
149{
150 struct stackframe frame;
151
152 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
153 /* We don't support guest os callchain now */
154 return;
155 }
156
David Brazdil0f672f62019-12-10 10:32:29 +0000157 start_backtrace(&frame, regs->regs[29], regs->pc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000158 walk_stackframe(current, &frame, callchain_trace, entry);
159}
161unsigned long perf_instruction_pointer(struct pt_regs *regs)
162{
163 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
164 return perf_guest_cbs->get_guest_ip();
165
166 return instruction_pointer(regs);
167}
169unsigned long perf_misc_flags(struct pt_regs *regs)
170{
171 int misc = 0;
172
173 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
174 if (perf_guest_cbs->is_user_mode())
175 misc |= PERF_RECORD_MISC_GUEST_USER;
176 else
177 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
178 } else {
179 if (user_mode(regs))
180 misc |= PERF_RECORD_MISC_USER;
181 else
182 misc |= PERF_RECORD_MISC_KERNEL;
183 }
184
185 return misc;
186}