/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

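/*
 * Flag for the @flags argument of rb_alloc(); the perf core sets it when
 * user space maps the buffer writable (VM_WRITE). A writable mapping
 * disables overwrite mode: the kernel then stops writing when the buffer
 * fills up instead of wrapping over unread data.
 */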
#define RING_BUFFER_WRITABLE		0x01

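/*
 * Kernel-side state of one mmap()ed perf ring buffer. user_page is the
 * perf_event_mmap_page control page that user space sees at file offset 0;
 * data_pages[] is a flexible array of the data pages that follow it. The
 * aux_* fields describe the optional AUX area used by PMUs that stream
 * hardware-generated data (e.g. Intel PT) directly into user-visible pages.
 */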
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

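/*
 * Pause or resume output into the buffer. A buffer without data pages
 * (nr_pages == 0, i.e. one that only carries an AUX area) can never be
 * unpaused: writers must always find rb->paused set.
 */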
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

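/*
 * Worked example (a sketch, assuming 4 KiB pages): a buffer of 8 data
 * pages reports perf_data_size() == 8 << 12 == 32 KiB when each page is a
 * separate order-0 allocation. In the vmalloc layout the same buffer is
 * one virtually contiguous allocation, so nr_pages is 1 and page_order
 * encodes the total size: 1 << (12 + 3) == 32 KiB.
 */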
static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

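/*
 * The copy loop below relies on a memcpy()-like callback that returns the
 * number of bytes it could NOT copy (0 on full success), so
 * "written = size - written" yields the bytes actually written. The loop
 * advances the output handle page by page, wrapping through the
 * power-of-two nr_pages mask, and returns how many of @len bytes remain
 * uncopied.
 */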
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

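/*
 * __output_custom() expands the body with advance_buf == false: @buf is
 * not advanced here, because the perf_copy_f callback receives the running
 * offset (orig_len - len) as its third argument and is expected to index
 * into @buf on its own.
 */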
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

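/*
 * memcpy_common() adapts plain memcpy() to the "bytes not copied" return
 * convention: it always succeeds, hence returns 0. memcpy_skip() below
 * also returns 0 but copies nothing, so __output_skip() advances the
 * handle over @len bytes without writing them.
 */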
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

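/*
 * Architectures may supply their own copy-from-user routine by defining
 * arch_perf_out_copy_user before this point. The generic fallback uses
 * __copy_from_user_inatomic() with page faults disabled, since perf
 * output can run in NMI/IRQ context where faulting is not allowed; like
 * the other callbacks it returns the number of bytes left uncopied.
 */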
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

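/*
 * Map the current preemption state onto one of four recursion levels:
 * 0 == task, 1 == softirq, 2 == hardirq, 3 == NMI. Each level may hold at
 * most one software event at a time; a second attempt at the same level
 * is rejected with -1, so an event cannot recurse into itself.
 */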
static inline int get_recursion_context(int *recursion)
{
	unsigned int pc = preempt_count();
	unsigned char rctx = 0;

	rctx += !!(pc & (NMI_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
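
/*
 * Illustrative pairing (a sketch, not part of this header): callers such
 * as perf_swevent_get_recursion_context() in kernel/events/core.c bracket
 * event delivery with these helpers:
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		// already busy at this context level
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */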

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */