/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* writes are discarded when set */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	unsigned int			nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};
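
/*
 * The data area is reached through data_pages[].  nr_pages is a power of
 * two, so the output-copy loop below advances with a simple mask.  A
 * minimal addressing sketch, assuming the default one-page-per-entry
 * layout (page_order() == 0):
 *
 *	unsigned long off = ...;	// byte offset into the data area
 *	int page   = (off >> PAGE_SHIFT) & (rb->nr_pages - 1);
 *	void *addr = rb->data_pages[page] + (off & (PAGE_SIZE - 1));
 */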

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}
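
/*
 * rb_free_rcu() is shaped as an RCU callback: the last reference holder
 * defers the actual free past a grace period.  Illustrative use:
 *
 *	call_rcu(&rb->rcu_head, rb_free_rcu);
 */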

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);
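
/*
 * Rough lifecycle sketch (error handling abbreviated; the flag choice is
 * an illustration, not a requirement of these interfaces):
 *
 *	struct perf_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	rb = ring_buffer_get(event);	// take a reference before use
 *	...				// produce/consume through rb
 *	ring_buffer_put(rb);		// last put frees it via RCU
 */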

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif
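
/*
 * Either way, the data area is treated as chunks of
 * PAGE_SIZE << page_order(rb) bytes: with vmalloc backing, one
 * data_pages[] entry can span 2^page_order hardware pages (the
 * "allocation order" above), while in the default case page_order() is 0
 * and each entry is exactly one page.  The size helpers below follow this
 * convention.
 */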

static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
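
/*
 * Worked example with hypothetical numbers: for nr_pages == 2 and
 * page_order(rb) == 3, data_page_nr() is 2 << 3 = 16 hardware pages and
 * perf_data_size() is 2 << (PAGE_SHIFT + 3); with 4 KiB pages that is
 * 16 * 4 KiB = 64 KiB of data area.
 */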

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
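
/*
 * Note the convention threaded through this body: memcpy_func returns the
 * number of bytes it did NOT copy (0 on complete success), in the style of
 * copy_from_user().  The loop converts that into bytes "written", advances
 * the handle page by page (nr_pages is a power of two, hence the mask),
 * and finally returns how many of the requested bytes remain uncopied.
 */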

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
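
/*
 * The generated helpers all share one shape.  Illustrative calls (buffer
 * and length names are hypothetical):
 *
 *	unsigned long left;
 *
 *	left = __output_copy(handle, krec, krec_len);	// kernel memory
 *	left = __output_copy_user(handle, ubuf, ulen);	// user memory,
 *							// faults suppressed
 *
 * A non-zero "left" means the tail of the request was not written.
 */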

static inline int get_recursion_context(int *recursion)
{
	unsigned int pc = preempt_count();
	unsigned char rctx = 0;

	rctx += !!(pc & (NMI_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
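
/*
 * rctx maps preempt_count() bits onto four nesting levels: 0 == task,
 * 1 == softirq, 2 == hardirq, 3 == NMI.  Callers bracket their critical
 * section and back off when the same level is already active; a rough
 * sketch, with "recursion" standing for a caller-owned per-CPU int[4]:
 *
 *	int rctx = get_recursion_context(recursion);
 *
 *	if (rctx < 0)
 *		return;			// recursion at this level
 *	...				// do the work
 *	put_recursion_context(recursion, rctx);
 */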

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */