/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non-zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
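
/*
 * Decoding the data-record layout described above by hand would look
 * roughly like the sketch below (for a data record, i.e. type_len <=
 * RINGBUF_TYPE_DATA_TYPE_LEN_MAX). This is an illustration only; callers
 * should use ring_buffer_event_length() and ring_buffer_event_data(),
 * declared below, instead of open-coding the format:
 *
 *	if (event->type_len) {
 *		length = event->type_len << 2;
 *		data = &event->array[0];
 *	} else {
 *		length = event->array[0];
 *		data = &event->array[1];
 *	}
 */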

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *	if (some_condition)
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);
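
/*
 * A slightly fuller sketch of the discard path, from reservation to the
 * commit-or-discard decision (illustrative only; struct my_entry,
 * fill_my_entry() and my_filter() are made-up names for the example):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	fill_my_entry(entry);
 *	if (my_filter(entry))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */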

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
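
/*
 * Typical setup/teardown sketch (error handling trimmed; the 1 MB
 * per-CPU size and the overwrite flag are just illustrative choices):
 *
 *	struct trace_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */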

int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table);


#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
			      struct ring_buffer_event *event);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
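
/*
 * Writing an event, sketched two ways (illustrative only; "payload" and
 * "payload_size" are placeholders). Reserve/commit lets the caller build
 * the event directly in the buffer:
 *
 *	event = ring_buffer_lock_reserve(buffer, payload_size);
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), payload, payload_size);
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 *
 * or, as a one-shot copy of an already prepared buffer:
 *
 *	ring_buffer_write(buffer, payload_size, payload);
 */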

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
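
/*
 * Consuming-read sketch for one CPU (illustrative only; process_event()
 * is a placeholder and lost-event accounting is ignored):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event));
 */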

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
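
/*
 * Non-consuming (iterator) read sketch (illustrative only; process_event()
 * is a placeholder and error handling is omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(event);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */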

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);
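
/*
 * Sketch of the page-at-a-time read path (illustrative only; consume_page()
 * is a placeholder and return-value checks are trimmed):
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *
 *	if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0) >= 0)
 *		consume_page(page);
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */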

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

#endif /* _LINUX_RING_BUFFER_H */