/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/evsel.h>
#include <perf/evsel.h>
#include "symbol_conf.h"
#include <internal/cpumap.h>

struct bpf_object;
struct cgroup;
struct perf_counts;
struct perf_stat_evsel;
union perf_event;

typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);

enum perf_tool_event {
	PERF_TOOL_NONE		= 0,
	PERF_TOOL_DURATION_TIME = 1,
};

/** struct evsel - event selector
 *
 * @evlist: evlist this evsel is in, if it is in one.
 * @core: libperf evsel object
 * @name: Can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of
 *          struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID
 *          or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. if
 *          sample_id_all is used there is an id sample appended to
 *          non-sample events
 * @priv: tool-specific data; anything in its containing unnamed union is
 *        likewise tool specific
 */
struct evsel {
	struct perf_evsel	core;
	struct evlist		*evlist;
	off_t			id_offset;
	int			idx;
	int			id_pos;
	int			is_pos;
	unsigned int		sample_size;

	/*
	 * These fields can be set in the parse-events code or similar.
	 * Please check evsel__clone() to copy them properly so that
	 * they can be released properly.
	 */
	struct {
		char			*name;
		char			*group_name;
		const char		*pmu_name;
		struct tep_event	*tp_format;
		char			*filter;
		unsigned long		max_events;
		double			scale;
		const char		*unit;
		struct cgroup		*cgrp;
		enum perf_tool_event	tool_event;
		/* parse modifier helper */
		int			exclude_GH;
		int			sample_read;
		bool			snapshot;
		bool			per_pkg;
		bool			percore;
		bool			precise_max;
		bool			use_uncore_alias;
		bool			is_libpfm_event;
		bool			auto_merge_stats;
		bool			collect_stat;
		bool			weak_group;
		int			bpf_fd;
		struct bpf_object	*bpf_obj;
	};

	/*
	 * metric fields are similar, but need more care as they can have
	 * references to other metrics (evsels).
	 */
	const char		*metric_expr;
	const char		*metric_name;
	struct evsel		**metric_events;
	struct evsel		*metric_leader;

	void			*handler;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	unsigned long		nr_events_printed;
	struct perf_stat_evsel	*stats;
	void			*priv;
	u64			db_id;
	bool			uniquified_name;
	bool			supported;
	bool			needs_swap;
	bool			disabled;
	bool			no_aux_samples;
	bool			immediate;
	bool			tracking;
	bool			ignore_missing_thread;
	bool			forced_leader;
	bool			cmdline_group_boundary;
	bool			merged_stat;
	bool			reset_group;
	bool			errored;
	unsigned long		*per_pkg_mask;
	struct evsel		*leader;
	struct list_head	config_terms;
	int			err;
	int			cpu_iter;
	struct {
		evsel__sb_cb_t	*cb;
		void		*data;
	} side_band;
	/*
	 * For reporting purposes, an evsel sample can have a callchain
	 * synthesized from AUX area data. Keep track of synthesized sample
	 * types here. Note, the recorded sample_type cannot be changed because
	 * it is needed to continue to parse events.
	 * See also evsel__has_callchain().
	 */
	__u64			synth_sample_type;
};

struct perf_missing_features {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
	bool ksymbol;
	bool bpf;
	bool aux_output;
	bool branch_hw_idx;
	bool cgroup;
};

extern struct perf_missing_features perf_missing_features;

struct perf_cpu_map;
struct target;
struct thread_map;
struct record_opts;

static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
{
	return perf_evsel__cpus(&evsel->core);
}

static inline int evsel__nr_cpus(struct evsel *evsel)
{
	return evsel__cpus(evsel)->nr;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled);

void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
			   struct perf_counts_values *count);

int evsel__object_config(size_t object_size,
			 int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel));

struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
bool evsel__is_aux_event(struct evsel *evsel);

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
	return evsel__new_idx(attr, 0);
}
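
/*
 * Example: a minimal sketch of creating and destroying an evsel, assuming
 * the usual convention that evsel__new() returns NULL on allocation failure
 * (error handling abbreviated):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct evsel *evsel = evsel__new(&attr);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 *	...
 *	evsel__delete(evsel);
 */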

struct evsel *evsel__clone(struct evsel *orig);
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
	return evsel__newtp_idx(sys, name, 0);
}
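
/*
 * Example: since evsel__newtp() encodes errors in the returned pointer, the
 * result wants an IS_ERR() check rather than a NULL test. A sketch, with a
 * tracepoint name that is only illustrative:
 *
 *	struct evsel *evsel = evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */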

struct evsel *evsel__new_cycles(bool precise);

struct tep_event *event_format__new(const char *sys, const char *name);

void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);

struct callchain_param;

void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain);
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *callchain);

int __evsel__sample_size(u64 sample_type);
void evsel__calc_id_pos(struct evsel *evsel);

bool evsel__is_cache_op_valid(u8 type, u8 op);

#define EVSEL__MAX_ALIASES 8

extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);

const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

#define evsel__set_sample_bit(evsel, bit) \
	__evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define evsel__reset_sample_bit(evsel, bit) \
	__evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
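
/*
 * Example: the macros paste the PERF_SAMPLE_ prefix on, so callers name only
 * the suffix. A sketch of requesting CPU and dropping TIME in sample data:
 *
 *	evsel__set_sample_bit(evsel, CPU);	expands to PERF_SAMPLE_CPU
 *	evsel__reset_sample_bit(evsel, TIME);	expands to PERF_SAMPLE_TIME
 */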

void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
int evsel__enable_cpu(struct evsel *evsel, int cpu);
int evsel__enable(struct evsel *evsel);
int evsel__disable(struct evsel *evsel);
int evsel__disable_cpu(struct evsel *evsel, int cpu);

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads);
void evsel__close(struct evsel *evsel);

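/*
 * Example: a rough open/enable/disable/close cycle, assuming @cpus and
 * @threads were built elsewhere (not shown) and that these calls return 0
 * or a negative error, as is usual in this file:
 *
 *	int err = evsel__open(evsel, cpus, threads);
 *
 *	if (err < 0)
 *		return err;
 *	evsel__enable(evsel);
 *	... workload runs, counters are read ...
 *	evsel__disable(evsel);
 *	evsel__close(evsel);
 */
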
struct perf_sample;

void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);

static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	return evsel__rawptr(evsel, sample, name);
}
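
/*
 * Example: pulling typed fields out of a tracepoint sample; the field names
 * below are only illustrative and depend on the event's format file:
 *
 *	u64 pid = evsel__intval(evsel, sample, "common_pid");
 *	char *comm = evsel__strval(evsel, sample, "comm");
 */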

struct tep_format_field;

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);

#define evsel__match(evsel, t, c)			\
	(evsel->core.attr.type == PERF_TYPE_##t &&	\
	 evsel->core.attr.config == PERF_COUNT_##c)

static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
{
	return (e1->core.attr.type == e2->core.attr.type) &&
	       (e1->core.attr.config == e2->core.attr.config);
}
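
/*
 * Example: like the sample bit macros, evsel__match() pastes the type and
 * config prefixes on, e.g. to test for the hardware cycles event:
 *
 *	if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
 *		...
 */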

int evsel__read_counter(struct evsel *evsel, int cpu, int thread);

int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale);

/**
 * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu, thread, true);
}
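
/*
 * Example: reading thread 0 of an opened evsel across its CPU map; the
 * scaled variant additionally compensates for time the counter was
 * multiplexed out. A sketch, assuming the results land in evsel->counts:
 *
 *	int cpu;
 *
 *	for (cpu = 0; cpu < evsel__nr_cpus(evsel); cpu++) {
 *		if (evsel__read_on_cpu_scaled(evsel, cpu, 0) < 0)
 *			break;
 *	}
 */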

int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample);

int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
				  u64 *timestamp);

static inline struct evsel *evsel__next(struct evsel *evsel)
{
	return list_entry(evsel->core.node.next, struct evsel, core.node);
}

static inline struct evsel *evsel__prev(struct evsel *evsel)
{
	return list_entry(evsel->core.node.prev, struct evsel, core.node);
}

/**
 * evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
	return evsel->leader == evsel;
}

/**
 * evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool evsel__is_group_event(struct evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
}

bool evsel__is_function_event(struct evsel *evsel);

static inline bool evsel__is_bpf_output(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

static inline bool evsel__is_clock(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
int evsel__open_strerror(struct evsel *evsel, struct target *target,
			 int err, char *msg, size_t size);

static inline int evsel__group_idx(struct evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader)					\
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node);	\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader)					\
for ((_evsel) = _leader;							\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
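
/*
 * Example: walking a group from its leader; @leader here is assumed to be a
 * group leader somewhere in an evlist:
 *
 *	struct evsel *pos;
 *
 *	for_each_group_member(pos, leader)
 *		pr_debug("member: %s\n", evsel__name(pos));
 */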

static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

static inline bool evsel__has_callchain(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded callchain
	 * or a callchain synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
	       evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
}

static inline bool evsel__has_br_stack(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded branch
	 * stack or a branch stack synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
	       evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool evsel__is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

struct perf_env *evsel__env(struct evsel *evsel);

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
#endif /* __PERF_EVSEL_H */