/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
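
/*
 * Illustrative worked example (not part of this header): a range
 * constraint built with EVENT_CONSTRAINT_RANGE(0xc0, 0xc5, ...) below
 * stores ->code = 0xc0 and ->size = 0x05, so the unsigned subtraction
 * in constraint_match() stays <= 0x05 only for masked event codes
 * 0xc0..0xc5; a code below 0xc0 wraps around to a huge value and is
 * rejected.  A plain EVENT_CONSTRAINT() has ->size == 0 and therefore
 * only matches its exact event code.
 */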

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */

struct amd_nb {
	int	nb_id;	/* NorthBridge id */
	int	refcnt;	/* reference count */
	struct perf_event	*owners[X86_PMC_IDX_MAX];
	struct event_constraint	event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)
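
/*
 * Hedged sketch (not the actual implementation in events/intel/ds.c;
 * the helper name is hypothetical): the intent of LARGE_PEBS_FLAGS is
 * that an event may use the multi-record ("large") PEBS path only if
 * every bit in its sample_type can be produced from the PEBS record
 * without taking a PMI, i.e. roughly:
 */
#if 0	/* example only, not compiled */
static bool example_can_use_large_pebs(struct perf_event *event)
{
	/* every requested sample flag must be PMI-free */
	return !(event->attr.sample_type & ~LARGE_PEBS_FLAGS);
}
#endif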

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;
	struct x86_perf_task_context	*last_task_ctx;
	int				last_log_id;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m)			\
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
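
/*
 * Illustrative sketch (event codes modelled on the Intel constraint
 * tables in events/intel/core.c, shown here only to demonstrate the
 * macro): fixed counters live at PMC indices 32 and up, which is why
 * the counter bitmask is (1ULL << (32+n)).
 */
#if 0	/* example only, not compiled */
static struct event_constraint example_fixed_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY on fixed counter 0 */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE on fixed counter 1 */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF on fixed counter 2 */
	EVENT_CONSTRAINT_END
};
#endif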

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
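
/*
 * Illustrative sketch (the helper name is hypothetical, not part of
 * this header): constraint tables are arrays terminated by
 * EVENT_CONSTRAINT_END and are typically scanned like this, returning
 * the first entry whose event code matches.
 */
#if 0	/* example only, not compiled */
static struct event_constraint *
example_lookup_constraint(struct event_constraint *table, u64 config)
{
	struct event_constraint *c;

	for_each_event_constraint(c, table) {
		if (constraint_match(c, config))
			return c;
	}
	return NULL;	/* no specific constraint; caller falls back to "unconstrained" */
}
#endif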

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
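
/*
 * Illustrative sketch (modelled on the offcore-response tables in
 * events/intel/core.c; the valid_mask below is a placeholder, the real
 * value is model specific): each PMU provides an array of extra_reg
 * entries, terminated by EVENT_EXTRA_END, describing which event codes
 * need an extra MSR and which attr.config1 bits are valid for it.
 */
#if 0	/* example only, not compiled */
static struct extra_reg example_extra_regs[] = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
#endif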

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
		u64	pebs_metrics_available:1;
		u64	pebs_output_pt_available:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
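
/*
 * Illustrative usage (hedged sketch; the wrapper function is
 * hypothetical): designated initializers fill the bitfields and the
 * macro yields the raw 64-bit event select value.
 */
#if 0	/* example only, not compiled */
static inline u64 example_inv_cmask_config(void)
{
	/* event 0xc0 (INST_RETIRED.ANY_P) with inv=1 and cmask=16 */
	return X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
}
#endif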

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int valid_lbrs;
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
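
/*
 * Illustrative usage (hedged sketch; the quirk and init names below are
 * hypothetical): model-specific fixups register themselves during early
 * init, and the resulting linked list is walked once the PMU is set up.
 */
#if 0	/* example only, not compiled */
static __init void example_model_quirk(void)
{
	/* e.g. patch an event table entry for a buggy stepping */
}

static __init int example_pmu_init(void)
{
	x86_add_quirk(example_model_quirk);
	return 0;
}
#endif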

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
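
/*
 * Illustrative sketch (the helper is hypothetical; C() expands to
 * PERF_COUNT_HW_CACHE_*): per-model init code fills these tables and
 * lookups index them by cache level, operation and result.
 */
#if 0	/* example only, not compiled */
static inline u64 example_llc_read_miss_code(void)
{
	/* raw event code for last-level-cache read misses on this model */
	return hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)];
}
#endif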

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
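
/*
 * Worked example (hedged): with the architectural perfmon layout,
 * ->eventsel is MSR_ARCH_PERFMON_EVENTSEL0 and ->perfctr is
 * MSR_ARCH_PERFMON_PERFCTR0, and the MSRs are contiguous, so
 * x86_pmu_config_addr(2) is simply EVENTSEL0 + 2.  PMUs whose control
 * and counter MSRs are interleaved instead supply ->addr_offset() to
 * compute the per-index stride.
 */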

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
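
/*
 * Worked example (hedged): on 64-bit, kernel text and data live in the
 * upper half of the canonical address space, so an address such as
 * 0xffffffff81000000 has the sign bit set and (long)ip < 0 holds,
 * while a user address like 0x00007f0000001000 does not.
 */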

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

static inline int is_pebs_pt(struct perf_event *event)
{
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
}

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */