/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/*
 * ARM PMU hw_event flags
 */
/* Event uses a 64bit counter */
#define ARMPMU_EVT_64BIT		1
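
/*
 * Illustrative sketch, not part of the original header: a driver that
 * supports 64-bit counters can record this flag in the event's
 * hw_perf_event and test it later, roughly as below (the helper name is
 * hypothetical):
 *
 *	static bool my_event_is_64bit(struct perf_event *event)
 *	{
 *		return event->hw.flags & ARMPMU_EVT_64BIT;
 *	}
 */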

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
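
/*
 * Illustrative sketch, not part of the original header: a PMU driver
 * typically initialises its generic-event and cache maps with these
 * "all unsupported" macros and then overrides only the entries its
 * hardware implements. The event codes below are hypothetical:
 *
 *	static const unsigned my_pmu_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
 *		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
 *	};
 *
 *	static const unsigned my_pmu_cache_map[PERF_COUNT_HW_CACHE_MAX]
 *					      [PERF_COUNT_HW_CACHE_OP_MAX]
 *					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
 *	};
 */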

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;

	int irq;
};
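
/*
 * Illustrative sketch, not part of the original header: a driver's
 * get_event_idx callback usually just scans used_mask for a free
 * counter (the function name is hypothetical; real drivers also handle
 * fixed-purpose counters and their own num_events limit):
 *
 *	static int my_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 *					struct perf_event *event)
 *	{
 *		int idx = find_first_zero_bit(cpuc->used_mask,
 *					      ARMPMU_MAX_HWEVENTS);
 *
 *		if (idx >= ARMPMU_MAX_HWEVENTS)
 *			return -EAGAIN;
 *
 *		set_bit(idx, cpuc->used_mask);
 *		return idx;
 *	}
 */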

enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_ATTR_GROUP_CAPS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	supported_cpus;
	char		*name;
	int		pmuver;
	irqreturn_t	(*handle_irq)(struct arm_pmu *pmu);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u64		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u64 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		(*filter_match)(struct perf_event *event);
	int		num_events;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE	0x4000
	DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
	/* store the PMMIR_EL1 to expose slots */
	u64		reg_pmmir;

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};
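
/*
 * Illustrative sketch, not part of the original header: a CPU-specific
 * init function (see armpmu_init_fn below) is expected to fill in the
 * callbacks and num_events for the probed CPU. All "my_*" names and the
 * counter count are hypothetical:
 *
 *	static int my_pmu_init(struct arm_pmu *cpu_pmu)
 *	{
 *		cpu_pmu->name		= "my_pmu";
 *		cpu_pmu->handle_irq	= my_pmu_handle_irq;
 *		cpu_pmu->enable		= my_pmu_enable_event;
 *		cpu_pmu->disable	= my_pmu_disable_event;
 *		cpu_pmu->read_counter	= my_pmu_read_counter;
 *		cpu_pmu->write_counter	= my_pmu_write_counter;
 *		cpu_pmu->get_event_idx	= my_pmu_get_event_idx;
 *		cpu_pmu->start		= my_pmu_start;
 *		cpu_pmu->stop		= my_pmu_stop;
 *		cpu_pmu->map_event	= my_pmu_map_event;
 *		cpu_pmu->num_events	= 4;
 *		return 0;
 *	}
 *
 * Inside the per-event callbacks, the wrapping arm_pmu is recovered from
 * the generic struct pmu with to_arm_pmu(event->pmu), defined below.
 */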

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
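
/*
 * Illustrative sketch, not part of the original header: a driver's
 * map_event callback is commonly a thin wrapper around armpmu_map_event()
 * that supplies its own tables and raw-event mask (the table names and
 * mask are hypothetical):
 *
 *	static int my_pmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &my_pmu_perf_map,
 *					&my_pmu_cache_map, 0xFF);
 *	}
 */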

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
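
/*
 * Illustrative sketch, not part of the original header: legacy (non-DT)
 * probing matches the CPUID/MIDR against a table built from the macros
 * above, terminated by a zeroed entry. The init function is hypothetical:
 *
 *	static const struct pmu_probe_info my_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_a9_pmu_init),
 *		{ 0, 0, NULL },
 *	};
 */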

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);
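
/*
 * Illustrative sketch, not part of the original header: a PMU platform
 * driver's probe routine usually just forwards to arm_pmu_device_probe()
 * with its DT match table and optional legacy probe table. Everything
 * named "my_*" (and the driver name string) is hypothetical:
 *
 *	static int my_pmu_device_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, my_pmu_of_device_ids,
 *					    my_pmu_probe_table);
 *	}
 *
 *	static struct platform_driver my_pmu_driver = {
 *		.driver		= {
 *			.name		= "my-pmu",
 *			.of_match_table	= my_pmu_of_device_ids,
 *		},
 *		.probe		= my_pmu_device_probe,
 *	};
 *	builtin_platform_driver(my_pmu_driver);
 */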

#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
void armpmu_free_irq(int irq, int cpu);

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */

#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"

#endif /* __ARM_PMU_H__ */