// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 */

#define pr_fmt(fmt)	"arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

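/*
 * Memory-mapped timer registers: CNTTIDR flags the frames with a virtual
 * capability and CNTACR(n) gates access per frame, both in the CNTCTLBase
 * region; the remaining offsets are relative to each frame's CNTBase.
 */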
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static enum vdso_arch_clockmode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

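/*
 * "clocksource.arm_arch_timer.evtstrm=" on the kernel command line
 * overrides the Kconfig default for the event stream.
 */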
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

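/*
 * Dispatch a timer register access: the two MMIO access types use the
 * CNTP/CNTV frame offsets above, anything else goes through the cp15
 * (sysreg) accessors. Being __always_inline, a constant @access lets
 * the compiler discard the unused paths.
 */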
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm that the value is correct, so
 * shift out the lower 5 bits of the difference when checking it.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but some interrupts may lead to more
 * than two read errors and trigger the warning, so set the number of
 * retries far beyond the number of iterations the loop has been observed
 * to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

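/*
 * Erratum-safe event programming: read the counter through the stable
 * accessor and write an absolute CVAL instead of the erratum-prone TVAL.
 */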
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1418040,
		.desc = "ARM erratum 1418040",
		.disable_compat_vdso = true,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.archdata.clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
			wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

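/*
 * Probe-time queries: does this CPU need a CNTVCT read workaround (in
 * which case userspace counter access stays trapped), and is any counter
 * read workaround in use anywhere in the system?
 */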
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)	do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()	({false;})
#define arch_timer_counter_has_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

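/*
 * Common interrupt path: if ISTATUS is set, mask the timer so it stops
 * firing and hand the event to the clockevent core.
 */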
static __always_inline irqreturn_t timer_handler(const int access,
						 struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

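/*
 * Default (non-erratum) programming: TVAL takes a delta in counter ticks
 * from now; setting ENABLE and clearing IMASK arms the interrupt.
 */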
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		typeof(clk->set_next_event) sne;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			sne = erratum_handler(set_next_event_virt);
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			sne = erratum_handler(set_next_event_phys);
			break;
		default:
			BUG();
		}

		clk->set_next_event = sne;
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

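/*
 * @divider ends up in the CNTKCTL EVNTI field: it selects the counter bit
 * whose transitions generate events, i.e. the log2 divider computed in
 * arch_timer_configure_evtstream() below.
 */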
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
	 */
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because availability of the event stream should be always the same
	 * for a preemptible context and context where we might resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

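/*
 * 64-bit MMIO counter read from two 32-bit halves: loop until the high
 * word is stable so a low-word rollover cannot tear the result.
 */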
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		u64 (*rd)(void);

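		/*
		 * Use the virtual counter on arm64 when not booted at EL2
		 * (and whenever the virtual PPI is in use), the physical
		 * counter otherwise; either way take the _stable variant
		 * if a counter read workaround is active.
		 */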
		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntvct_stable;
			else
				rd = arch_counter_get_cntvct;
		} else {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntpct_stable;
			else
				rd = arch_counter_get_cntpct;
		}

		arch_timer_read_counter = rd;
		clocksource_counter.archdata.clock_mode = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
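/*
 * CNTKCTL is lost across a core power-down: save it on CPU_PM_ENTER and
 * restore it on exit, re-advertising the event stream once restored.
 */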
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (arch_timer_have_evtstrm_feature())
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt provided for virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
			       &timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}

/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif