// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002,2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 *
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>

#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
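		/*
		 * With frame pointers, regs->bp points at the saved frame
		 * pointer of the lock function and the caller's return
		 * address is stored one word above it.
		 */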
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;
		/*
		 * The return address is either directly at the stack
		 * pointer or above a saved EFLAGS value. EFLAGS has bits
		 * 22-31 zero, kernel addresses don't.
		 */
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * Default timer interrupt handler for PIT/HPET
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
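	/*
	 * global_clock_event points to whichever legacy clock event device
	 * (PIT or HPET channel 0) was registered; its handler does the
	 * actual tick processing.
	 */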
	global_clock_event->event_handler(global_clock_event);
	return IRQ_HANDLED;
}

static void __init setup_default_timer_irq(void)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER;

	/*
	 * Unconditionally register the legacy timer interrupt; even
	 * without legacy PIC/PIT we need this for the HPET0 in legacy
	 * replacement mode.
	 */
	if (request_irq(0, timer_interrupt, flags, "timer", NULL))
		pr_info("Failed to register legacy timer interrupt\n");
}

/* Default timer init function */
void __init hpet_time_init(void)
{
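	/*
	 * Try the HPET first. If it cannot be enabled, fall back to the
	 * PIT. If neither timer can be set up, there is no legacy clock
	 * event to drive, so skip registering the timer interrupt.
	 */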
	if (!hpet_enable()) {
		if (!pit_timer_init())
			return;
	}

	setup_default_timer_irq();
}

static __init void x86_late_time_init(void)
{
	/*
	 * Before PIT/HPET init, select the interrupt mode. This is
	 * required to correctly decide whether the PIT should be
	 * initialized.
	 */
	x86_init.irqs.intr_mode_select();

	/* Set up the legacy timers */
	x86_init.timers.timer_init();

	/*
	 * After PIT/HPET timers init, set up the final interrupt mode for
	 * delivering IRQs.
	 */
	x86_init.irqs.intr_mode_init();
	tsc_init();

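	/* Use TPAUSE-based delays on CPUs that support the WAITPKG feature. */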
	if (static_cpu_has(X86_FEATURE_WAITPKG))
		use_tpause_delay();
}

/*
 * Defer TSC and periodic timer initialization to x86_late_time_init(),
 * which runs late enough for ioremap() to work.
 */
void __init time_init(void)
{
	late_time_init = x86_late_time_init;
}

/*
 * Sanity check the vdso-related archdata content.
 */
void clocksource_arch_init(struct clocksource *cs)
{
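	/* Nothing to check for clocksources not exposed through the VDSO. */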
	if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE)
		return;

	if (cs->mask != CLOCKSOURCE_MASK(64)) {
		pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n",
			cs->name, cs->mask);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}
}