// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

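/*
 * Defining CREATE_TRACE_POINTS before including the event header makes
 * this translation unit emit the actual tracepoint definitions for the
 * preempt/irq enable and disable events declared in preemptirq.h.
 */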
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

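/*
 * Full hardirqs-on hook, including lockdep. Uses the _rcuidle tracepoint
 * variant since this can be called when RCU is not watching, then lets
 * lockdep do its hardirq-on accounting. The per-cpu flag ensures the
 * event fires only on a real off->on transition.
 */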
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

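/*
 * Full hardirqs-off hook, including lockdep. Lockdep is told first; the
 * tracer and the _rcuidle tracepoint then fire only on a real on->off
 * transition, as tracked by the per-cpu flag.
 */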
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

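/*
 * As trace_hardirqs_on(), but with the interesting caller address passed
 * in explicitly rather than derived via CALLER_ADDR1; intended for
 * callers (typically architecture code) where the return address of this
 * function is not the site worth reporting.
 */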
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

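/*
 * As trace_hardirqs_off(), but with an explicit caller address; the
 * counterpart of trace_hardirqs_on_caller() above.
 */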
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

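/*
 * Called when preemption is re-enabled; a0/a1 are the instruction
 * pointers of the enable site and its caller. The tracepoint is skipped
 * in NMI context, while the preemptoff tracer is always notified.
 */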
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

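/*
 * Called when preemption is disabled, mirroring trace_preempt_on().
 */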
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif /* CONFIG_TRACE_PREEMPT_TOGGLE */