// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
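
/*
 * Note: context_tracking_key is expected to back the static-branch fast
 * path of context_tracking_enabled() in <linux/context_tracking_state.h>,
 * so that CPUs which never enable tracking pay only a patched-out branch
 * on this path. The per-CPU struct carries the ->state, ->active and
 * ->recursion fields used throughout this file.
 */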

static noinstr bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}
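
/*
 * Sketch of how the guard above is used by both tracking paths below:
 *
 *	if (!context_tracking_recursion_enter())
 *		return;
 *	... update the per-CPU context tracking state ...
 *	context_tracking_recursion_exit();
 *
 * The per-CPU counter makes reentrant calls, e.g. from an exception taken
 * in the middle of the update, bail out early instead of corrupting the
 * state.
 */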

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __context_tracking_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps on the exception
		 * handler and then migrates to another CPU, the new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__context_tracking_enter);
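
/*
 * Note: __context_tracking_enter() is noinstr, so instrumentation is
 * forbidden by default; the instrumentation_begin()/instrumentation_end()
 * pair above carves out the one region where the tracing and vtime calls
 * are still safe, i.e. before rcu_user_enter() stops RCU from watching.
 */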

void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);
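
/*
 * Sketch of the intended pairing, assuming the user_enter()/user_exit()
 * helpers from <linux/context_tracking.h>, which funnel into
 * context_tracking_enter()/context_tracking_exit() with CONTEXT_USER
 * when tracking is enabled:
 *
 *	user_exit();		// syscall/exception entry from userspace
 *	... kernel work, RCU usable again ...
 *	user_enter();		// right before returning to userspace
 */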

/*
 * Thin out-of-line wrapper around user_enter(), presumably kept so that
 * arch entry code has a real symbol to call (user_enter() is inline) and
 * so kprobes can be blacklisted from this early entry path.
 */
void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of an RCU read side critical section. This
 * potentially includes any high level kernel code such as syscalls,
 * exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __context_tracking_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__context_tracking_exit);
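
/*
 * Note the mirrored ordering relative to __context_tracking_enter():
 * rcu_user_exit() runs first so that RCU is watching again before the
 * instrumentable vtime/tracing calls, while on entry those calls run
 * before rcu_user_enter() switches RCU into the extended quiescent state.
 */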

void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

/*
 * Counterpart of context_tracking_user_enter() above: an out-of-line,
 * kprobe-blacklisted wrapper around user_exit().
 */
void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
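
/*
 * Sketch of the exception-handler pairing described in
 * __context_tracking_enter(), assuming the exception_enter()/
 * exception_exit() helpers from <linux/context_tracking.h>:
 *
 *	enum ctx_state prev = exception_enter();	// records prior context
 *	... handle the exception, possibly sleep and migrate ...
 *	exception_exit(prev);				// restores that context
 */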

void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
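
/*
 * Note: this is expected to be called during early boot for each
 * nohz_full CPU by the tick/nohz initialization code; that is what arms
 * the static key and the per-CPU ->active flag used above. With
 * CONFIG_CONTEXT_TRACKING_FORCE (below), every possible CPU is set
 * instead, which is presumably mostly useful for testing these paths
 * without a nohz_full= setup.
 */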

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif