/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
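
/*
 * A note on the counter encoding used above (see rcu_seq_start() and
 * friends in kernel/rcu/rcu.h): the low-order two bits of
 * ->expedited_sequence track the grace-period phase (nonzero while a
 * grace period is in progress), and the remaining upper bits count
 * completed grace periods.  Worked example: with the counter idle at
 * 0x100, rcu_exp_gp_seq_snap() returns 0x104, the value reached only
 * once a full subsequent grace period has both started (0x101) and
 * ended (0x104).  If a grace period is already in flight (0x101), the
 * snapshot is 0x108, forcing a wait for the in-flight grace period plus
 * one more full one, since the in-flight one may have started before
 * the caller's updates.
 */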

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If it was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}
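
/*
 * Why the early exit in the propagation loop above is safe: if an
 * ancestor's ->expmaskinit was already nonzero, some earlier pass
 * already propagated that ancestor's own bit to *its* parent, so every
 * level above it already knows that this subtree contains CPUs that
 * have been online.  Setting the current level's bit and stopping is
 * therefore sufficient.
 */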

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the
 * lock itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_preempt_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        }
}
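
/*
 * Note the hand-over-hand locking in the loop above: the child's ->lock
 * is dropped only after the parent's bit to clear has been computed,
 * and the parent's ->lock is then acquired with interrupts still
 * disabled.  A node's bit is cleared in its parent only once the node's
 * entire subtree (CPUs and blocked tasks alike) has reported quiescent
 * states, so a zero root ->expmask really does imply a completed
 * expedited grace period.
 */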

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->exp_deferred_qs, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                smp_mb(); /* Ensure test happens before caller kfree(). */
                return true;
        }
        return false;
}
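
/*
 * The smp_mb() in sync_exp_work_done() pairs with the ordering provided
 * by rcu_exp_gp_seq_end() and rcu_exp_gp_seq_snap(): once a caller sees
 * the sequence counter indicating "done", anything it subsequently
 * frees is guaranteed to be unreachable by the pre-existing readers
 * that the just-completed expedited grace period waited on.
 */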

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work or
         * otherwise falls through to acquire ->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}
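
/*
 * Funnel locking in a nutshell: a task wanting expedited grace period
 * "s" climbs from its leaf rcu_node toward the root.  At each level it
 * either finds ->exp_seq_rq >= s, meaning that an earlier task is
 * already driving a grace period that satisfies "s" (so it parks on
 * that node's wait queue), or it records "s" in ->exp_seq_rq and climbs
 * on.  However many CPUs request an expedited grace period
 * concurrently, at most one task per subtree reaches the next level, so
 * only one task ever holds ->exp_mutex and actually does the work;
 * everyone else piggybacks on the result.
 */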

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                rnp->exp_tasks = rnp->blkd_tasks.next;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                if (get_cpu() == cpu) {
                        put_cpu();
                        continue;
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                put_cpu();
                if (!ret) {
                        mask_ofl_ipi &= ~mask;
                        continue;
                }
                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_uninterruptible(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we can ignore it. */
                if (!(rnp->expmask & mask))
                        mask_ofl_ipi &= ~mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        mask_ofl_test |= mask_ofl_ipi;
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
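
/*
 * The two-phase structure above is deliberate: the first (locked) pass
 * snapshots each target CPU's dynticks state, and the second (unlocked)
 * pass sends IPIs only to CPUs that were neither idle, offline, nor the
 * current CPU.  rcu_dynticks_in_eqs_since() then catches CPUs that
 * passed through an extended quiescent state between the two passes,
 * sparing them an IPI as well.
 */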

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
        int cpu;
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
                cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
                /* If all offline, queue the work on an unbound CPU. */
                if (unlikely(cpu > rnp->grphi - rnp->grplo))
                        cpu = WORK_CPU_UNBOUND;
                else
                        cpu += rnp->grplo;
                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
                rnp->exp_need_flush = true;
        }

        /* Wait for workqueue jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        flush_work(&rnp->rew.rew_work);
}
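
/*
 * Farming each leaf's CPU selection out to rcu_par_gp_wq lets the IPI
 * storms for different leaves proceed in parallel, which matters on
 * large systems where one CPU serially IPIing hundreds of others would
 * stretch the "expedited" part of the name.  The last leaf is handled
 * by direct call so that the initiating context does useful work itself
 * instead of immediately sleeping in flush_work().
 */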

static void synchronize_sched_expedited_wait(void)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();
        int ret;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout_exclusive(
                                rcu_state.expedited_wq,
                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
                        return;
                WARN_ON(ret < 0); /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        READ_ONCE(rnp_root->expmask),
                        ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_preempt_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        READ_ONCE(rnp->expmask),
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait();

        // Switch over to wakeup mode, allowing the next GP to proceed.
        // End the previous grace period only after acquiring the mutex
        // to ensure that only one GP runs concurrently with wakeups.
        mutex_lock(&rcu_state.exp_wake_mutex);
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}
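
/*
 * About the "& 0x3" above: each rcu_node structure has four wait queues
 * in ->exp_wq[], indexed by the low-order bits of the grace-period
 * number.  Tasks waiting on different recent grace periods therefore
 * sleep on different queues, so a wakeup for grace period "s" avoids
 * spuriously rousing tasks still waiting on a later one.
 */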

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If preemption and softirqs are also
         * enabled, or if the CPU is idle, immediately report the
         * quiescent state; otherwise defer.
         */
        if (!t->rcu_read_lock_nesting) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        rdp->exp_deferred_qs = true;
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock().  However, this rcu_read_unlock() might
         * execute on some other CPU, but in that case there will be
         * a future context switch.  Either way, if the expedited
         * grace period is still waiting on this CPU, set ->deferred_qs
         * so that the eventual quiescent state will be reported.
         * Note that there is a large group of race conditions that
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
        if (t->rcu_read_lock_nesting > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        rdp->exp_deferred_qs = true;
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        /*
         * The final and least likely case is where the interrupted
         * code was just about to or just finished exiting the RCU-preempt
         * read-side critical section, and no, we can't tell which.
         * So either way, set ->deferred_qs to flag later code that
         * a quiescent state is required.
         *
         * If the CPU is fully enabled (or if some buggy RCU-preempt
         * read-side critical section is being used from idle), just
         * invoke rcu_preempt_deferred_qs() to immediately report the
         * quiescent state.  We cannot use rcu_read_unlock_special()
         * because we are in an interrupt handler, which will cause that
         * function to take an early exit without doing anything.
         *
         * Otherwise, force a context switch after the CPU enables everything.
         */
        rdp->exp_deferred_qs = true;
        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
            WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                rcu_preempt_deferred_qs(t);
        } else {
                set_tsk_need_resched(t);
                set_preempt_need_resched();
        }
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rnp->exp_tasks)
                return 0;
        t = list_entry(rnp->exp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        return ndetected;
}
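
/*
 * The ->prev/list_for_each_entry_continue() pairing above is subtle:
 * list_for_each_entry_continue() begins with the element *after* its
 * starting point, so starting from rnp->exp_tasks->prev makes the first
 * task examined be the one that ->exp_tasks references, after which the
 * scan covers every remaining task on ->blkd_tasks blocking the current
 * expedited grace period.
 */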

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = this_cpu_ptr(&rcu_data);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        rcu_exp_need_qs();
}
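
/*
 * In this !CONFIG_PREEMPT_RCU variant, an interrupt taken from the idle
 * loop is itself evidence of a quiescent state, so the handler can
 * report one on the spot.  Otherwise it merely requests one: the next
 * context switch or scheduler-clock-driven quiescent state will report
 * it, which the set_tsk_need_resched()/set_preempt_need_resched() pair
 * in rcu_exp_need_qs() works to hasten.
 */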

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        unsigned long flags;
        int my_cpu;
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        my_cpu = get_cpu();
        /* Quiescent state either not needed or already requested, leave. */
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
                put_cpu();
                return;
        }
        /* Quiescent state needed on current CPU, so set it up locally. */
        if (my_cpu == cpu) {
                local_irq_save(flags);
                rcu_exp_need_qs();
                local_irq_restore(flags);
                put_cpu();
                return;
        }
        /* Quiescent state needed on some other CPU, send IPI. */
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        put_cpu();
        WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp())
                return;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return; /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(boottime)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);

        if (likely(!boottime))
                destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
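
/*
 * Typical usage (an illustrative sketch, not taken from this file): an
 * updater unlinks an element, waits for all pre-existing readers, and
 * only then frees it, trading IPI overhead for lower latency:
 *
 *      spin_lock(&mylist_lock);
 *      list_del_rcu(&p->list);         // Unlink; readers may still hold p.
 *      spin_unlock(&mylist_lock);
 *      synchronize_rcu_expedited();    // Wait for pre-existing readers.
 *      kfree(p);                       // Now safe to free.
 *
 * Prefer plain synchronize_rcu() or kfree_rcu() unless the latency of a
 * normal grace period has been measured to be a real problem.
 */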