| 1 | /* 
| 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, you can access it online at |
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * |
| 18 | * Copyright (C) IBM Corporation, 2006 |
| 19 | * Copyright (C) Fujitsu, 2012 |
| 20 | * |
| 21 | * Author: Paul McKenney <paulmck@us.ibm.com> |
| 22 | * Lai Jiangshan <laijs@cn.fujitsu.com> |
| 23 | * |
| 24 | * For detailed explanation of Read-Copy Update mechanism see - |
| 25 | * Documentation/RCU/ *.txt |
| 26 | * |
| 27 | */ |
| 28 | |
| 29 | #define pr_fmt(fmt) "rcu: " fmt |
| 30 | |
| 31 | #include <linux/export.h> |
| 32 | #include <linux/mutex.h> |
| 33 | #include <linux/percpu.h> |
| 34 | #include <linux/preempt.h> |
| 35 | #include <linux/rcupdate_wait.h> |
| 36 | #include <linux/sched.h> |
| 37 | #include <linux/smp.h> |
| 38 | #include <linux/delay.h> |
| 39 | #include <linux/module.h> |
| 40 | #include <linux/srcu.h> |
| 41 | |
| 42 | #include "rcu.h" |
| 43 | #include "rcu_segcblist.h" |
| 44 | |
| 45 | /* Holdoff in nanoseconds for auto-expediting. */ |
| 46 | #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000) |
| 47 | static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; |
| 48 | module_param(exp_holdoff, ulong, 0444); |
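| | 
| | /*
| |  * Illustrative note (not part of the original source): because exp_holdoff
| |  * is set up via module_param() here, it can typically be tuned at boot
| |  * time from the kernel command line, for example:
| |  *
| |  *	srcutree.exp_holdoff=50000
| |  *
| |  * which would raise the auto-expedite holdoff from 25 to 50 microseconds.
| |  */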
| 49 | |
| 50 | /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */ |
| 51 | static ulong counter_wrap_check = (ULONG_MAX >> 2); |
| 52 | module_param(counter_wrap_check, ulong, 0444); |
| 53 | |
| 54 | static void srcu_invoke_callbacks(struct work_struct *work); |
| 55 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); |
| 56 | static void process_srcu(struct work_struct *work); |
| 57 | |
| 58 | /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ |
| 59 | #define spin_lock_rcu_node(p) \ |
| 60 | do { \ |
| 61 | spin_lock(&ACCESS_PRIVATE(p, lock)); \ |
| 62 | smp_mb__after_unlock_lock(); \ |
| 63 | } while (0) |
| 64 | |
| 65 | #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock)) |
| 66 | |
| 67 | #define spin_lock_irq_rcu_node(p) \ |
| 68 | do { \ |
| 69 | spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \ |
| 70 | smp_mb__after_unlock_lock(); \ |
| 71 | } while (0) |
| 72 | |
| 73 | #define spin_unlock_irq_rcu_node(p) \ |
| 74 | spin_unlock_irq(&ACCESS_PRIVATE(p, lock)) |
| 75 | |
| 76 | #define spin_lock_irqsave_rcu_node(p, flags) \ |
| 77 | do { \ |
| 78 | spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \ |
| 79 | smp_mb__after_unlock_lock(); \ |
| 80 | } while (0) |
| 81 | |
| 82 | #define spin_unlock_irqrestore_rcu_node(p, flags) \ |
| 83 | spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) 
| 84 | |
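| | /*
| |  * Illustrative sketch (not from the original file): the wrappers above are
| |  * used in matched pairs throughout this file, for example:
| |  *
| |  *	spin_lock_irq_rcu_node(sdp);
| |  *	... update sdp->srcu_cblist ...
| |  *	spin_unlock_irq_rcu_node(sdp);
| |  *
| |  * The smp_mb__after_unlock_lock() in the lock-side wrappers ensures that a
| |  * prior unlock of such a lock followed by this acquisition acts as a full
| |  * memory barrier, which the grace-period machinery relies on.
| |  */
| | 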
| 85 | /* |
| 86 | * Initialize SRCU combining tree. Note that statically allocated |
| 87 | * srcu_struct structures might already have srcu_read_lock() and |
| 88 | * srcu_read_unlock() running against them. So if the is_static parameter |
| 89 | * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. |
| 90 | */ |
| 91 | static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) |
| 92 | { |
| 93 | int cpu; |
| 94 | int i; |
| 95 | int level = 0; |
| 96 | int levelspread[RCU_NUM_LVLS]; |
| 97 | struct srcu_data *sdp; |
| 98 | struct srcu_node *snp; |
| 99 | struct srcu_node *snp_first; |
| 100 | |
| 101 | /* Work out the overall tree geometry. */ |
| 102 | sp->level[0] = &sp->node[0]; |
| 103 | for (i = 1; i < rcu_num_lvls; i++) |
| 104 | sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; |
| 105 | rcu_init_levelspread(levelspread, num_rcu_lvl); |
| 106 | |
| 107 | /* Each pass through this loop initializes one srcu_node structure. */ |
| 108 | rcu_for_each_node_breadth_first(sp, snp) { |
| 109 | spin_lock_init(&ACCESS_PRIVATE(snp, lock)); |
| 110 | WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != |
| 111 | ARRAY_SIZE(snp->srcu_data_have_cbs)); |
| 112 | for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { |
| 113 | snp->srcu_have_cbs[i] = 0; |
| 114 | snp->srcu_data_have_cbs[i] = 0; |
| 115 | } |
| 116 | snp->srcu_gp_seq_needed_exp = 0; |
| 117 | snp->grplo = -1; |
| 118 | snp->grphi = -1; |
| 119 | if (snp == &sp->node[0]) { |
| 120 | /* Root node, special case. */ |
| 121 | snp->srcu_parent = NULL; |
| 122 | continue; |
| 123 | } |
| 124 | |
| 125 | /* Non-root node. */ |
| 126 | if (snp == sp->level[level + 1]) |
| 127 | level++; |
| 128 | snp->srcu_parent = sp->level[level - 1] + |
| 129 | (snp - sp->level[level]) / |
| 130 | levelspread[level - 1]; |
| 131 | } |
| 132 | |
| 133 | /* |
| 134 | * Initialize the per-CPU srcu_data array, which feeds into the |
| 135 | * leaves of the srcu_node tree. |
| 136 | */ |
| 137 | WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != |
| 138 | ARRAY_SIZE(sdp->srcu_unlock_count)); |
| 139 | level = rcu_num_lvls - 1; |
| 140 | snp_first = sp->level[level]; |
| 141 | for_each_possible_cpu(cpu) { |
| 142 | sdp = per_cpu_ptr(sp->sda, cpu); |
| 143 | spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); |
| 144 | rcu_segcblist_init(&sdp->srcu_cblist); |
| 145 | sdp->srcu_cblist_invoking = false; |
| 146 | sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; |
| 147 | sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; |
| 148 | sdp->mynode = &snp_first[cpu / levelspread[level]]; |
| 149 | for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { |
| 150 | if (snp->grplo < 0) |
| 151 | snp->grplo = cpu; |
| 152 | snp->grphi = cpu; |
| 153 | } |
| 154 | sdp->cpu = cpu; |
| 155 | INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); |
| 156 | sdp->sp = sp; |
| 157 | sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); |
| 158 | if (is_static) |
| 159 | continue; |
| 160 | |
| 161 | /* Dynamically allocated, better be no srcu_read_lock() calls! */ 
| 162 | for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { |
| 163 | sdp->srcu_lock_count[i] = 0; |
| 164 | sdp->srcu_unlock_count[i] = 0; |
| 165 | } |
| 166 | } |
| 167 | } |
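| | 
| | /*
| |  * Worked example (illustration only, assuming a hypothetical 64-CPU system
| |  * with the default RCU_FANOUT_LEAF of 16): the geometry code above would
| |  * build a two-level tree with one root srcu_node and four leaf srcu_node
| |  * structures.  Leaf n covers CPUs 16*n through 16*n + 15, so the per-CPU
| |  * loop sets that leaf's ->grplo to 16*n and ->grphi to 16*n + 15, and each
| |  * srcu_data's ->grpmask becomes 1 << (cpu - grplo), that is, one bit per
| |  * CPU within its leaf.
| |  */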
| 168 | |
| 169 | /* |
| 170 | * Initialize non-compile-time initialized fields, including the |
| 171 | * associated srcu_node and srcu_data structures. The is_static |
| 172 | * parameter is passed through to init_srcu_struct_nodes(), and |
| 173 | * also tells us that ->sda has already been wired up to srcu_data. |
| 174 | */ |
| 175 | static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) |
| 176 | { |
| 177 | mutex_init(&sp->srcu_cb_mutex); |
| 178 | mutex_init(&sp->srcu_gp_mutex); |
| 179 | sp->srcu_idx = 0; |
| 180 | sp->srcu_gp_seq = 0; |
| 181 | sp->srcu_barrier_seq = 0; |
| 182 | mutex_init(&sp->srcu_barrier_mutex); |
| 183 | atomic_set(&sp->srcu_barrier_cpu_cnt, 0); |
| 184 | INIT_DELAYED_WORK(&sp->work, process_srcu); |
| 185 | if (!is_static) |
| 186 | sp->sda = alloc_percpu(struct srcu_data); |
| 187 | init_srcu_struct_nodes(sp, is_static); |
| 188 | sp->srcu_gp_seq_needed_exp = 0; |
| 189 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
| 190 | smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ |
| 191 | return sp->sda ? 0 : -ENOMEM; |
| 192 | } |
| 193 | |
| 194 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 195 | |
| 196 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, |
| 197 | struct lock_class_key *key) |
| 198 | { |
| 199 | /* Don't re-initialize a lock while it is held. */ |
| 200 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); |
| 201 | lockdep_init_map(&sp->dep_map, name, key, 0); |
| 202 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
| 203 | return init_srcu_struct_fields(sp, false); |
| 204 | } |
| 205 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
| 206 | |
| 207 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 208 | |
| 209 | /** |
| 210 | * init_srcu_struct - initialize a sleep-RCU structure |
| 211 | * @sp: structure to initialize. |
| 212 | * |
| 213 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
| 214 | * to any other function. Each srcu_struct represents a separate domain |
| 215 | * of SRCU protection. |
| 216 | */ |
| 217 | int init_srcu_struct(struct srcu_struct *sp) |
| 218 | { |
| 219 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
| 220 | return init_srcu_struct_fields(sp, false); |
| 221 | } |
| 222 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
| 223 | |
| 224 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
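| | 
| | /*
| |  * Usage sketch (illustrative only; "my_srcu" and "my_init" are made-up
| |  * names): an SRCU domain can be created statically, for example:
| |  *
| |  *	DEFINE_STATIC_SRCU(my_srcu);
| |  *
| |  * or dynamically, for example:
| |  *
| |  *	static struct srcu_struct my_srcu;
| |  *
| |  *	static int __init my_init(void)
| |  *	{
| |  *		return init_srcu_struct(&my_srcu);
| |  *	}
| |  *
| |  * with a matching cleanup_srcu_struct(&my_srcu) once all readers have
| |  * finished and all callbacks have been invoked.
| |  */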
| 225 | |
| 226 | /* |
| 227 | * First-use initialization of statically allocated srcu_struct |
| 228 | * structure. Wiring up the combining tree is more than can be |
| 229 | * done with compile-time initialization, so this check is added |
| 230 | * to each update-side SRCU primitive. Use sp->lock, which -is- |
| 231 | * compile-time initialized, to resolve races involving multiple |
| 232 | * CPUs trying to garner first-use privileges. |
| 233 | */ |
| 234 | static void check_init_srcu_struct(struct srcu_struct *sp) |
| 235 | { |
| 236 | unsigned long flags; |
| 237 | |
| 238 | WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); |
| 239 | /* The smp_load_acquire() pairs with the smp_store_release(). */ |
| 240 | if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ |
| 241 | return; /* Already initialized. */ |
| 242 | spin_lock_irqsave_rcu_node(sp, flags); |
| 243 | if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { |
| 244 | spin_unlock_irqrestore_rcu_node(sp, flags); |
| 245 | return; |
| 246 | } |
| 247 | init_srcu_struct_fields(sp, true); |
| 248 | spin_unlock_irqrestore_rcu_node(sp, flags); |
| 249 | } |
| 250 | |
| 251 | /* |
| 252 | * Returns approximate total of the readers' ->srcu_lock_count[] values |
| 253 | * for the rank of per-CPU counters specified by idx. |
| 254 | */ |
| 255 | static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) |
| 256 | { |
| 257 | int cpu; |
| 258 | unsigned long sum = 0; |
| 259 | |
| 260 | for_each_possible_cpu(cpu) { |
| 261 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
| 262 | |
| 263 | sum += READ_ONCE(cpuc->srcu_lock_count[idx]); |
| 264 | } |
| 265 | return sum; |
| 266 | } |
| 267 | |
| 268 | /* |
| 269 | * Returns approximate total of the readers' ->srcu_unlock_count[] values |
| 270 | * for the rank of per-CPU counters specified by idx. |
| 271 | */ |
| 272 | static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) |
| 273 | { |
| 274 | int cpu; |
| 275 | unsigned long sum = 0; |
| 276 | |
| 277 | for_each_possible_cpu(cpu) { |
| 278 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
| 279 | |
| 280 | sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); |
| 281 | } |
| 282 | return sum; |
| 283 | } |
| 284 | |
| 285 | /* |
| 286 | * Return true if the number of pre-existing readers is determined to |
| 287 | * be zero. |
| 288 | */ |
| 289 | static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) |
| 290 | { |
| 291 | unsigned long unlocks; |
| 292 | |
| 293 | unlocks = srcu_readers_unlock_idx(sp, idx); |
| 294 | |
| 295 | /* |
| 296 | * Make sure that a lock is always counted if the corresponding |
| 297 | * unlock is counted. Needs to be a smp_mb() as the read side may |
| 298 | * contain a read from a variable that is written to before the |
| 299 | * synchronize_srcu() in the write side. In this case smp_mb()s |
| 300 | * A and B act like the store buffering pattern. |
| 301 | * |
| 302 | * This smp_mb() also pairs with smp_mb() C to prevent accesses |
| 303 | * after the synchronize_srcu() from being executed before the |
| 304 | * grace period ends. |
| 305 | */ |
| 306 | smp_mb(); /* A */ |
| 307 | |
| 308 | /* |
| 309 | * If the locks are the same as the unlocks, then there must have |
| 310 | * been no readers on this index at some time in between. This does |
| 311 | * not mean that there are no more readers, as one could have read |
| 312 | * the current index but not have incremented the lock counter yet. |
| 313 | * |
| 314 | * So suppose that the updater is preempted here for so long |
| 315 | * that more than ULONG_MAX non-nested readers come and go in |
| 316 | * the meantime. It turns out that this cannot result in overflow |
| 317 | * because if a reader modifies its unlock count after we read it |
| 318 | * above, then that reader's next load of ->srcu_idx is guaranteed |
| 319 | * to get the new value, which will cause it to operate on the |
| 320 | * other bank of counters, where it cannot contribute to the |
| 321 | * overflow of these counters. This means that there is a maximum |
| 322 | * of 2*NR_CPUS increments, which cannot overflow given current |
| 323 | * systems, especially not on 64-bit systems. |
| 324 | * |
| 325 | * OK, how about nesting? This does impose a limit on nesting |
| 326 | * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, |
| 327 | * especially on 64-bit systems. |
| 328 | */ |
| 329 | return srcu_readers_lock_idx(sp, idx) == unlocks; |
| 330 | } |
| 331 | |
| 332 | /** |
| 333 | * srcu_readers_active - returns true if there are readers, and false 
| 334 | * otherwise |
| 335 | * @sp: which srcu_struct to count active readers (holding srcu_read_lock). |
| 336 | * |
| 337 | * Note that this is not an atomic primitive, and can therefore suffer |
| 338 | * severe errors when invoked on an active srcu_struct. That said, it |
| 339 | * can be useful as an error check at cleanup time. |
| 340 | */ |
| 341 | static bool srcu_readers_active(struct srcu_struct *sp) |
| 342 | { |
| 343 | int cpu; |
| 344 | unsigned long sum = 0; |
| 345 | |
| 346 | for_each_possible_cpu(cpu) { |
| 347 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
| 348 | |
| 349 | sum += READ_ONCE(cpuc->srcu_lock_count[0]); |
| 350 | sum += READ_ONCE(cpuc->srcu_lock_count[1]); |
| 351 | sum -= READ_ONCE(cpuc->srcu_unlock_count[0]); |
| 352 | sum -= READ_ONCE(cpuc->srcu_unlock_count[1]); |
| 353 | } |
| 354 | return sum; |
| 355 | } |
| 356 | |
| 357 | #define SRCU_INTERVAL 1 |
| 358 | |
| 359 | /* |
| 360 | * Return grace-period delay, zero if there are expedited grace |
| 361 | * periods pending, SRCU_INTERVAL otherwise. |
| 362 | */ |
| 363 | static unsigned long srcu_get_delay(struct srcu_struct *sp) |
| 364 | { |
| 365 | if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), |
| 366 | READ_ONCE(sp->srcu_gp_seq_needed_exp))) |
| 367 | return 0; |
| 368 | return SRCU_INTERVAL; |
| 369 | } |
| 370 | |
| 371 | /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ |
| 372 | void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) |
| 373 | { |
| 374 | int cpu; |
| 375 | |
| 376 | if (WARN_ON(!srcu_get_delay(sp))) |
| 377 | return; /* Just leak it! */ |
| 378 | if (WARN_ON(srcu_readers_active(sp))) |
| 379 | return; /* Just leak it! */ |
| 380 | if (quiesced) { |
| 381 | if (WARN_ON(delayed_work_pending(&sp->work))) |
| 382 | return; /* Just leak it! */ |
| 383 | } else { |
| 384 | flush_delayed_work(&sp->work); |
| 385 | } |
| 386 | for_each_possible_cpu(cpu) |
| 387 | if (quiesced) { |
| 388 | if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work))) |
| 389 | return; /* Just leak it! */ |
| 390 | } else { |
| 391 | flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); |
| 392 | } |
| 393 | if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || |
| 394 | WARN_ON(srcu_readers_active(sp))) { |
| 395 | pr_info("%s: Active srcu_struct %p state: %d\n", |
| 396 | __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); |
| 397 | return; /* Caller forgot to stop doing call_srcu()? */ |
| 398 | } |
| 399 | free_percpu(sp->sda); |
| 400 | sp->sda = NULL; |
| 401 | } |
| 402 | EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); |
| 403 | |
| 404 | /* |
| 405 | * Counts the new reader in the appropriate per-CPU element of the |
| 406 | * srcu_struct. |
| 407 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
| 408 | */ |
| 409 | int __srcu_read_lock(struct srcu_struct *sp) |
| 410 | { |
| 411 | int idx; |
| 412 | |
| 413 | idx = READ_ONCE(sp->srcu_idx) & 0x1; |
| 414 | this_cpu_inc(sp->sda->srcu_lock_count[idx]); |
| 415 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
| 416 | return idx; |
| 417 | } |
| 418 | EXPORT_SYMBOL_GPL(__srcu_read_lock); |
| 419 | |
| 420 | /* |
| 421 | * Removes the count for the old reader from the appropriate per-CPU |
| 422 | * element of the srcu_struct. Note that this may well be a different |
| 423 | * CPU than that which was incremented by the corresponding srcu_read_lock(). |
| 424 | */ |
| 425 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 426 | { |
| 427 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ |
| 428 | this_cpu_inc(sp->sda->srcu_unlock_count[idx]); |
| 429 | } |
| 430 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
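| | 
| | /*
| |  * Reader-side usage sketch (illustrative only; "my_srcu", "gp", and
| |  * do_something_with() are hypothetical names):
| |  *
| |  *	int idx;
| |  *	struct foo *p;
| |  *
| |  *	idx = srcu_read_lock(&my_srcu);
| |  *	p = srcu_dereference(gp, &my_srcu);
| |  *	if (p)
| |  *		do_something_with(p);
| |  *	srcu_read_unlock(&my_srcu, idx);
| |  *
| |  * Unlike rcu_read_lock(), the index returned by srcu_read_lock() must be
| |  * passed to the matching srcu_read_unlock(), and the critical section may
| |  * block, though it must not wait for a grace period on the same
| |  * srcu_struct, on pain of deadlock.
| |  */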
| 431 | |
| 432 | /* |
| 433 | * We use an adaptive strategy for synchronize_srcu() and especially for |
| 434 | * synchronize_srcu_expedited(). We spin for a fixed time period |
| 435 | * (defined below) to allow SRCU readers to exit their read-side critical |
| 436 | * sections. If there are still some readers after a few microseconds, |
| 437 | * we repeatedly block for 1-millisecond time periods. |
| 438 | */ |
| 439 | #define SRCU_RETRY_CHECK_DELAY 5 |
| 440 | |
| 441 | /* |
| 442 | * Start an SRCU grace period. |
| 443 | */ |
| 444 | static void srcu_gp_start(struct srcu_struct *sp) |
| 445 | { |
| 446 | struct srcu_data *sdp = this_cpu_ptr(sp->sda); |
| 447 | int state; |
| 448 | |
| 449 | lockdep_assert_held(&ACCESS_PRIVATE(sp, lock)); |
| 450 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
| 451 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 452 | rcu_seq_current(&sp->srcu_gp_seq)); |
| 453 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
| 454 | rcu_seq_snap(&sp->srcu_gp_seq)); |
| 455 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ |
| 456 | rcu_seq_start(&sp->srcu_gp_seq); |
| 457 | state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
| 458 | WARN_ON_ONCE(state != SRCU_STATE_SCAN1); |
| 459 | } |
| 460 | |
| 461 | /* |
| 462 | * Track online CPUs to guide callback workqueue placement. |
| 463 | */ |
| 464 | DEFINE_PER_CPU(bool, srcu_online); |
| 465 | |
| 466 | void srcu_online_cpu(unsigned int cpu) |
| 467 | { |
| 468 | WRITE_ONCE(per_cpu(srcu_online, cpu), true); |
| 469 | } |
| 470 | |
| 471 | void srcu_offline_cpu(unsigned int cpu) |
| 472 | { |
| 473 | WRITE_ONCE(per_cpu(srcu_online, cpu), false); |
| 474 | } |
| 475 | |
| 476 | /* |
| 477 | * Place the workqueue handler on the specified CPU if online, otherwise |
| 478 | * just run it wherever. This is useful for placing workqueue handlers 
| 479 | * that are to invoke the specified CPU's callbacks. |
| 480 | */ |
| 481 | static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 482 | struct delayed_work *dwork, |
| 483 | unsigned long delay) |
| 484 | { |
| 485 | bool ret; |
| 486 | |
| 487 | preempt_disable(); |
| 488 | if (READ_ONCE(per_cpu(srcu_online, cpu))) |
| 489 | ret = queue_delayed_work_on(cpu, wq, dwork, delay); |
| 490 | else |
| 491 | ret = queue_delayed_work(wq, dwork, delay); |
| 492 | preempt_enable(); |
| 493 | return ret; |
| 494 | } |
| 495 | |
| 496 | /* |
| 497 | * Schedule callback invocation for the specified srcu_data structure, |
| 498 | * if possible, on the corresponding CPU. |
| 499 | */ |
| 500 | static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) |
| 501 | { |
| 502 | srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay); |
| 503 | } |
| 504 | |
| 505 | /* |
| 506 | * Schedule callback invocation for all srcu_data structures associated |
| 507 | * with the specified srcu_node structure that have callbacks for the |
| 508 | * just-completed grace period, the one corresponding to idx. If possible, |
| 509 | * schedule this invocation on the corresponding CPUs. |
| 510 | */ |
| 511 | static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, |
| 512 | unsigned long mask, unsigned long delay) |
| 513 | { |
| 514 | int cpu; |
| 515 | |
| 516 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
| 517 | if (!(mask & (1 << (cpu - snp->grplo)))) |
| 518 | continue; |
| 519 | srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); |
| 520 | } |
| 521 | } |
| 522 | |
| 523 | /* |
| 524 | * Note the end of an SRCU grace period. Initiates callback invocation |
| 525 | * and starts a new grace period if needed. |
| 526 | * |
| 527 | * The ->srcu_cb_mutex acquisition does not protect any data, but |
| 528 | * instead prevents more than one grace period from starting while we |
| 529 | * are initiating callback invocation. This allows the ->srcu_have_cbs[] |
| 530 | * array to have a finite number of elements. |
| 531 | */ |
| 532 | static void srcu_gp_end(struct srcu_struct *sp) |
| 533 | { |
| 534 | unsigned long cbdelay; |
| 535 | bool cbs; |
| 536 | bool last_lvl; |
| 537 | int cpu; |
| 538 | unsigned long flags; |
| 539 | unsigned long gpseq; |
| 540 | int idx; |
| 541 | unsigned long mask; |
| 542 | struct srcu_data *sdp; |
| 543 | struct srcu_node *snp; |
| 544 | |
| 545 | /* Prevent more than one additional grace period. */ |
| 546 | mutex_lock(&sp->srcu_cb_mutex); |
| 547 | |
| 548 | /* End the current grace period. */ |
| 549 | spin_lock_irq_rcu_node(sp); |
| 550 | idx = rcu_seq_state(sp->srcu_gp_seq); |
| 551 | WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); |
| 552 | cbdelay = srcu_get_delay(sp); |
| 553 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
| 554 | rcu_seq_end(&sp->srcu_gp_seq); |
| 555 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); |
| 556 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) |
| 557 | sp->srcu_gp_seq_needed_exp = gpseq; |
| 558 | spin_unlock_irq_rcu_node(sp); |
| 559 | mutex_unlock(&sp->srcu_gp_mutex); |
| 560 | /* A new grace period can start at this point. But only one. */ |
| 561 | |
| 562 | /* Initiate callback invocation as needed. */ |
| 563 | idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); |
| 564 | rcu_for_each_node_breadth_first(sp, snp) { |
| 565 | spin_lock_irq_rcu_node(snp); |
| 566 | cbs = false; |
| 567 | last_lvl = snp >= sp->level[rcu_num_lvls - 1]; |
| 568 | if (last_lvl) |
| 569 | cbs = snp->srcu_have_cbs[idx] == gpseq; |
| 570 | snp->srcu_have_cbs[idx] = gpseq; |
| 571 | rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); |
| 572 | if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) |
| 573 | snp->srcu_gp_seq_needed_exp = gpseq; |
| 574 | mask = snp->srcu_data_have_cbs[idx]; |
| 575 | snp->srcu_data_have_cbs[idx] = 0; |
| 576 | spin_unlock_irq_rcu_node(snp); |
| 577 | if (cbs) |
| 578 | srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); |
| 579 | |
| 580 | /* Occasionally prevent srcu_data counter wrap. */ |
| 581 | if (!(gpseq & counter_wrap_check) && last_lvl) |
| 582 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
| 583 | sdp = per_cpu_ptr(sp->sda, cpu); |
| 584 | spin_lock_irqsave_rcu_node(sdp, flags); |
| 585 | if (ULONG_CMP_GE(gpseq, |
| 586 | sdp->srcu_gp_seq_needed + 100)) |
| 587 | sdp->srcu_gp_seq_needed = gpseq; |
| 588 | if (ULONG_CMP_GE(gpseq, |
| 589 | sdp->srcu_gp_seq_needed_exp + 100)) |
| 590 | sdp->srcu_gp_seq_needed_exp = gpseq; |
| 591 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
| 592 | } |
| 593 | } |
| 594 | |
| 595 | /* Callback initiation done, allow grace periods after next. */ |
| 596 | mutex_unlock(&sp->srcu_cb_mutex); |
| 597 | |
| 598 | /* Start a new grace period if needed. */ |
| 599 | spin_lock_irq_rcu_node(sp); |
| 600 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); |
| 601 | if (!rcu_seq_state(gpseq) && |
| 602 | ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { |
| 603 | srcu_gp_start(sp); |
| 604 | spin_unlock_irq_rcu_node(sp); |
| 605 | srcu_reschedule(sp, 0); |
| 606 | } else { |
| 607 | spin_unlock_irq_rcu_node(sp); |
| 608 | } |
| 609 | } |
| 610 | |
| 611 | /* |
| 612 | * Funnel-locking scheme to scalably mediate many concurrent expedited |
| 613 | * grace-period requests. This function is invoked for the first known |
| 614 | * expedited request for a grace period that has already been requested, |
| 615 | * but without expediting. To start a completely new grace period, |
| 616 | * whether expedited or not, use srcu_funnel_gp_start() instead. |
| 617 | */ |
| 618 | static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, |
| 619 | unsigned long s) |
| 620 | { |
| 621 | unsigned long flags; |
| 622 | |
| 623 | for (; snp != NULL; snp = snp->srcu_parent) { |
| 624 | if (rcu_seq_done(&sp->srcu_gp_seq, s) || |
| 625 | ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) |
| 626 | return; |
| 627 | spin_lock_irqsave_rcu_node(snp, flags); |
| 628 | if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) { |
| 629 | spin_unlock_irqrestore_rcu_node(snp, flags); |
| 630 | return; |
| 631 | } |
| 632 | WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); |
| 633 | spin_unlock_irqrestore_rcu_node(snp, flags); |
| 634 | } |
| 635 | spin_lock_irqsave_rcu_node(sp, flags); |
| 636 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
| 637 | sp->srcu_gp_seq_needed_exp = s; |
| 638 | spin_unlock_irqrestore_rcu_node(sp, flags); |
| 639 | } |
| 640 | |
| 641 | /* |
| 642 | * Funnel-locking scheme to scalably mediate many concurrent grace-period |
| 643 | * requests. The winner has to do the work of actually starting grace |
| 644 | * period s. Losers must either ensure that their desired grace-period |
| 645 | * number is recorded on at least their leaf srcu_node structure, or they |
| 646 | * must take steps to invoke their own callbacks. |
| 647 | * |
| 648 | * Note that this function also does the work of srcu_funnel_exp_start(), |
| 649 | * in some cases by directly invoking it. |
| 650 | */ |
| 651 | static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, |
| 652 | unsigned long s, bool do_norm) |
| 653 | { |
| 654 | unsigned long flags; |
| 655 | int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); |
| 656 | struct srcu_node *snp = sdp->mynode; |
| 657 | unsigned long snp_seq; |
| 658 | |
| 659 | /* Each pass through the loop does one level of the srcu_node tree. */ |
| 660 | for (; snp != NULL; snp = snp->srcu_parent) { |
| 661 | if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) |
| 662 | return; /* GP already done and CBs recorded. */ |
| 663 | spin_lock_irqsave_rcu_node(snp, flags); |
| 664 | if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { |
| 665 | snp_seq = snp->srcu_have_cbs[idx]; |
| 666 | if (snp == sdp->mynode && snp_seq == s) |
| 667 | snp->srcu_data_have_cbs[idx] |= sdp->grpmask; |
| 668 | spin_unlock_irqrestore_rcu_node(snp, flags); |
| 669 | if (snp == sdp->mynode && snp_seq != s) { |
| 670 | srcu_schedule_cbs_sdp(sdp, do_norm |
| 671 | ? SRCU_INTERVAL |
| 672 | : 0); |
| 673 | return; |
| 674 | } |
| 675 | if (!do_norm) |
| 676 | srcu_funnel_exp_start(sp, snp, s); |
| 677 | return; |
| 678 | } |
| 679 | snp->srcu_have_cbs[idx] = s; |
| 680 | if (snp == sdp->mynode) |
| 681 | snp->srcu_data_have_cbs[idx] |= sdp->grpmask; |
| 682 | if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) |
| 683 | snp->srcu_gp_seq_needed_exp = s; |
| 684 | spin_unlock_irqrestore_rcu_node(snp, flags); |
| 685 | } |
| 686 | |
| 687 | /* Top of tree, must ensure the grace period will be started. */ |
| 688 | spin_lock_irqsave_rcu_node(sp, flags); |
| 689 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { |
| 690 | /* |
| 691 | * Record need for grace period s. Pair with load |
| 692 | * acquire setting up for initialization. |
| 693 | */ |
| 694 | smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ |
| 695 | } |
| 696 | if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
| 697 | sp->srcu_gp_seq_needed_exp = s; |
| 698 | |
| 699 | /* If grace period not already done and none in progress, start it. */ |
| 700 | if (!rcu_seq_done(&sp->srcu_gp_seq, s) && |
| 701 | rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { |
| 702 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
| 703 | srcu_gp_start(sp); |
| 704 | queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp)); |
| 705 | } |
| 706 | spin_unlock_irqrestore_rcu_node(sp, flags); |
| 707 | } |
| 708 | |
| 709 | /* |
| 710 | * Wait until all readers counted by array index idx complete, but |
| 711 | * loop an additional time if there is an expedited grace period pending. |
| 712 | * The caller must ensure that ->srcu_idx is not changed while checking. |
| 713 | */ |
| 714 | static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) |
| 715 | { |
| 716 | for (;;) { |
| 717 | if (srcu_readers_active_idx_check(sp, idx)) |
| 718 | return true; |
| 719 | if (--trycount + !srcu_get_delay(sp) <= 0) |
| 720 | return false; |
| 721 | udelay(SRCU_RETRY_CHECK_DELAY); |
| 722 | } |
| 723 | } |
| 724 | |
| 725 | /* |
| 726 | * Increment the ->srcu_idx counter so that future SRCU readers will |
| 727 | * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows |
| 728 | * us to wait for pre-existing readers in a starvation-free manner. |
| 729 | */ |
| 730 | static void srcu_flip(struct srcu_struct *sp) |
| 731 | { |
| 732 | /* |
| 733 | * Ensure that if this updater saw a given reader's increment |
| 734 | * from __srcu_read_lock(), that reader was using an old value |
| 735 | * of ->srcu_idx. Also ensure that if a given reader sees the |
| 736 | * new value of ->srcu_idx, this updater's earlier scans cannot |
| 737 | * have seen that reader's increments (which is OK, because this |
| 738 | * grace period need not wait on that reader). |
| 739 | */ |
| 740 | smp_mb(); /* E */ /* Pairs with B and C. */ |
| 741 | |
| 742 | WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); |
| 743 | |
| 744 | /* |
| 745 | * Ensure that if the updater misses an __srcu_read_unlock() |
| 746 | * increment, that task's next __srcu_read_lock() will see the |
| 747 | * above counter update. Note that both this memory barrier |
| 748 | * and the one in srcu_readers_active_idx_check() provide the |
| 749 | * guarantee for __srcu_read_lock(). |
| 750 | */ |
| 751 | smp_mb(); /* D */ /* Pairs with C. */ |
| 752 | } |
| 753 | |
| 754 | /* |
| 755 | * If SRCU is likely idle, return true, otherwise return false. |
| 756 | * |
| 757 | * Note that it is OK for several current from-idle requests for a new 
| 758 | * grace period to specify expediting because they will all end 
| 759 | * up requesting the same grace period anyhow. So no loss. |
| 760 | * |
| 761 | * Note also that if any CPU (including the current one) is still invoking |
| 762 | * callbacks, this function will nevertheless say "idle". This is not |
| 763 | * ideal, but the overhead of checking all CPUs' callback lists is even |
| 764 | * less ideal, especially on large systems. Furthermore, the wakeup |
| 765 | * can happen before the callback is fully removed, so we have no choice |
| 766 | * but to accept this type of error. |
| 767 | * |
| 768 | * This function is also subject to counter-wrap errors, but let's face |
| 769 | * it, if this function was preempted for enough time for the counters |
| 770 | * to wrap, it really doesn't matter whether or not we expedite the grace |
| 771 | * period. The extra overhead of a needlessly expedited grace period is |
| 772 | * negligible when amortized over that time period, and the extra latency 
| 773 | * of a needlessly non-expedited grace period is similarly negligible. |
| 774 | */ |
| 775 | static bool srcu_might_be_idle(struct srcu_struct *sp) |
| 776 | { |
| 777 | unsigned long curseq; |
| 778 | unsigned long flags; |
| 779 | struct srcu_data *sdp; |
| 780 | unsigned long t; |
| 781 | |
| 782 | /* If the local srcu_data structure has callbacks, not idle. */ |
| 783 | local_irq_save(flags); |
| 784 | sdp = this_cpu_ptr(sp->sda); |
| 785 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { |
| 786 | local_irq_restore(flags); |
| 787 | return false; /* Callbacks already present, so not idle. */ |
| 788 | } |
| 789 | local_irq_restore(flags); |
| 790 | |
| 791 | /* |
| 792 | * No local callbacks, so probabilistically probe global state. 
| 793 | * Exact information would require acquiring locks, which would |
| 794 | * kill scalability, hence the probabilistic nature of the probe. 
| 795 | */ |
| 796 | |
| 797 | /* First, see if enough time has passed since the last GP. */ |
| 798 | t = ktime_get_mono_fast_ns(); |
| 799 | if (exp_holdoff == 0 || |
| 800 | time_in_range_open(t, sp->srcu_last_gp_end, |
| 801 | sp->srcu_last_gp_end + exp_holdoff)) |
| 802 | return false; /* Too soon after last GP. */ |
| 803 | |
| 804 | /* Next, check for probable idleness. */ |
| 805 | curseq = rcu_seq_current(&sp->srcu_gp_seq); |
| 806 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ |
| 807 | if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) |
| 808 | return false; /* Grace period in progress, so not idle. */ |
| 809 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ |
| 810 | if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) |
| 811 | return false; /* GP # changed, so not idle. */ |
| 812 | return true; /* With reasonable probability, idle! */ |
| 813 | } |
| 814 | |
| 815 | /* |
| 816 | * SRCU callback function to leak a callback. |
| 817 | */ |
| 818 | static void srcu_leak_callback(struct rcu_head *rhp) |
| 819 | { |
| 820 | } |
| 821 | |
| 822 | /* |
| 823 | * Enqueue an SRCU callback on the srcu_data structure associated with |
| 824 | * the current CPU and the specified srcu_struct structure, initiating |
| 825 | * grace-period processing if it is not already running. |
| 826 | * |
| 827 | * Note that all CPUs must agree that the grace period extended beyond |
| 828 | * all pre-existing SRCU read-side critical sections. On systems with 
| 829 | * more than one CPU, this means that when "func()" is invoked, each CPU |
| 830 | * is guaranteed to have executed a full memory barrier since the end of |
| 831 | * its last corresponding SRCU read-side critical section whose beginning |
| 832 | * preceded the call to call_srcu(). It also means that each CPU executing |
| 833 | * an SRCU read-side critical section that continues beyond the start of |
| 834 | * "func()" must have executed a memory barrier after the call_srcu() |
| 835 | * but before the beginning of that SRCU read-side critical section. |
| 836 | * Note that these guarantees include CPUs that are offline, idle, or |
| 837 | * executing in user mode, as well as CPUs that are executing in the kernel. |
| 838 | * |
| 839 | * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the |
| 840 | * resulting SRCU callback function "func()", then both CPU A and CPU |
| 841 | * B are guaranteed to execute a full memory barrier during the time |
| 842 | * interval between the call to call_srcu() and the invocation of "func()". |
| 843 | * This guarantee applies even if CPU A and CPU B are the same CPU (but |
| 844 | * again only if the system has more than one CPU). |
| 845 | * |
| 846 | * Of course, these guarantees apply only for invocations of call_srcu(), |
| 847 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same |
| 848 | * srcu_struct structure. |
| 849 | */ |
| 850 | void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
| 851 | rcu_callback_t func, bool do_norm) |
| 852 | { |
| 853 | unsigned long flags; |
| 854 | bool needexp = false; |
| 855 | bool needgp = false; |
| 856 | unsigned long s; |
| 857 | struct srcu_data *sdp; |
| 858 | |
| 859 | check_init_srcu_struct(sp); |
| 860 | if (debug_rcu_head_queue(rhp)) { |
| 861 | /* Probable double call_srcu(), so leak the callback. */ |
| 862 | WRITE_ONCE(rhp->func, srcu_leak_callback); |
| 863 | WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); |
| 864 | return; |
| 865 | } |
| 866 | rhp->func = func; |
| 867 | local_irq_save(flags); |
| 868 | sdp = this_cpu_ptr(sp->sda); |
| 869 | spin_lock_rcu_node(sdp); |
| 870 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); |
| 871 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 872 | rcu_seq_current(&sp->srcu_gp_seq)); |
| 873 | s = rcu_seq_snap(&sp->srcu_gp_seq); |
| 874 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
| 875 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
| 876 | sdp->srcu_gp_seq_needed = s; |
| 877 | needgp = true; |
| 878 | } |
| 879 | if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
| 880 | sdp->srcu_gp_seq_needed_exp = s; |
| 881 | needexp = true; |
| 882 | } |
| 883 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
| 884 | if (needgp) |
| 885 | srcu_funnel_gp_start(sp, sdp, s, do_norm); |
| 886 | else if (needexp) |
| 887 | srcu_funnel_exp_start(sp, sdp->mynode, s); |
| 888 | } |
| 889 | |
| 890 | /** |
| 891 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
| 892 | * @sp: srcu_struct in which to queue the callback 
| 893 | * @rhp: structure to be used for queueing the SRCU callback. |
| 894 | * @func: function to be invoked after the SRCU grace period |
| 895 | * |
| 896 | * The callback function will be invoked some time after a full SRCU |
| 897 | * grace period elapses, in other words after all pre-existing SRCU |
| 898 | * read-side critical sections have completed. However, the callback |
| 899 | * function might well execute concurrently with other SRCU read-side |
| 900 | * critical sections that started after call_srcu() was invoked. SRCU |
| 901 | * read-side critical sections are delimited by srcu_read_lock() and |
| 902 | * srcu_read_unlock(), and may be nested. |
| 903 | * |
| 904 | * The callback will be invoked from process context, but must nevertheless |
| 905 | * be fast and must not block. |
| 906 | */ |
| 907 | void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
| 908 | rcu_callback_t func) |
| 909 | { |
| 910 | __call_srcu(sp, rhp, func, true); |
| 911 | } |
| 912 | EXPORT_SYMBOL_GPL(call_srcu); |
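| | 
| | /*
| |  * Usage sketch for call_srcu() (illustrative only; struct foo, free_foo_cb(),
| |  * and "my_srcu" are hypothetical):
| |  *
| |  *	struct foo {
| |  *		struct rcu_head rh;
| |  *		...
| |  *	};
| |  *
| |  *	static void free_foo_cb(struct rcu_head *rhp)
| |  *	{
| |  *		kfree(container_of(rhp, struct foo, rh));
| |  *	}
| |  *
| |  *	// After unpublishing p from all reader-visible structures:
| |  *	call_srcu(&my_srcu, &p->rh, free_foo_cb);
| |  *
| |  * The callback is invoked only after every SRCU reader that might still
| |  * hold a reference to p has completed its read-side critical section.
| |  */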
| 913 | |
| 914 | /* |
| 915 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 916 | */ |
| 917 | static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) |
| 918 | { |
| 919 | struct rcu_synchronize rcu; |
| 920 | |
| 921 | RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || |
| 922 | lock_is_held(&rcu_bh_lock_map) || |
| 923 | lock_is_held(&rcu_lock_map) || |
| 924 | lock_is_held(&rcu_sched_lock_map), |
| 925 | "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); |
| 926 | |
| 927 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
| 928 | return; |
| 929 | might_sleep(); |
| 930 | check_init_srcu_struct(sp); |
| 931 | init_completion(&rcu.completion); |
| 932 | init_rcu_head_on_stack(&rcu.head); |
| 933 | __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); |
| 934 | wait_for_completion(&rcu.completion); |
| 935 | destroy_rcu_head_on_stack(&rcu.head); |
| 936 | |
| 937 | /* |
| 938 | * Make sure that later code is ordered after the SRCU grace |
| 939 | * period. This pairs with the spin_lock_irq_rcu_node() |
| 940 | * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
| 941 | * because the current CPU might have been totally uninvolved with |
| 942 | * (and thus unordered against) that grace period. |
| 943 | */ |
| 944 | smp_mb(); |
| 945 | } |
| 946 | |
| 947 | /** |
| 948 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
| 949 | * @sp: srcu_struct with which to synchronize. |
| 950 | * |
| 951 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 952 | * spinning rather than blocking when waiting. |
| 953 | * |
| 954 | * Note that synchronize_srcu_expedited() has the same deadlock and |
| 955 | * memory-ordering properties as does synchronize_srcu(). |
| 956 | */ |
| 957 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
| 958 | { |
| 959 | __synchronize_srcu(sp, rcu_gp_is_normal()); |
| 960 | } |
| 961 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
| 962 | |
| 963 | /** |
| 964 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
| 965 | * @sp: srcu_struct with which to synchronize. |
| 966 | * |
| 967 | * Wait for the counts of both indexes to drain to zero. To avoid 
| 968 | * possible starvation of synchronize_srcu(), this function first waits 
| 969 | * for the count of index ((->srcu_idx & 1) ^ 1) to drain to zero, 
| 970 | * then flips ->srcu_idx and waits for the count of the other index. 
| 971 | * |
| 972 | * Can block; must be called from process context. |
| 973 | * |
| 974 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 975 | * SRCU read-side critical section; doing so will result in deadlock. |
| 976 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 977 | * srcu_struct from some other srcu_struct's read-side critical section, |
| 978 | * as long as the resulting graph of srcu_structs is acyclic. |
| 979 | * |
| 980 | * There are memory-ordering constraints implied by synchronize_srcu(). |
| 981 | * On systems with more than one CPU, when synchronize_srcu() returns, |
| 982 | * each CPU is guaranteed to have executed a full memory barrier since |
| 983 | * the end of its last corresponding SRCU read-side critical section 
| 984 | * whose beginning preceded the call to synchronize_srcu(). In addition, |
| 985 | * each CPU having an SRCU read-side critical section that extends beyond |
| 986 | * the return from synchronize_srcu() is guaranteed to have executed a |
| 987 | * full memory barrier after the beginning of synchronize_srcu() and before |
| 988 | * the beginning of that SRCU read-side critical section. Note that these |
| 989 | * guarantees include CPUs that are offline, idle, or executing in user mode, |
| 990 | * as well as CPUs that are executing in the kernel. |
| 991 | * |
| 992 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned |
| 993 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed |
| 994 | * to have executed a full memory barrier during the execution of |
| 995 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B |
| 996 | * are the same CPU, but again only if the system has more than one CPU. |
| 997 | * |
| 998 | * Of course, these memory-ordering guarantees apply only when |
| 999 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are |
| 1000 | * passed the same srcu_struct structure. |
| 1001 | * |
| 1002 | * If SRCU is likely idle, expedite the first request. This semantic |
| 1003 | * was provided by Classic SRCU, and is relied upon by its users, so TREE |
| 1004 | * SRCU must also provide it. Note that detecting idleness is heuristic |
| 1005 | * and subject to both false positives and negatives. |
| 1006 | */ |
| 1007 | void synchronize_srcu(struct srcu_struct *sp) |
| 1008 | { |
| 1009 | if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) |
| 1010 | synchronize_srcu_expedited(sp); |
| 1011 | else |
| 1012 | __synchronize_srcu(sp, true); |
| 1013 | } |
| 1014 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
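| | 
| | /*
| |  * Updater-side usage sketch (illustrative only; "gp", "my_srcu", and
| |  * "my_lock" are hypothetical names):
| |  *
| |  *	struct foo *old;
| |  *
| |  *	spin_lock(&my_lock);
| |  *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
| |  *	rcu_assign_pointer(gp, NULL);
| |  *	spin_unlock(&my_lock);
| |  *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
| |  *	kfree(old);			// Now safe to free.
| |  */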
| 1015 | |
| 1016 | /* |
| 1017 | * Callback function for srcu_barrier() use. |
| 1018 | */ |
| 1019 | static void srcu_barrier_cb(struct rcu_head *rhp) |
| 1020 | { |
| 1021 | struct srcu_data *sdp; |
| 1022 | struct srcu_struct *sp; |
| 1023 | |
| 1024 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
| 1025 | sp = sdp->sp; |
| 1026 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
| 1027 | complete(&sp->srcu_barrier_completion); |
| 1028 | } |
| 1029 | |
| 1030 | /** |
| 1031 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
| 1032 | * @sp: srcu_struct on which to wait for in-flight callbacks. |
| 1033 | */ |
| 1034 | void srcu_barrier(struct srcu_struct *sp) |
| 1035 | { |
| 1036 | int cpu; |
| 1037 | struct srcu_data *sdp; |
| 1038 | unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); |
| 1039 | |
| 1040 | check_init_srcu_struct(sp); |
| 1041 | mutex_lock(&sp->srcu_barrier_mutex); |
| 1042 | if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { |
| 1043 | smp_mb(); /* Force ordering following return. */ |
| 1044 | mutex_unlock(&sp->srcu_barrier_mutex); |
| 1045 | return; /* Someone else did our work for us. */ |
| 1046 | } |
| 1047 | rcu_seq_start(&sp->srcu_barrier_seq); |
| 1048 | init_completion(&sp->srcu_barrier_completion); |
| 1049 | |
| 1050 | /* Initial count prevents reaching zero until all CBs are posted. */ |
| 1051 | atomic_set(&sp->srcu_barrier_cpu_cnt, 1); |
| 1052 | |
| 1053 | /* |
| 1054 | * Each pass through this loop enqueues a callback, but only |
| 1055 | * on CPUs already having callbacks enqueued. Note that if |
| 1056 | * a CPU already has callbacks enqueued, it must have already 
| 1057 | * registered the need for a future grace period, so all we |
| 1058 | * need do is enqueue a callback that will use the same |
| 1059 | * grace period as the last callback already in the queue. |
| 1060 | */ |
| 1061 | for_each_possible_cpu(cpu) { |
| 1062 | sdp = per_cpu_ptr(sp->sda, cpu); |
| 1063 | spin_lock_irq_rcu_node(sdp); |
| 1064 | atomic_inc(&sp->srcu_barrier_cpu_cnt); |
| 1065 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
| 1066 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
| 1067 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
| 1068 | &sdp->srcu_barrier_head, 0)) { |
| 1069 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
| 1070 | atomic_dec(&sp->srcu_barrier_cpu_cnt); |
| 1071 | } |
| 1072 | spin_unlock_irq_rcu_node(sdp); |
| 1073 | } |
| 1074 | |
| 1075 | /* Remove the initial count, at which point reaching zero can happen. */ |
| 1076 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
| 1077 | complete(&sp->srcu_barrier_completion); |
| 1078 | wait_for_completion(&sp->srcu_barrier_completion); |
| 1079 | |
| 1080 | rcu_seq_end(&sp->srcu_barrier_seq); |
| 1081 | mutex_unlock(&sp->srcu_barrier_mutex); |
| 1082 | } |
| 1083 | EXPORT_SYMBOL_GPL(srcu_barrier); |
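| | 
| | /*
| |  * Teardown sketch (illustrative only; "my_srcu" is a hypothetical name):
| |  * code that uses call_srcu() would typically, at module-exit or similar
| |  * teardown time, first stop posting new callbacks and then:
| |  *
| |  *	srcu_barrier(&my_srcu);		// Wait for in-flight callbacks.
| |  *	cleanup_srcu_struct(&my_srcu);	// Now safe to clean up.
| |  *
| |  * Invoking cleanup_srcu_struct() with callbacks still pending would instead
| |  * trip the WARN_ON()s in _cleanup_srcu_struct() above and leak the
| |  * srcu_struct's per-CPU data.
| |  */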
| 1084 | |
| 1085 | /** |
| 1086 | * srcu_batches_completed - return batches completed. |
| 1087 | * @sp: srcu_struct on which to report batch completion. |
| 1088 | * |
| 1089 | * Report the number of batches, correlated with, but not necessarily |
| 1090 | * precisely the same as, the number of grace periods that have elapsed. |
| 1091 | */ |
| 1092 | unsigned long srcu_batches_completed(struct srcu_struct *sp) |
| 1093 | { |
| 1094 | return sp->srcu_idx; |
| 1095 | } |
| 1096 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
| 1097 | |
| 1098 | /* |
| 1099 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
| 1100 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has |
| 1101 | * completed in that state. |
| 1102 | */ |
| 1103 | static void srcu_advance_state(struct srcu_struct *sp) |
| 1104 | { |
| 1105 | int idx; |
| 1106 | |
| 1107 | mutex_lock(&sp->srcu_gp_mutex); |
| 1108 | |
| 1109 | /* |
| 1110 | * Because readers might be delayed for an extended period after |
| 1111 | * fetching ->srcu_idx for their index, at any point in time there |
| 1112 | * might well be readers using both idx=0 and idx=1. We therefore |
| 1113 | * need to wait for readers to clear from both index values before |
| 1114 | * invoking a callback. |
| 1115 | * |
| 1116 | * The load-acquire ensures that we see the accesses performed |
| 1117 | * by the prior grace period. |
| 1118 | */ |
| 1119 | idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ |
| 1120 | if (idx == SRCU_STATE_IDLE) { |
| 1121 | spin_lock_irq_rcu_node(sp); |
| 1122 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
| 1123 | WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); |
| 1124 | spin_unlock_irq_rcu_node(sp); |
| 1125 | mutex_unlock(&sp->srcu_gp_mutex); |
| 1126 | return; |
| 1127 | } |
| 1128 | idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
| 1129 | if (idx == SRCU_STATE_IDLE) |
| 1130 | srcu_gp_start(sp); |
| 1131 | spin_unlock_irq_rcu_node(sp); |
| 1132 | if (idx != SRCU_STATE_IDLE) { |
| 1133 | mutex_unlock(&sp->srcu_gp_mutex); |
| 1134 | return; /* Someone else started the grace period. */ |
| 1135 | } |
| 1136 | } |
| 1137 | |
| 1138 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
| 1139 | idx = 1 ^ (sp->srcu_idx & 1); |
| 1140 | if (!try_check_zero(sp, idx, 1)) { |
| 1141 | mutex_unlock(&sp->srcu_gp_mutex); |
| 1142 | return; /* readers present, retry later. */ |
| 1143 | } |
| 1144 | srcu_flip(sp); |
| 1145 | rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); |
| 1146 | } |
| 1147 | |
| 1148 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
| 1149 | |
| 1150 | /* |
| 1151 | * SRCU read-side critical sections are normally short, |
| 1152 | * so check at least twice in quick succession after a flip. |
| 1153 | */ |
| 1154 | idx = 1 ^ (sp->srcu_idx & 1); |
| 1155 | if (!try_check_zero(sp, idx, 2)) { |
| 1156 | mutex_unlock(&sp->srcu_gp_mutex); |
| 1157 | return; /* readers present, retry later. */ |
| 1158 | } |
| 1159 | srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ |
| 1160 | } |
| 1161 | } |
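| | 
| | /*
| |  * Informal sketch (based on the rcu_seq_*() helpers in kernel/rcu/rcu.h):
| |  * ->srcu_gp_seq packs a grace-period counter and a small state field into
| |  * one unsigned long, roughly
| |  *
| |  *	seq = (counter << RCU_SEQ_CTR_SHIFT) | state;
| |  *
| |  * where state is SRCU_STATE_IDLE, SRCU_STATE_SCAN1, or SRCU_STATE_SCAN2.
| |  * srcu_advance_state() above moves a grace period from SCAN1 to SCAN2 and
| |  * then back to IDLE via srcu_gp_end().
| |  */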
| 1162 | |
| 1163 | /* |
| 1164 | * Invoke a limited number of SRCU callbacks that have passed through |
| 1165 | * their grace period. If there are more to do, SRCU will reschedule |
| 1166 | * the workqueue. Note that needed memory barriers have been executed |
| 1167 | * in this task's context by srcu_readers_active_idx_check(). |
| 1168 | */ |
| 1169 | static void srcu_invoke_callbacks(struct work_struct *work) |
| 1170 | { |
| 1171 | bool more; |
| 1172 | struct rcu_cblist ready_cbs; |
| 1173 | struct rcu_head *rhp; |
| 1174 | struct srcu_data *sdp; |
| 1175 | struct srcu_struct *sp; |
| 1176 | |
| 1177 | sdp = container_of(work, struct srcu_data, work.work); |
| 1178 | sp = sdp->sp; |
| 1179 | rcu_cblist_init(&ready_cbs); |
| 1180 | spin_lock_irq_rcu_node(sdp); |
| 1181 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 1182 | rcu_seq_current(&sp->srcu_gp_seq)); |
| 1183 | if (sdp->srcu_cblist_invoking || |
| 1184 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
| 1185 | spin_unlock_irq_rcu_node(sdp); |
| 1186 | return; /* Someone else on the job or nothing to do. */ |
| 1187 | } |
| 1188 | |
| 1189 | /* We are on the job! Extract and invoke ready callbacks. */ |
| 1190 | sdp->srcu_cblist_invoking = true; |
| 1191 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); |
| 1192 | spin_unlock_irq_rcu_node(sdp); |
| 1193 | rhp = rcu_cblist_dequeue(&ready_cbs); |
| 1194 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { |
| 1195 | debug_rcu_head_unqueue(rhp); |
| 1196 | local_bh_disable(); |
| 1197 | rhp->func(rhp); |
| 1198 | local_bh_enable(); |
| 1199 | } |
| 1200 | |
| 1201 | /* |
| 1202 | * Update counts, accelerate new callbacks, and if needed, |
| 1203 | * schedule another round of callback invocation. |
| 1204 | */ |
| 1205 | spin_lock_irq_rcu_node(sdp); |
| 1206 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); |
| 1207 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
| 1208 | rcu_seq_snap(&sp->srcu_gp_seq)); |
| 1209 | sdp->srcu_cblist_invoking = false; |
| 1210 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
| 1211 | spin_unlock_irq_rcu_node(sdp); |
| 1212 | if (more) |
| 1213 | srcu_schedule_cbs_sdp(sdp, 0); |
| 1214 | } |
| 1215 | |
| 1216 | /* |
| 1217 | * Finished one round of SRCU grace period. Start another if there are |
| 1218 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
| 1219 | */ |
| 1220 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) |
| 1221 | { |
| 1222 | bool pushgp = true; |
| 1223 | |
| 1224 | spin_lock_irq_rcu_node(sp); |
| 1225 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
| 1226 | if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { |
| 1227 | /* All requests fulfilled, time to go idle. */ |
| 1228 | pushgp = false; |
| 1229 | } |
| 1230 | } else if (!rcu_seq_state(sp->srcu_gp_seq)) { |
| 1231 | /* Outstanding request and no GP. Start one. */ |
| 1232 | srcu_gp_start(sp); |
| 1233 | } |
| 1234 | spin_unlock_irq_rcu_node(sp); |
| 1235 | |
| 1236 | if (pushgp) |
| 1237 | queue_delayed_work(rcu_gp_wq, &sp->work, delay); |
| 1238 | } |
| 1239 | |
| 1240 | /* |
| 1241 | * This is the work-queue function that handles SRCU grace periods. |
| 1242 | */ |
| 1243 | static void process_srcu(struct work_struct *work) |
| 1244 | { |
| 1245 | struct srcu_struct *sp; |
| 1246 | |
| 1247 | sp = container_of(work, struct srcu_struct, work.work); |
| 1248 | |
| 1249 | srcu_advance_state(sp); |
| 1250 | srcu_reschedule(sp, srcu_get_delay(sp)); |
| 1251 | } |
| 1252 | |
| 1253 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
| 1254 | struct srcu_struct *sp, int *flags, |
| 1255 | unsigned long *gp_seq) |
| 1256 | { |
| 1257 | if (test_type != SRCU_FLAVOR) |
| 1258 | return; |
| 1259 | *flags = 0; |
| 1260 | *gp_seq = rcu_seq_current(&sp->srcu_gp_seq); |
| 1261 | } |
| 1262 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
| 1263 | |
| 1264 | void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) |
| 1265 | { |
| 1266 | int cpu; |
| 1267 | int idx; |
| 1268 | unsigned long s0 = 0, s1 = 0; |
| 1269 | |
| 1270 | idx = sp->srcu_idx & 0x1; |
| 1271 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
| 1272 | tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx); |
| 1273 | for_each_possible_cpu(cpu) { |
| 1274 | unsigned long l0, l1; |
| 1275 | unsigned long u0, u1; |
| 1276 | long c0, c1; |
| 1277 | struct srcu_data *sdp; |
| 1278 | |
| 1279 | sdp = per_cpu_ptr(sp->sda, cpu); |
| 1280 | u0 = sdp->srcu_unlock_count[!idx]; |
| 1281 | u1 = sdp->srcu_unlock_count[idx]; |
| 1282 | |
| 1283 | /* |
| 1284 | * Make sure that a lock is always counted if the corresponding |
| 1285 | * unlock is counted. |
| 1286 | */ |
| 1287 | smp_rmb(); |
| 1288 | |
| 1289 | l0 = sdp->srcu_lock_count[!idx]; |
| 1290 | l1 = sdp->srcu_lock_count[idx]; |
| 1291 | |
| 1292 | c0 = l0 - u0; |
| 1293 | c1 = l1 - u1; |
| 1294 | pr_cont(" %d(%ld,%ld %1p)", |
| 1295 | cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist)); |
| 1296 | s0 += c0; |
| 1297 | s1 += c1; |
| 1298 | } |
| 1299 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
| 1300 | } |
| 1301 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); |
| 1302 | |
| 1303 | static int __init srcu_bootup_announce(void) |
| 1304 | { |
| 1305 | pr_info("Hierarchical SRCU implementation.\n"); |
| 1306 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
| 1307 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); |
| 1308 | return 0; |
| 1309 | } |
| 1310 | early_initcall(srcu_bootup_announce); |