// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize last_jiffies_update if necessary and return it.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}

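/*
 * tick_sched_do_timer - take over the do_timer() duty if it was dropped and
 * update jiffies when this CPU is the designated timekeeping CPU.
 */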
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

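/*
 * tick_sched_handle - do the per-tick bookkeeping: process accounting and
 * profiling, and keep the watchdog and idle statistics sane when the tick
 * was stopped in idle.
 */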
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

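/*
 * check_tick_dependency - return true (and emit a trace event) if any
 * dependency bit in @dep requires the tick to stay alive.
 */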
static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	return false;
}

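/*
 * can_stop_full_tick - check whether this CPU may stop its tick: it must be
 * online and no global, per-CPU, per-task or per-signal tick dependency may
 * be set.
 */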
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}

/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to
 * elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 * elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

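/*
 * tick_nohz_restart - rearm the tick emulation timer so that the next tick
 * fires one tick_period after @now, either via the hrtimer (highres mode)
 * or by programming the clockevent device (lowres mode).
 */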
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

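/*
 * tick_nohz_next_event - compute the next tick expiry for @cpu from the next
 * timer wheel/hrtimer expiry, RCU, irq_work and the timekeeping max
 * deferment. Returns 0 when the tick has to be kept running.
 */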
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that check whether the local timer softirq is
	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

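/*
 * tick_nohz_stop_tick - stop the tick on this CPU and program the expiry
 * computed by tick_nohz_next_event(), handing off the do_timer() duty if
 * this CPU currently holds it.
 */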
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * the nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

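/*
 * tick_nohz_restart_sched_tick - leave dynticks mode: catch up jiffies,
 * resume load accounting and restart the tick timer.
 */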
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

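/*
 * tick_nohz_full_update_tick - re-evaluate the tick on a full dynticks CPU:
 * stop it if all dependencies allow it, otherwise restart it if it was
 * stopped.
 */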
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}

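/*
 * can_stop_idle_tick - check whether the idle tick may be stopped on this
 * CPU: it must be online, in a NOHZ mode, have no pending work and not be
 * needed for timekeeping.
 */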
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
		 */
		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

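/*
 * tick_nohz_account_idle_ticks - account the jiffies spent with the tick
 * stopped as idle time, unless vtime accounting already covers it.
 */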
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_cpu_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does a single tick of accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
{
	tick_nohz_restart_sched_tick(ts, now);
	tick_nohz_account_idle_ticks(ts);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped)
		__tick_nohz_idle_restart_tick(ts, ktime_get());
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		__tick_nohz_idle_restart_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

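/*
 * tick_nohz_activate - record the NOHZ mode for this CPU and switch the
 * timer wheel to NOHZ operation on the first activation.
 */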
static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}